author	Andrew Tridgell <tridge@samba.org>	2003-08-13 01:53:07 +0000
committer	Andrew Tridgell <tridge@samba.org>	2003-08-13 01:53:07 +0000
commit	ef2e26c91b80556af033d3335e55f5dfa6fff31d (patch)
tree	faa21bfd7e7b5247250b47c7891dc1a5ebee6be9 /source4/locking
first public release of samba4 code
(This used to be commit b0510b5428b3461aeb9bbe3cc95f62fc73e2b97f)
Diffstat (limited to 'source4/locking')
-rw-r--r--	source4/locking/brlock.c	698
-rw-r--r--	source4/locking/locking.c	849
-rw-r--r--	source4/locking/posix.c	1313
3 files changed, 2860 insertions, 0 deletions
diff --git a/source4/locking/brlock.c b/source4/locking/brlock.c
new file mode 100644
index 0000000000..4cd885f1a6
--- /dev/null
+++ b/source4/locking/brlock.c
@@ -0,0 +1,698 @@
+/*
+ Unix SMB/CIFS implementation.
+ byte range locking code
+ Updated to handle range splits/merges.
+
+ Copyright (C) Andrew Tridgell 1992-2000
+ Copyright (C) Jeremy Allison 1992-2000
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/* This module implements a tdb based byte range locking service,
+ replacing the fcntl() based byte range locking previously
+ used. This allows us to provide the same semantics as NT */
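+
+/* Rough sketch of the expected call sequence (illustrative only, not
+   taken from a real caller):
+
+	brl_init(False);
+	...
+	status = brl_lock(dev, ino, fnum, smbpid, sys_getpid(), tid,
+			  start, size, WRITE_LOCK);
+	...
+	brl_unlock(dev, ino, fnum, smbpid, sys_getpid(), tid,
+		   start, size, False);
+	...
+	brl_close(dev, ino, sys_getpid(), tid, fnum);
+	brl_shutdown(False);
+*/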
+
+#include "includes.h"
+
+#define ZERO_ZERO 0
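+/* When ZERO_ZERO is set, special handling of 0/0 (start==0, size==0)
+   locks is compiled in and the lock list is kept sorted - see the
+   #if ZERO_ZERO blocks below. It is off by default. */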
+
+/* This contains elements that differentiate locks. The smbpid is a
+ client supplied pid, and is essentially the locking context for
+ this client */
+
+struct lock_context {
+ uint16 smbpid;
+ uint16 tid;
+ pid_t pid;
+};
+
+/* The data in brlock records is an unsorted linear array of these
+ records. It is unnecessary to store the count as tdb provides the
+ size of the record */
+
+struct lock_struct {
+ struct lock_context context;
+ br_off start;
+ br_off size;
+ int fnum;
+ enum brl_type lock_type;
+};
+
+/* The key used in the brlock database. */
+
+struct lock_key {
+ SMB_DEV_T device;
+ SMB_INO_T inode;
+};
+
+/* The open brlock.tdb database. */
+
+static TDB_CONTEXT *tdb;
+
+/****************************************************************************
+ Create a locking key - ensuring zero filled for pad purposes.
+****************************************************************************/
+
+static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
+{
+ static struct lock_key key;
+ TDB_DATA kbuf;
+
+ memset(&key, '\0', sizeof(key));
+ key.device = dev;
+ key.inode = inode;
+ kbuf.dptr = (char *)&key;
+ kbuf.dsize = sizeof(key);
+ return kbuf;
+}
+
+/****************************************************************************
+ See if two locking contexts are equal.
+****************************************************************************/
+
+static BOOL brl_same_context(struct lock_context *ctx1,
+ struct lock_context *ctx2)
+{
+ return (ctx1->pid == ctx2->pid) &&
+ (ctx1->smbpid == ctx2->smbpid) &&
+ (ctx1->tid == ctx2->tid);
+}
+
+/****************************************************************************
+ See if lock2 can be added when lock1 is in place.
+****************************************************************************/
+
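+/* Note lock ranges are half-open: a lock at start=0,size=10 covers
+   bytes 0..9, so a second lock starting at byte 10 does not conflict
+   (10 >= 0 + 10 in the range test below). */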
+static BOOL brl_conflict(struct lock_struct *lck1,
+ struct lock_struct *lck2)
+{
+ if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ return False;
+
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ if (brl_same_context(&lck1->context, &lck2->context) &&
+ lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
+ return False;
+ }
+
+ if (lck1->start >= (lck2->start + lck2->size) ||
+ lck2->start >= (lck1->start + lck1->size)) {
+ return False;
+ }
+
+ return True;
+}
+
+#if ZERO_ZERO
+static BOOL brl_conflict1(struct lock_struct *lck1,
+ struct lock_struct *lck2)
+{
+ if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ return False;
+
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ if (brl_same_context(&lck1->context, &lck2->context) &&
+ lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
+ return False;
+ }
+
+ if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
+ return True;
+ }
+
+ if (lck1->start >= (lck2->start + lck2->size) ||
+ lck2->start >= (lck1->start + lck1->size)) {
+ return False;
+ }
+
+ return True;
+}
+#endif
+
+/****************************************************************************
+ Check to see if this lock conflicts, ignoring any of our own locks on
+ the same fnum (subject to the WRITE-vs-READ exception below).
+****************************************************************************/
+
+static BOOL brl_conflict_other(struct lock_struct *lck1, struct lock_struct *lck2)
+{
+ if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ return False;
+
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
+ return False;
+
+ /*
+ * Incoming WRITE locks conflict with existing READ locks even
+ * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
+ */
+
+ if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
+ if (brl_same_context(&lck1->context, &lck2->context) &&
+ lck1->fnum == lck2->fnum)
+ return False;
+ }
+
+ if (lck1->start >= (lck2->start + lck2->size) ||
+ lck2->start >= (lck1->start + lck1->size)) return False;
+
+ return True;
+}
+
+
+#if DONT_DO_THIS
+ /* doing this traversal could kill solaris machines under high load (tridge) */
+ /* delete any dead locks */
+
+/****************************************************************************
+ Delete a record if it is for a dead process, if check_self is true, then
+ delete any records belonging to this pid also (there shouldn't be any).
+****************************************************************************/
+
+static int delete_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
+{
+ struct lock_struct *locks;
+ int count, i;
+ BOOL check_self = *(BOOL *)state;
+ pid_t mypid = sys_getpid();
+
+ tdb_chainlock(tdb, kbuf);
+
+ locks = (struct lock_struct *)dbuf.dptr;
+
+ count = dbuf.dsize / sizeof(*locks);
+ for (i=0; i<count; i++) {
+ struct lock_struct *lock = &locks[i];
+
+ /* If check_self is true we want to remove our own records. */
+ if (check_self && (mypid == lock->context.pid)) {
+
+ DEBUG(0,("brlock : delete_fn. LOGIC ERROR ! Shutting down and a record for my pid (%u) exists !\n",
+ (unsigned int)lock->context.pid ));
+
+ } else if (process_exists(lock->context.pid)) {
+
+ DEBUG(10,("brlock : delete_fn. pid %u exists.\n", (unsigned int)lock->context.pid ));
+ continue;
+ }
+
+ DEBUG(10,("brlock : delete_fn. Deleting record for process %u\n",
+ (unsigned int)lock->context.pid ));
+
+ if (count > 1 && i < count-1) {
+ memmove(&locks[i], &locks[i+1],
+ sizeof(*locks)*((count-1) - i));
+ }
+ count--;
+ i--;
+ }
+
+ if (count == 0) {
+ tdb_delete(tdb, kbuf);
+ } else if (count < (dbuf.dsize / sizeof(*locks))) {
+ dbuf.dsize = count * sizeof(*locks);
+ tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+
+ tdb_chainunlock(tdb, kbuf);
+ return 0;
+}
+#endif
+
+/****************************************************************************
+ Open up the brlock.tdb database.
+****************************************************************************/
+
+void brl_init(int read_only)
+{
+ if (tdb)
+ return;
+ tdb = tdb_open_log(lock_path("brlock.tdb"), 0, TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
+ read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644);
+ if (!tdb) {
+ DEBUG(0,("Failed to open byte range locking database\n"));
+ return;
+ }
+
+#if DONT_DO_THIS
+ /* doing this traversal could kill solaris machines under high load (tridge) */
+ /* delete any dead locks */
+ if (!read_only) {
+ BOOL check_self = False;
+ tdb_traverse(tdb, delete_fn, &check_self);
+ }
+#endif
+}
+
+/****************************************************************************
+ Close down the brlock.tdb database.
+****************************************************************************/
+
+void brl_shutdown(int read_only)
+{
+ if (!tdb)
+ return;
+
+#if DONT_DO_THIS
+ /* doing this traversal could kill solaris machines under high load (tridge) */
+ /* delete any dead locks */
+ if (!read_only) {
+ BOOL check_self = True;
+ tdb_traverse(tdb, delete_fn, &check_self);
+ }
+#endif
+
+ tdb_close(tdb);
+}
+
+#if ZERO_ZERO
+/****************************************************************************
+compare two locks for sorting
+****************************************************************************/
+static int lock_compare(struct lock_struct *lck1,
+ struct lock_struct *lck2)
+{
+ if (lck1->start != lck2->start) return (lck1->start - lck2->start);
+ if (lck2->size != lck1->size) {
+ return ((int)lck1->size - (int)lck2->size);
+ }
+ return 0;
+}
+#endif
+
+/****************************************************************************
+ Lock a range of bytes.
+****************************************************************************/
+
+NTSTATUS brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
+ uint16 smbpid, pid_t pid, uint16 tid,
+ br_off start, br_off size,
+ enum brl_type lock_type)
+{
+ TDB_DATA kbuf, dbuf;
+ int count, i;
+ struct lock_struct lock, *locks;
+ char *tp;
+ NTSTATUS status = NT_STATUS_OK;
+ static int last_failed = -1;
+ static br_off last_failed_start;
+
+ kbuf = locking_key(dev,ino);
+
+ dbuf.dptr = NULL;
+
+#if !ZERO_ZERO
+ if (start == 0 && size == 0) {
+ DEBUG(0,("client sent 0/0 lock - please report this\n"));
+ }
+#endif
+
+ tdb_chainlock(tdb, kbuf);
+ dbuf = tdb_fetch(tdb, kbuf);
+
+ lock.context.smbpid = smbpid;
+ lock.context.pid = pid;
+ lock.context.tid = tid;
+ lock.start = start;
+ lock.size = size;
+ lock.fnum = fnum;
+ lock.lock_type = lock_type;
+
+ if (dbuf.dptr) {
+ /* there are existing locks - make sure they don't conflict */
+ locks = (struct lock_struct *)dbuf.dptr;
+ count = dbuf.dsize / sizeof(*locks);
+ for (i=0; i<count; i++) {
+ if (brl_conflict(&locks[i], &lock)) {
+ status = NT_STATUS_LOCK_NOT_GRANTED;
+ goto fail;
+ }
+#if ZERO_ZERO
+ if (lock.start == 0 && lock.size == 0 &&
+ locks[i].size == 0) {
+ break;
+ }
+#endif
+ }
+ }
+
+ /* no conflicts - add it to the list of locks */
+ tp = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*locks));
+ if (!tp) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ } else {
+ dbuf.dptr = tp;
+ }
+ memcpy(dbuf.dptr + dbuf.dsize, &lock, sizeof(lock));
+ dbuf.dsize += sizeof(lock);
+
+#if ZERO_ZERO
+ /* sort the lock list */
+ qsort(dbuf.dptr, dbuf.dsize/sizeof(lock), sizeof(lock), lock_compare);
+#endif
+
+ tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
+
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return NT_STATUS_OK;
+
+ fail:
+ /* this is a nasty hack to try to simulate the lock result cache code in w2k.
+ It isn't completely accurate as I haven't yet worked out the correct
+ semantics (tridge)
+ */
+ if (last_failed == fnum &&
+ last_failed_start == start &&
+ NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
+ status = NT_STATUS_FILE_LOCK_CONFLICT;
+ }
+ last_failed = fnum;
+ last_failed_start = start;
+
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return status;
+}
+
+/****************************************************************************
+ Check if an unlock overlaps a pending lock.
+****************************************************************************/
+
+static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
+{
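+	/* This test is deliberately loose: the second comparison is
+	   inclusive of pend_lock->start + pend_lock->size, so a range
+	   that merely touches the end of a pending lock still counts.
+	   Waking an extra waiter presumably just costs a retry. */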
+ if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
+ return True;
+ if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
+ return True;
+ return False;
+}
+
+/****************************************************************************
+ Unlock a range of bytes.
+****************************************************************************/
+
+BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
+ uint16 smbpid, pid_t pid, uint16 tid,
+ br_off start, br_off size,
+ BOOL remove_pending_locks_only)
+{
+ TDB_DATA kbuf, dbuf;
+ int count, i, j;
+ struct lock_struct *locks;
+ struct lock_context context;
+
+ kbuf = locking_key(dev,ino);
+
+ dbuf.dptr = NULL;
+
+ tdb_chainlock(tdb, kbuf);
+ dbuf = tdb_fetch(tdb, kbuf);
+
+ if (!dbuf.dptr) {
+ DEBUG(10,("brl_unlock: tdb_fetch failed !\n"));
+ goto fail;
+ }
+
+ context.smbpid = smbpid;
+ context.pid = pid;
+ context.tid = tid;
+
+ /* there are existing locks - find a match */
+ locks = (struct lock_struct *)dbuf.dptr;
+ count = dbuf.dsize / sizeof(*locks);
+
+#if ZERO_ZERO
+ for (i=0; i<count; i++) {
+ struct lock_struct *lock = &locks[i];
+
+ if (lock->lock_type == WRITE_LOCK &&
+ brl_same_context(&lock->context, &context) &&
+ lock->fnum == fnum &&
+ lock->start == start &&
+ lock->size == size) {
+ /* found it - delete it */
+ if (count == 1) {
+ tdb_delete(tdb, kbuf);
+ } else {
+ if (i < count-1) {
+ memmove(&locks[i], &locks[i+1],
+ sizeof(*locks)*((count-1) - i));
+ }
+ dbuf.dsize -= sizeof(*locks);
+ tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return True;
+ }
+ }
+#endif
+
+ locks = (struct lock_struct *)dbuf.dptr;
+ count = dbuf.dsize / sizeof(*locks);
+ for (i=0; i<count; i++) {
+ struct lock_struct *lock = &locks[i];
+
+ if (brl_same_context(&lock->context, &context) &&
+ lock->fnum == fnum &&
+ lock->start == start &&
+ lock->size == size) {
+
+ if (remove_pending_locks_only && lock->lock_type != PENDING_LOCK)
+ continue;
+
+ if (lock->lock_type != PENDING_LOCK) {
+ /* Send unlock messages to any pending waiters that overlap. */
+ for (j=0; j<count; j++) {
+ struct lock_struct *pend_lock = &locks[j];
+
+ /* Ignore non-pending locks. */
+ if (pend_lock->lock_type != PENDING_LOCK)
+ continue;
+
+ /* We could send specific lock info here... */
+ if (brl_pending_overlap(lock, pend_lock)) {
+ DEBUG(10,("brl_unlock: sending unlock message to pid %u\n",
+ (unsigned int)pend_lock->context.pid ));
+
+ message_send_pid(pend_lock->context.pid,
+ MSG_SMB_UNLOCK,
+ NULL, 0, True);
+ }
+ }
+ }
+
+ /* found it - delete it */
+ if (count == 1) {
+ tdb_delete(tdb, kbuf);
+ } else {
+ if (i < count-1) {
+ memmove(&locks[i], &locks[i+1],
+ sizeof(*locks)*((count-1) - i));
+ }
+ dbuf.dsize -= sizeof(*locks);
+ tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return True;
+ }
+ }
+
+ /* we didn't find it */
+
+ fail:
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return False;
+}
+
+
+/****************************************************************************
+ Test if we could add a lock if we wanted to.
+****************************************************************************/
+
+BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
+ uint16 smbpid, pid_t pid, uint16 tid,
+ br_off start, br_off size,
+ enum brl_type lock_type, int check_self)
+{
+ TDB_DATA kbuf, dbuf;
+ int count, i;
+ struct lock_struct lock, *locks;
+
+ kbuf = locking_key(dev,ino);
+
+ dbuf.dptr = NULL;
+
+ tdb_chainlock(tdb, kbuf);
+ dbuf = tdb_fetch(tdb, kbuf);
+
+ lock.context.smbpid = smbpid;
+ lock.context.pid = pid;
+ lock.context.tid = tid;
+ lock.start = start;
+ lock.size = size;
+ lock.fnum = fnum;
+ lock.lock_type = lock_type;
+
+ if (dbuf.dptr) {
+ /* there are existing locks - make sure they don't conflict */
+ locks = (struct lock_struct *)dbuf.dptr;
+ count = dbuf.dsize / sizeof(*locks);
+ for (i=0; i<count; i++) {
+ if (check_self) {
+ if (brl_conflict(&locks[i], &lock))
+ goto fail;
+ } else {
+ /*
+ * Our own locks don't conflict.
+ */
+ if (brl_conflict_other(&locks[i], &lock))
+ goto fail;
+ }
+ }
+ }
+
+ /* no conflicts - we could have added it */
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return True;
+
+ fail:
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+ return False;
+}
+
+/****************************************************************************
+ Remove any locks associated with an open file.
+****************************************************************************/
+
+void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
+{
+ TDB_DATA kbuf, dbuf;
+ int count, i, j, dcount=0;
+ struct lock_struct *locks;
+
+ kbuf = locking_key(dev,ino);
+
+ dbuf.dptr = NULL;
+
+ tdb_chainlock(tdb, kbuf);
+ dbuf = tdb_fetch(tdb, kbuf);
+
+ if (!dbuf.dptr) goto fail;
+
+ /* there are existing locks - remove any for this fnum */
+ locks = (struct lock_struct *)dbuf.dptr;
+ count = dbuf.dsize / sizeof(*locks);
+
+ for (i=0; i<count; i++) {
+ struct lock_struct *lock = &locks[i];
+
+ if (lock->context.tid == tid &&
+ lock->context.pid == pid &&
+ lock->fnum == fnum) {
+
+ /* Send unlock messages to any pending waiters that overlap. */
+ for (j=0; j<count; j++) {
+ struct lock_struct *pend_lock = &locks[j];
+
+ /* Ignore our own or non-pending locks. */
+ if (pend_lock->lock_type != PENDING_LOCK)
+ continue;
+
+ if (pend_lock->context.tid == tid &&
+ pend_lock->context.pid == pid &&
+ pend_lock->fnum == fnum)
+ continue;
+
+ /* We could send specific lock info here... */
+ if (brl_pending_overlap(lock, pend_lock))
+ message_send_pid(pend_lock->context.pid,
+ MSG_SMB_UNLOCK,
+ NULL, 0, True);
+ }
+
+ /* found it - delete it */
+ if (count > 1 && i < count-1) {
+ memmove(&locks[i], &locks[i+1],
+ sizeof(*locks)*((count-1) - i));
+ }
+ count--;
+ i--;
+ dcount++;
+ }
+ }
+
+ if (count == 0) {
+ tdb_delete(tdb, kbuf);
+ } else if (count < (dbuf.dsize / sizeof(*locks))) {
+ dbuf.dsize -= dcount * sizeof(*locks);
+ tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+
+ /* cleanup - reached on both the found and not-found paths */
+ fail:
+ SAFE_FREE(dbuf.dptr);
+ tdb_chainunlock(tdb, kbuf);
+}
+
+/****************************************************************************
+ Traverse the whole database with this function, calling traverse_callback
+ on each lock.
+****************************************************************************/
+
+static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
+{
+ struct lock_struct *locks;
+ struct lock_key *key;
+ int i;
+
+ BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
+
+ locks = (struct lock_struct *)dbuf.dptr;
+ key = (struct lock_key *)kbuf.dptr;
+
+ for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
+ traverse_callback(key->device, key->inode,
+ locks[i].context.pid,
+ locks[i].lock_type,
+ locks[i].start,
+ locks[i].size);
+ }
+ return 0;
+}
+
+/*******************************************************************
+ Call the specified function on each lock in the database.
+********************************************************************/
+
+int brl_forall(BRLOCK_FN(fn))
+{
+ if (!tdb) return 0;
+ return tdb_traverse(tdb, traverse_fn, (void *)fn);
+}
diff --git a/source4/locking/locking.c b/source4/locking/locking.c
new file mode 100644
index 0000000000..63e1f3a164
--- /dev/null
+++ b/source4/locking/locking.c
@@ -0,0 +1,849 @@
+/*
+ Unix SMB/CIFS implementation.
+ Locking functions
+ Copyright (C) Andrew Tridgell 1992-2000
+ Copyright (C) Jeremy Allison 1992-2000
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Revision History:
+
+ 12 aug 96: Erik.Devriendt@te6.siemens.be
+ added support for shared memory implementation of share mode locking
+
+ May 1997. Jeremy Allison (jallison@whistle.com). Modified share mode
+ locking to deal with multiple share modes per open file.
+
+ September 1997. Jeremy Allison (jallison@whistle.com). Added oplock
+ support.
+
+ rewritten completely to use new tdb code. Tridge, Dec '99
+
+ Added POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
+*/
+
+#include "includes.h"
+
+/* the locking database handle */
+static TDB_CONTEXT *tdb;
+
+/****************************************************************************
+ Debugging aid :-).
+****************************************************************************/
+
+static const char *lock_type_name(enum brl_type lock_type)
+{
+ return (lock_type == READ_LOCK) ? "READ" : "WRITE";
+}
+
+/****************************************************************************
+ Utility function called to see if a file region is locked.
+ If check_self is True, then checks on our own fd with the same locking
+ context are still made. If check_self is False, those checks are skipped.
+****************************************************************************/
+
+BOOL is_locked(files_struct *fsp,struct tcon_context *conn,
+ SMB_BIG_UINT count,SMB_BIG_UINT offset,
+ enum brl_type lock_type, BOOL check_self)
+{
+ int snum = SNUM(conn);
+ BOOL ret;
+
+ if (count == 0)
+ return(False);
+
+ if (!lp_locking(snum) || !lp_strict_locking(snum))
+ return(False);
+
+ ret = !brl_locktest(fsp->dev, fsp->inode, fsp->fnum,
+ global_smbpid, sys_getpid(), conn->cnum,
+ offset, count, lock_type, check_self);
+
+ DEBUG(10,("is_locked: brl start=%.0f len=%.0f %s for file %s\n",
+ (double)offset, (double)count, ret ? "locked" : "unlocked",
+ fsp->fsp_name ));
+
+ /*
+ * There is no lock held by an SMB daemon, check to
+ * see if there is a POSIX lock from a UNIX or NFS process.
+ */
+
+ if(!ret && lp_posix_locking(snum)) {
+ ret = is_posix_locked(fsp, offset, count, lock_type);
+
+ DEBUG(10,("is_locked: posix start=%.0f len=%.0f %s for file %s\n",
+ (double)offset, (double)count, ret ? "locked" : "unlocked",
+ fsp->fsp_name ));
+ }
+
+ return ret;
+}
+
+/****************************************************************************
+ Utility function called by locking requests.
+****************************************************************************/
+
+static NTSTATUS do_lock(files_struct *fsp,struct tcon_context *conn, uint16 lock_pid,
+ SMB_BIG_UINT count,SMB_BIG_UINT offset,enum brl_type lock_type)
+{
+ NTSTATUS status = NT_STATUS_LOCK_NOT_GRANTED;
+
+ if (!lp_locking(SNUM(conn)))
+ return NT_STATUS_OK;
+
+ /* NOTE! 0 byte long ranges ARE allowed and should be stored */
+
+ DEBUG(10,("do_lock: lock type %s start=%.0f len=%.0f requested for file %s\n",
+ lock_type_name(lock_type), (double)offset, (double)count, fsp->fsp_name ));
+
+ if (OPEN_FSP(fsp) && fsp->can_lock && (fsp->conn == conn)) {
+ status = brl_lock(fsp->dev, fsp->inode, fsp->fnum,
+ lock_pid, sys_getpid(), conn->cnum,
+ offset, count,
+ lock_type);
+
+ if (NT_STATUS_IS_OK(status) && lp_posix_locking(SNUM(conn))) {
+
+ /*
+ * Try and get a POSIX lock on this range.
+ * Note that this is ok if it is a read lock
+ * overlapping on a different fd. JRA.
+ */
+
+ if (!set_posix_lock(fsp, offset, count, lock_type)) {
+ status = NT_STATUS_LOCK_NOT_GRANTED;
+ /*
+ * We failed to map - we must now remove the brl
+ * lock entry.
+ */
+ (void)brl_unlock(fsp->dev, fsp->inode, fsp->fnum,
+ lock_pid, sys_getpid(), conn->cnum,
+ offset, count, False);
+ }
+ }
+ }
+
+ return status;
+}
+
+/****************************************************************************
+ Utility function called by locking requests. This is *DISGUSTING*. It also
+ appears to be "What Windows Does" (tm). Andrew, ever wonder why Windows 2000
+ is so slow on the locking tests...... ? This is the reason. Much though I hate
+ it, we need this. JRA.
+****************************************************************************/
+
+NTSTATUS do_lock_spin(files_struct *fsp,struct tcon_context *conn, uint16 lock_pid,
+ SMB_BIG_UINT count,SMB_BIG_UINT offset,enum brl_type lock_type)
+{
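+	/* Retry up to lp_lock_spin_count() times, sleeping
+	   lp_lock_sleep_time() microseconds (via sys_usleep) between
+	   attempts, to mimic the Windows behaviour described above. */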
+ int j, maxj = lp_lock_spin_count();
+ int sleeptime = lp_lock_sleep_time();
+ NTSTATUS status, ret;
+
+ if (maxj <= 0)
+ maxj = 1;
+
+ ret = NT_STATUS_OK; /* to keep dumb compilers happy */
+
+ for (j = 0; j < maxj; j++) {
+ status = do_lock(fsp, conn, lock_pid, count, offset, lock_type);
+ if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED) &&
+ !NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
+ return status;
+ }
+ /* if we do fail then return the first error code we got */
+ if (j == 0) {
+ ret = status;
+ }
+ if (sleeptime)
+ sys_usleep(sleeptime);
+ }
+ return ret;
+}
+
+/****************************************************************************
+ Utility function called by unlocking requests.
+****************************************************************************/
+
+NTSTATUS do_unlock(files_struct *fsp,struct tcon_context *conn, uint16 lock_pid,
+ SMB_BIG_UINT count,SMB_BIG_UINT offset)
+{
+ BOOL ok = False;
+
+ if (!lp_locking(SNUM(conn)))
+ return NT_STATUS_OK;
+
+ if (!OPEN_FSP(fsp) || !fsp->can_lock || (fsp->conn != conn)) {
+ return NT_STATUS_INVALID_HANDLE;
+ }
+
+ DEBUG(10,("do_unlock: unlock start=%.0f len=%.0f requested for file %s\n",
+ (double)offset, (double)count, fsp->fsp_name ));
+
+ /*
+ * Remove the existing lock record from the tdb lockdb
+ * before looking at POSIX locks. If this record doesn't
+ * match then don't bother looking to remove POSIX locks.
+ */
+
+ ok = brl_unlock(fsp->dev, fsp->inode, fsp->fnum,
+ lock_pid, sys_getpid(), conn->cnum, offset, count, False);
+
+ if (!ok) {
+ DEBUG(10,("do_unlock: returning ERRlock.\n" ));
+ return NT_STATUS_RANGE_NOT_LOCKED;
+ }
+
+ if (!lp_posix_locking(SNUM(conn)))
+ return NT_STATUS_OK;
+
+ (void)release_posix_lock(fsp, offset, count);
+
+ return NT_STATUS_OK;
+}
+
+/****************************************************************************
+ Remove any locks on this fd. Called from file_close().
+****************************************************************************/
+
+void locking_close_file(files_struct *fsp)
+{
+ pid_t pid = sys_getpid();
+
+ if (!lp_locking(SNUM(fsp->conn)))
+ return;
+
+ /*
+ * Just release all the brl locks, no need to release individually.
+ */
+
+ brl_close(fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
+
+ if(lp_posix_locking(SNUM(fsp->conn))) {
+
+ /*
+ * Release all the POSIX locks.
+ */
+ posix_locking_close_file(fsp);
+
+ }
+}
+
+/****************************************************************************
+ Initialise the locking functions.
+****************************************************************************/
+
+static int open_read_only;
+
+BOOL locking_init(int read_only)
+{
+ brl_init(read_only);
+
+ if (tdb)
+ return True;
+
+ tdb = tdb_open_log(lock_path("locking.tdb"),
+ 0, TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
+ read_only?O_RDONLY:O_RDWR|O_CREAT,
+ 0644);
+
+ if (!tdb) {
+ DEBUG(0,("ERROR: Failed to initialise locking database\n"));
+ return False;
+ }
+
+ if (!posix_locking_init(read_only))
+ return False;
+
+ open_read_only = read_only;
+
+ return True;
+}
+
+/*******************************************************************
+ Deinitialize the share_mode management.
+******************************************************************/
+
+BOOL locking_end(void)
+{
+
+ brl_shutdown(open_read_only);
+ if (tdb) {
+
+ if (tdb_close(tdb) != 0)
+ return False;
+ }
+
+ return True;
+}
+
+/*******************************************************************
+ Form a static locking key for a dev/inode pair.
+******************************************************************/
+
+static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
+{
+ static struct locking_key key;
+ TDB_DATA kbuf;
+
+ memset(&key, '\0', sizeof(key));
+ key.dev = dev;
+ key.inode = inode;
+ kbuf.dptr = (char *)&key;
+ kbuf.dsize = sizeof(key);
+ return kbuf;
+}
+
+static TDB_DATA locking_key_fsp(files_struct *fsp)
+{
+ return locking_key(fsp->dev, fsp->inode);
+}
+
+/*******************************************************************
+ Lock a hash bucket entry.
+******************************************************************/
+
+BOOL lock_share_entry(struct tcon_context *conn,
+ SMB_DEV_T dev, SMB_INO_T inode)
+{
+ return tdb_chainlock(tdb, locking_key(dev, inode)) == 0;
+}
+
+/*******************************************************************
+ Unlock a hash bucket entry.
+******************************************************************/
+
+void unlock_share_entry(struct tcon_context *conn,
+ SMB_DEV_T dev, SMB_INO_T inode)
+{
+ tdb_chainunlock(tdb, locking_key(dev, inode));
+}
+
+/*******************************************************************
+ Lock a hash bucket entry. Use an fsp for convenience.
+******************************************************************/
+
+BOOL lock_share_entry_fsp(files_struct *fsp)
+{
+ return tdb_chainlock(tdb, locking_key(fsp->dev, fsp->inode)) == 0;
+}
+
+/*******************************************************************
+ Unlock a hash bucket entry.
+******************************************************************/
+
+void unlock_share_entry_fsp(files_struct *fsp)
+{
+ tdb_chainunlock(tdb, locking_key(fsp->dev, fsp->inode));
+}
+
+/*******************************************************************
+ Print out a share mode.
+********************************************************************/
+
+static char *share_mode_str(int num, share_mode_entry *e)
+{
+ static pstring share_str;
+
+ slprintf(share_str, sizeof(share_str)-1, "share_mode_entry[%d]: \
+pid = %u, share_mode = 0x%x, desired_access = 0x%x, port = 0x%x, type= 0x%x, file_id = %lu, dev = 0x%x, inode = %.0f",
+ num, e->pid, e->share_mode, (unsigned int)e->desired_access, e->op_port, e->op_type, e->share_file_id,
+ (unsigned int)e->dev, (double)e->inode );
+
+ return share_str;
+}
+
+/*******************************************************************
+ Print out a share mode table.
+********************************************************************/
+
+static void print_share_mode_table(struct locking_data *data)
+{
+ int num_share_modes = data->u.num_share_mode_entries;
+ share_mode_entry *shares = (share_mode_entry *)(data + 1);
+ int i;
+
+ for (i = 0; i < num_share_modes; i++) {
+ share_mode_entry *entry_p = &shares[i];
+ DEBUG(10,("print_share_mode_table: %s\n", share_mode_str(i, entry_p) ));
+ }
+}
+
+/*******************************************************************
+ Get all share mode entries for a dev/inode pair.
+********************************************************************/
+
+int get_share_modes(struct tcon_context *conn,
+ SMB_DEV_T dev, SMB_INO_T inode,
+ share_mode_entry **pp_shares)
+{
+ TDB_DATA dbuf;
+ struct locking_data *data;
+ int num_share_modes;
+ share_mode_entry *shares = NULL;
+
+ *pp_shares = NULL;
+
+ dbuf = tdb_fetch(tdb, locking_key(dev, inode));
+ if (!dbuf.dptr)
+ return 0;
+
+ data = (struct locking_data *)dbuf.dptr;
+ num_share_modes = data->u.num_share_mode_entries;
+ if(num_share_modes) {
+ int i;
+ int del_count = 0;
+
+ shares = (share_mode_entry *)memdup(dbuf.dptr + sizeof(*data),
+ num_share_modes * sizeof(share_mode_entry));
+
+ if (!shares) {
+ SAFE_FREE(dbuf.dptr);
+ return 0;
+ }
+
+ /*
+ * Ensure that each entry has a real process attached.
+ */
+
+ for (i = 0; i < num_share_modes; ) {
+ share_mode_entry *entry_p = &shares[i];
+ if (process_exists(entry_p->pid)) {
+ DEBUG(10,("get_share_modes: %s\n", share_mode_str(i, entry_p) ));
+ i++;
+ } else {
+ DEBUG(10,("get_share_modes: deleted %s\n", share_mode_str(i, entry_p) ));
+ /* Overlapping regions - must use memmove, not memcpy. */
+ memmove(&shares[i], &shares[i+1],
+ sizeof(share_mode_entry) * (num_share_modes - i - 1));
+ num_share_modes--;
+ del_count++;
+ }
+ }
+
+ /* Did we delete any ? If so, re-store in tdb. */
+ if (del_count) {
+ data->u.num_share_mode_entries = num_share_modes;
+
+ if (num_share_modes)
+ memcpy(dbuf.dptr + sizeof(*data), shares,
+ num_share_modes * sizeof(share_mode_entry));
+
+ /* The record has shrunk a bit */
+ dbuf.dsize -= del_count * sizeof(share_mode_entry);
+
+ if (tdb_store(tdb, locking_key(dev, inode), dbuf, TDB_REPLACE) == -1) {
+ SAFE_FREE(shares);
+ SAFE_FREE(dbuf.dptr);
+ return 0;
+ }
+ }
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ *pp_shares = shares;
+ return num_share_modes;
+}
+
+/*******************************************************************
+ Fill a share mode entry.
+********************************************************************/
+
+static void fill_share_mode(char *p, files_struct *fsp, uint16 port, uint16 op_type)
+{
+ share_mode_entry *e = (share_mode_entry *)p;
+ void *x = &e->time; /* Needed to force alignment. p may not be aligned.... */
+
+ memset(e, '\0', sizeof(share_mode_entry));
+ e->pid = sys_getpid();
+ e->share_mode = fsp->share_mode;
+ e->desired_access = fsp->desired_access;
+ e->op_port = port;
+ e->op_type = op_type;
+ memcpy(x, &fsp->open_time, sizeof(struct timeval));
+ e->share_file_id = fsp->file_id;
+ e->dev = fsp->dev;
+ e->inode = fsp->inode;
+}
+
+/*******************************************************************
+ Check if two share mode entries are identical, ignoring oplock
+ and port info and desired_access.
+********************************************************************/
+
+BOOL share_modes_identical( share_mode_entry *e1, share_mode_entry *e2)
+{
+#if 1 /* JRA PARANOIA TEST - REMOVE LATER */
+ if (e1->pid == e2->pid &&
+ e1->share_file_id == e2->share_file_id &&
+ e1->dev == e2->dev &&
+ e1->inode == e2->inode &&
+ (e1->share_mode & ~DELETE_ON_CLOSE_FLAG) != (e2->share_mode & ~DELETE_ON_CLOSE_FLAG)) {
+ DEBUG(0,("PANIC: share_modes_identical: share_mode mismatch (e1 = %u, e2 = %u). Logic error.\n",
+ (unsigned int)(e1->share_mode & ~DELETE_ON_CLOSE_FLAG),
+ (unsigned int)(e2->share_mode & ~DELETE_ON_CLOSE_FLAG) ));
+ smb_panic("PANIC: share_modes_identical logic error.\n");
+ }
+#endif
+
+ return (e1->pid == e2->pid &&
+ (e1->share_mode & ~DELETE_ON_CLOSE_FLAG) == (e2->share_mode & ~DELETE_ON_CLOSE_FLAG) &&
+ e1->dev == e2->dev &&
+ e1->inode == e2->inode &&
+ e1->share_file_id == e2->share_file_id );
+}
+
+/*******************************************************************
+ Delete a specific share mode. Return the number
+ of entries left, and a memdup'ed copy of the entry deleted (if required).
+ Ignore if no entry deleted.
+********************************************************************/
+
+ssize_t del_share_entry( SMB_DEV_T dev, SMB_INO_T inode,
+ share_mode_entry *entry, share_mode_entry **ppse)
+{
+ TDB_DATA dbuf;
+ struct locking_data *data;
+ int i, del_count=0;
+ share_mode_entry *shares;
+ ssize_t count = 0;
+
+ if (ppse)
+ *ppse = NULL;
+
+ /* read in the existing share modes */
+ dbuf = tdb_fetch(tdb, locking_key(dev, inode));
+ if (!dbuf.dptr)
+ return -1;
+
+ data = (struct locking_data *)dbuf.dptr;
+ shares = (share_mode_entry *)(dbuf.dptr + sizeof(*data));
+
+ /*
+ * Find any with this pid and delete it
+ * by overwriting with the rest of the data
+ * from the record.
+ */
+
+ DEBUG(10,("del_share_entry: num_share_modes = %d\n", data->u.num_share_mode_entries ));
+
+ for (i=0;i<data->u.num_share_mode_entries;) {
+ if (share_modes_identical(&shares[i], entry)) {
+ DEBUG(10,("del_share_entry: deleted %s\n",
+ share_mode_str(i, &shares[i]) ));
+ if (ppse)
+ *ppse = memdup(&shares[i], sizeof(*shares));
+ data->u.num_share_mode_entries--;
+ memmove(&shares[i], &shares[i+1],
+ dbuf.dsize - (sizeof(*data) + (i+1)*sizeof(*shares)));
+ del_count++;
+
+ DEBUG(10,("del_share_entry: deleting entry %d\n", i ));
+
+ } else {
+ i++;
+ }
+ }
+
+ if (del_count) {
+ /* the record may have shrunk a bit */
+ dbuf.dsize -= del_count * sizeof(*shares);
+
+ count = (ssize_t)data->u.num_share_mode_entries;
+
+ /* store it back in the database */
+ if (data->u.num_share_mode_entries == 0) {
+ if (tdb_delete(tdb, locking_key(dev, inode)) == -1)
+ count = -1;
+ } else {
+ if (tdb_store(tdb, locking_key(dev, inode), dbuf, TDB_REPLACE) == -1)
+ count = -1;
+ }
+ }
+ DEBUG(10,("del_share_entry: Remaining table.\n"));
+ print_share_mode_table((struct locking_data *)dbuf.dptr);
+ SAFE_FREE(dbuf.dptr);
+ return count;
+}
+
+/*******************************************************************
+ Del the share mode of a file for this process. Return the number
+ of entries left, and a memdup'ed copy of the entry deleted.
+********************************************************************/
+
+ssize_t del_share_mode(files_struct *fsp, share_mode_entry **ppse)
+{
+ share_mode_entry entry;
+
+ /*
+ * Fake up a share_mode_entry for comparisons.
+ */
+
+ fill_share_mode((char *)&entry, fsp, 0, 0);
+ return del_share_entry(fsp->dev, fsp->inode, &entry, ppse);
+}
+
+/*******************************************************************
+ Set the share mode of a file. Return False on fail, True on success.
+********************************************************************/
+
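+/* Record layout used below: a struct locking_data header, followed by
+   an array of share_mode_entry structs, followed by the full pathname
+   of the file (traverse_fn below walks the same layout). */
+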
+BOOL set_share_mode(files_struct *fsp, uint16 port, uint16 op_type)
+{
+ TDB_DATA dbuf;
+ struct locking_data *data;
+ char *p=NULL;
+ int size;
+ BOOL ret = True;
+
+ /* read in the existing share modes if any */
+ dbuf = tdb_fetch(tdb, locking_key_fsp(fsp));
+ if (!dbuf.dptr) {
+ size_t offset;
+ /* we'll need to create a new record */
+ pstring fname;
+
+ pstrcpy(fname, fsp->conn->connectpath);
+ pstrcat(fname, "/");
+ pstrcat(fname, fsp->fsp_name);
+
+ size = sizeof(*data) + sizeof(share_mode_entry) + strlen(fname) + 1;
+ p = (char *)malloc(size);
+ if (!p)
+ return False;
+ data = (struct locking_data *)p;
+ data->u.num_share_mode_entries = 1;
+
+ DEBUG(10,("set_share_mode: creating entry for file %s. num_share_modes = 1\n",
+ fsp->fsp_name ));
+
+ offset = sizeof(*data) + sizeof(share_mode_entry);
+ safe_strcpy(p + offset, fname, size - offset - 1);
+ fill_share_mode(p + sizeof(*data), fsp, port, op_type);
+ dbuf.dptr = p;
+ dbuf.dsize = size;
+ if (tdb_store(tdb, locking_key_fsp(fsp), dbuf, TDB_REPLACE) == -1)
+ ret = False;
+
+ print_share_mode_table((struct locking_data *)p);
+
+ SAFE_FREE(p);
+ return ret;
+ }
+
+ /* we're adding to an existing entry - this is a bit fiddly */
+ data = (struct locking_data *)dbuf.dptr;
+
+ data->u.num_share_mode_entries++;
+
+ DEBUG(10,("set_share_mode: adding entry for file %s. new num_share_modes = %d\n",
+ fsp->fsp_name, data->u.num_share_mode_entries ));
+
+ size = dbuf.dsize + sizeof(share_mode_entry);
+ p = malloc(size);
+ if (!p) {
+ SAFE_FREE(dbuf.dptr);
+ return False;
+ }
+ memcpy(p, dbuf.dptr, sizeof(*data));
+ fill_share_mode(p + sizeof(*data), fsp, port, op_type);
+ memcpy(p + sizeof(*data) + sizeof(share_mode_entry), dbuf.dptr + sizeof(*data),
+ dbuf.dsize - sizeof(*data));
+ SAFE_FREE(dbuf.dptr);
+ dbuf.dptr = p;
+ dbuf.dsize = size;
+ if (tdb_store(tdb, locking_key_fsp(fsp), dbuf, TDB_REPLACE) == -1)
+ ret = False;
+ print_share_mode_table((struct locking_data *)p);
+ SAFE_FREE(p);
+ return ret;
+}
+
+/*******************************************************************
+ A generic in-place modification call for share mode entries.
+********************************************************************/
+
+static BOOL mod_share_mode( SMB_DEV_T dev, SMB_INO_T inode, share_mode_entry *entry,
+ void (*mod_fn)(share_mode_entry *, SMB_DEV_T, SMB_INO_T, void *),
+ void *param)
+{
+ TDB_DATA dbuf;
+ struct locking_data *data;
+ int i;
+ share_mode_entry *shares;
+ BOOL need_store=False;
+ BOOL ret = True;
+
+ /* read in the existing share modes */
+ dbuf = tdb_fetch(tdb, locking_key(dev, inode));
+ if (!dbuf.dptr)
+ return False;
+
+ data = (struct locking_data *)dbuf.dptr;
+ shares = (share_mode_entry *)(dbuf.dptr + sizeof(*data));
+
+ /* find any with our pid and call the supplied function */
+ for (i=0;i<data->u.num_share_mode_entries;i++) {
+ if (share_modes_identical(entry, &shares[i])) {
+ mod_fn(&shares[i], dev, inode, param);
+ need_store=True;
+ }
+ }
+
+ /* if the mod fn was called then store it back */
+ if (need_store) {
+ if (data->u.num_share_mode_entries == 0) {
+ if (tdb_delete(tdb, locking_key(dev, inode)) == -1)
+ ret = False;
+ } else {
+ if (tdb_store(tdb, locking_key(dev, inode), dbuf, TDB_REPLACE) == -1)
+ ret = False;
+ }
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ return ret;
+}
+
+/*******************************************************************
+ Static function that actually does the work for the generic function
+ below.
+********************************************************************/
+
+static void remove_share_oplock_fn(share_mode_entry *entry, SMB_DEV_T dev, SMB_INO_T inode,
+ void *param)
+{
+ DEBUG(10,("remove_share_oplock_fn: removing oplock info for entry dev=%x ino=%.0f\n",
+ (unsigned int)dev, (double)inode ));
+ /* Delete the oplock info. */
+ entry->op_port = 0;
+ entry->op_type = NO_OPLOCK;
+}
+
+/*******************************************************************
+ Remove an oplock port and mode entry from a share mode.
+********************************************************************/
+
+BOOL remove_share_oplock(files_struct *fsp)
+{
+ share_mode_entry entry;
+ /*
+ * Fake up an entry for comparisons...
+ */
+ fill_share_mode((char *)&entry, fsp, 0, 0);
+ return mod_share_mode(fsp->dev, fsp->inode, &entry, remove_share_oplock_fn, NULL);
+}
+
+/*******************************************************************
+ Static function that actually does the work for the generic function
+ below.
+********************************************************************/
+
+static void downgrade_share_oplock_fn(share_mode_entry *entry, SMB_DEV_T dev, SMB_INO_T inode,
+ void *param)
+{
+ DEBUG(10,("downgrade_share_oplock_fn: downgrading oplock info for entry dev=%x ino=%.0f\n",
+ (unsigned int)dev, (double)inode ));
+ entry->op_type = LEVEL_II_OPLOCK;
+}
+
+/*******************************************************************
+ Downgrade an oplock type from exclusive to level II.
+********************************************************************/
+
+BOOL downgrade_share_oplock(files_struct *fsp)
+{
+ share_mode_entry entry;
+ /*
+ * Fake up an entry for comparisons...
+ */
+ fill_share_mode((char *)&entry, fsp, 0, 0);
+ return mod_share_mode(fsp->dev, fsp->inode, &entry, downgrade_share_oplock_fn, NULL);
+}
+
+/*******************************************************************
+ Get/Set the delete on close flag in a set of share modes.
+ Return False on fail, True on success.
+********************************************************************/
+
+BOOL modify_delete_flag( SMB_DEV_T dev, SMB_INO_T inode, BOOL delete_on_close)
+{
+ TDB_DATA dbuf;
+ struct locking_data *data;
+ int i;
+ share_mode_entry *shares;
+
+ /* read in the existing share modes */
+ dbuf = tdb_fetch(tdb, locking_key(dev, inode));
+ if (!dbuf.dptr)
+ return False;
+
+ data = (struct locking_data *)dbuf.dptr;
+ shares = (share_mode_entry *)(dbuf.dptr + sizeof(*data));
+
+ /* Set/Unset the delete on close element. */
+ for (i=0;i<data->u.num_share_mode_entries;i++,shares++) {
+ shares->share_mode = (delete_on_close ?
+ (shares->share_mode | DELETE_ON_CLOSE_FLAG) :
+ (shares->share_mode & ~DELETE_ON_CLOSE_FLAG) );
+ }
+
+ /* store it back */
+ if (data->u.num_share_mode_entries) {
+ if (tdb_store(tdb, locking_key(dev,inode), dbuf, TDB_REPLACE)==-1) {
+ SAFE_FREE(dbuf.dptr);
+ return False;
+ }
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ return True;
+}
+
+/****************************************************************************
+ Traverse the whole database with this function, calling traverse_callback
+ on each share mode
+****************************************************************************/
+
+static int traverse_fn(TDB_CONTEXT *the_tdb, TDB_DATA kbuf, TDB_DATA dbuf,
+ void* state)
+{
+ struct locking_data *data;
+ share_mode_entry *shares;
+ char *name;
+ int i;
+
+ SHAREMODE_FN(traverse_callback) = (SHAREMODE_FN_CAST())state;
+
+ data = (struct locking_data *)dbuf.dptr;
+ shares = (share_mode_entry *)(dbuf.dptr + sizeof(*data));
+ name = dbuf.dptr + sizeof(*data) + data->u.num_share_mode_entries*sizeof(*shares);
+
+ for (i=0;i<data->u.num_share_mode_entries;i++) {
+ traverse_callback(&shares[i], name);
+ }
+ return 0;
+}
+
+/*******************************************************************
+ Call the specified function on each entry under management by the
+ share mode system.
+********************************************************************/
+
+int share_mode_forall(SHAREMODE_FN(fn))
+{
+ if (!tdb)
+ return 0;
+ return tdb_traverse(tdb, traverse_fn, (void*)fn);
+}
diff --git a/source4/locking/posix.c b/source4/locking/posix.c
new file mode 100644
index 0000000000..81690bace9
--- /dev/null
+++ b/source4/locking/posix.c
@@ -0,0 +1,1313 @@
+/*
+ Unix SMB/CIFS implementation.
+ Locking functions
+ Copyright (C) Jeremy Allison 1992-2000
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Revision History:
+
+ POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
+*/
+
+#include "includes.h"
+
+/*
+ * The POSIX locking database handle.
+ */
+
+static TDB_CONTEXT *posix_lock_tdb;
+
+/*
+ * The pending close database handle.
+ */
+
+static TDB_CONTEXT *posix_pending_close_tdb;
+
+/*
+ * The data in POSIX lock records is an unsorted linear array of these
+ * records. It is unnecessary to store the count as tdb provides the
+ * size of the record.
+ */
+
+struct posix_lock {
+ int fd;
+ SMB_OFF_T start;
+ SMB_OFF_T size;
+ int lock_type;
+};
+
+/*
+ * The data in POSIX pending close records is an unsorted linear array of int
+ * records. It is unnecessary to store the count as tdb provides the
+ * size of the record.
+ */
+
+/* The key used in both the POSIX databases. */
+
+struct posix_lock_key {
+ SMB_DEV_T device;
+ SMB_INO_T inode;
+};
+
+/*******************************************************************
+ Form a static locking key for a dev/inode pair.
+******************************************************************/
+
+static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
+{
+ static struct posix_lock_key key;
+ TDB_DATA kbuf;
+
+ memset(&key, '\0', sizeof(key));
+ key.device = dev;
+ key.inode = inode;
+ kbuf.dptr = (char *)&key;
+ kbuf.dsize = sizeof(key);
+ return kbuf;
+}
+
+/*******************************************************************
+ Convenience function to get a key from an fsp.
+******************************************************************/
+
+static TDB_DATA locking_key_fsp(files_struct *fsp)
+{
+ return locking_key(fsp->dev, fsp->inode);
+}
+
+/****************************************************************************
+ Add an fd to the pending close tdb.
+****************************************************************************/
+
+static BOOL add_fd_to_close_entry(files_struct *fsp)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ char *tp;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_pending_close_tdb, kbuf);
+
+ tp = Realloc(dbuf.dptr, dbuf.dsize + sizeof(int));
+ if (!tp) {
+ DEBUG(0,("add_fd_to_close_entry: Realloc fail !\n"));
+ SAFE_FREE(dbuf.dptr);
+ return False;
+ } else
+ dbuf.dptr = tp;
+
+ memcpy(dbuf.dptr + dbuf.dsize, &fsp->fd, sizeof(int));
+ dbuf.dsize += sizeof(int);
+
+ if (tdb_store(posix_pending_close_tdb, kbuf, dbuf, TDB_REPLACE) == -1) {
+ DEBUG(0,("add_fd_to_close_entry: tdb_store fail !\n"));
+ }
+
+ SAFE_FREE(dbuf.dptr);
+ return True;
+}
+
+/****************************************************************************
+ Remove all fd entries for a specific dev/inode pair from the tdb.
+****************************************************************************/
+
+static void delete_close_entries(files_struct *fsp)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+
+ if (tdb_delete(posix_pending_close_tdb, kbuf) == -1)
+ DEBUG(0,("delete_close_entries: tdb_delete fail !\n"));
+}
+
+/****************************************************************************
+ Get the array of POSIX pending close records for an open fsp. Caller must
+ free. Returns number of entries.
+****************************************************************************/
+
+static size_t get_posix_pending_close_entries(files_struct *fsp, int **entries)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ size_t count = 0;
+
+ *entries = NULL;
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_pending_close_tdb, kbuf);
+
+ if (!dbuf.dptr) {
+ return 0;
+ }
+
+ *entries = (int *)dbuf.dptr;
+ count = (size_t)(dbuf.dsize / sizeof(int));
+
+ return count;
+}
+
+/****************************************************************************
+ Get the array of POSIX locks for an fsp. Caller must free. Returns
+ number of entries.
+****************************************************************************/
+
+static size_t get_posix_lock_entries(files_struct *fsp, struct posix_lock **entries)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ size_t count = 0;
+
+ *entries = NULL;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+
+ if (!dbuf.dptr) {
+ return 0;
+ }
+
+ *entries = (struct posix_lock *)dbuf.dptr;
+ count = (size_t)(dbuf.dsize / sizeof(struct posix_lock));
+
+ return count;
+}
+
+/****************************************************************************
+ Deal with pending closes needed by POSIX locking support.
+ Note that posix_locking_close_file() is expected to have been called
+ to delete all locks on this fsp before this function is called.
+****************************************************************************/
+
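+/* Background: POSIX fcntl() locks belong to the process, not to a
+   particular fd - closing *any* fd on a file drops all of the
+   process's locks on it. So while other fds on this dev/inode still
+   hold locks, the real close() must be deferred by parking the fd in
+   the pending close tdb below. */
+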
+int fd_close_posix(struct tcon_context *conn, files_struct *fsp)
+{
+ int saved_errno = 0;
+ int ret;
+ size_t count, i;
+ struct posix_lock *entries = NULL;
+ int *fd_array = NULL;
+ BOOL locks_on_other_fds = False;
+
+ if (!lp_posix_locking(SNUM(conn))) {
+ /*
+ * No POSIX to worry about, just close.
+ */
+ ret = conn->vfs_ops.close(fsp,fsp->fd);
+ fsp->fd = -1;
+ return ret;
+ }
+
+ /*
+ * Get the number of outstanding POSIX locks on this dev/inode pair.
+ */
+
+ count = get_posix_lock_entries(fsp, &entries);
+
+ /*
+ * Check if there are any outstanding locks belonging to
+ * other fd's. This should never be the case if posix_locking_close_file()
+ * has been called first, but it never hurts to be *sure*.
+ */
+
+ for (i = 0; i < count; i++) {
+ if (entries[i].fd != fsp->fd) {
+ locks_on_other_fds = True;
+ break;
+ }
+ }
+
+ if (locks_on_other_fds) {
+
+ /*
+ * There are outstanding locks on this dev/inode pair on other fds.
+ * Add our fd to the pending close tdb and set fsp->fd to -1.
+ */
+
+ if (!add_fd_to_close_entry(fsp)) {
+ SAFE_FREE(entries);
+ return False;
+ }
+
+ SAFE_FREE(entries);
+ fsp->fd = -1;
+ return 0;
+ }
+
+ SAFE_FREE(entries);
+
+ /*
+ * No outstanding POSIX locks. Get the pending close fd's
+ * from the tdb and close them all.
+ */
+
+ count = get_posix_pending_close_entries(fsp, &fd_array);
+
+ if (count) {
+ DEBUG(10,("fd_close_posix: doing close on %u fd's.\n", (unsigned int)count ));
+
+ for(i = 0; i < count; i++) {
+ if (conn->vfs_ops.close(fsp,fd_array[i]) == -1) {
+ saved_errno = errno;
+ }
+ }
+
+ /*
+ * Delete all fd's stored in the tdb
+ * for this dev/inode pair.
+ */
+
+ delete_close_entries(fsp);
+ }
+
+ SAFE_FREE(fd_array);
+
+ /*
+ * Finally close the fd associated with this fsp.
+ */
+
+ ret = conn->vfs_ops.close(fsp,fsp->fd);
+
+ if (saved_errno != 0) {
+ errno = saved_errno;
+ ret = -1;
+ }
+
+ fsp->fd = -1;
+
+ return ret;
+}
+
+/****************************************************************************
+ Debugging aid :-).
+****************************************************************************/
+
+static const char *posix_lock_type_name(int lock_type)
+{
+ return (lock_type == F_RDLCK) ? "READ" : "WRITE";
+}
+
+/****************************************************************************
+ Delete a POSIX lock entry by index number. Used if the tdb add succeeds, but
+ then the POSIX fcntl lock fails.
+****************************************************************************/
+
+static BOOL delete_posix_lock_entry_by_index(files_struct *fsp, size_t entry)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ struct posix_lock *locks;
+ size_t count;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+
+ if (!dbuf.dptr) {
+ DEBUG(10,("delete_posix_lock_entry_by_index: tdb_fetch failed !\n"));
+ goto fail;
+ }
+
+ count = (size_t)(dbuf.dsize / sizeof(struct posix_lock));
+ locks = (struct posix_lock *)dbuf.dptr;
+
+ if (count == 1) {
+ tdb_delete(posix_lock_tdb, kbuf);
+ } else {
+ if (entry < count-1) {
+ memmove(&locks[entry], &locks[entry+1], sizeof(*locks)*((count-1) - entry));
+ }
+ dbuf.dsize -= sizeof(*locks);
+ tdb_store(posix_lock_tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+
+ SAFE_FREE(dbuf.dptr);
+
+ return True;
+
+ fail:
+
+ SAFE_FREE(dbuf.dptr);
+ return False;
+}
+
+/****************************************************************************
+ Add an entry into the POSIX locking tdb. We return the index number of the
+ added lock (used in case we need to delete *exactly* this entry). Returns
+ False on fail, True on success.
+****************************************************************************/
+
+static BOOL add_posix_lock_entry(files_struct *fsp, SMB_OFF_T start, SMB_OFF_T size, int lock_type, size_t *pentry_num)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ struct posix_lock pl;
+ char *tp;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+
+ *pentry_num = (size_t)(dbuf.dsize / sizeof(pl));
+
+ /*
+ * Add new record.
+ */
+
+ pl.fd = fsp->fd;
+ pl.start = start;
+ pl.size = size;
+ pl.lock_type = lock_type;
+
+ tp = Realloc(dbuf.dptr, dbuf.dsize + sizeof(pl));
+ if (!tp) {
+ DEBUG(0,("add_posix_lock_entry: Realloc fail !\n"));
+ goto fail;
+ } else
+ dbuf.dptr = tp;
+
+ memcpy(dbuf.dptr + dbuf.dsize, &pl, sizeof(pl));
+ dbuf.dsize += sizeof(pl);
+
+ if (tdb_store(posix_lock_tdb, kbuf, dbuf, TDB_REPLACE) == -1) {
+ DEBUG(0,("add_posix_lock: Failed to add lock entry on file %s\n", fsp->fsp_name));
+ goto fail;
+ }
+
+ SAFE_FREE(dbuf.dptr);
+
+ DEBUG(10,("add_posix_lock: File %s: type = %s: start=%.0f size=%.0f: dev=%.0f inode=%.0f\n",
+ fsp->fsp_name, posix_lock_type_name(lock_type), (double)start, (double)size,
+ (double)fsp->dev, (double)fsp->inode ));
+
+ return True;
+
+ fail:
+
+ SAFE_FREE(dbuf.dptr);
+ return False;
+}
+
+/****************************************************************************
+ Calculate if locks have any overlap at all.
+****************************************************************************/
+
+static BOOL does_lock_overlap(SMB_OFF_T start1, SMB_OFF_T size1, SMB_OFF_T start2, SMB_OFF_T size2)
+{
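+	/* Boundary note: the first test below is inclusive of
+	   start2 + size2, so a range beginning exactly where another
+	   ends is treated as overlapping; the reverse case
+	   (start1 + size1 == start2) is not. */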
+ if (start1 >= start2 && start1 <= start2 + size2)
+ return True;
+
+ if (start1 < start2 && start1 + size1 > start2)
+ return True;
+
+ return False;
+}
+
+/****************************************************************************
+ Delete an entry from the POSIX locking tdb. Returns a copy of the entry being
+ deleted and the number of records that are overlapped by this one, or -1 on error.
+****************************************************************************/
+
+static int delete_posix_lock_entry(files_struct *fsp, SMB_OFF_T start, SMB_OFF_T size, struct posix_lock *pl)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ struct posix_lock *locks;
+ size_t i, count;
+ BOOL found = False;
+ int num_overlapping_records = 0;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+
+ if (!dbuf.dptr) {
+ DEBUG(10,("delete_posix_lock_entry: tdb_fetch failed !\n"));
+ goto fail;
+ }
+
+ /* There are existing locks - find a match. */
+ locks = (struct posix_lock *)dbuf.dptr;
+ count = (size_t)(dbuf.dsize / sizeof(*locks));
+
+ /*
+ * Search for and delete the first record that matches the
+ * unlock criteria.
+ */
+
+ for (i=0; i<count; i++) {
+ struct posix_lock *entry = &locks[i];
+
+ if (entry->fd == fsp->fd &&
+ entry->start == start &&
+ entry->size == size) {
+
+ /* Make a copy if requested. */
+ if (pl)
+ *pl = *entry;
+
+ /* Found it - delete it. */
+ if (count == 1) {
+ tdb_delete(posix_lock_tdb, kbuf);
+ } else {
+ if (i < count-1) {
+ memmove(&locks[i], &locks[i+1], sizeof(*locks)*((count-1) - i));
+ }
+ dbuf.dsize -= sizeof(*locks);
+ tdb_store(posix_lock_tdb, kbuf, dbuf, TDB_REPLACE);
+ }
+ count--;
+ found = True;
+ break;
+ }
+ }
+
+ if (!found)
+ goto fail;
+
+ /*
+ * Count the number of entries that are
+ * overlapped by this unlock request.
+ */
+
+ for (i = 0; i < count; i++) {
+ struct posix_lock *entry = &locks[i];
+
+ if (fsp->fd == entry->fd &&
+ does_lock_overlap( start, size, entry->start, entry->size))
+ num_overlapping_records++;
+ }
+
+	if (pl) {
+		DEBUG(10,("delete_posix_lock_entry: type = %s: start=%.0f size=%.0f, num_records = %d\n",
+			posix_lock_type_name(pl->lock_type), (double)pl->start, (double)pl->size,
+			num_overlapping_records ));
+	}
+
+ SAFE_FREE(dbuf.dptr);
+
+ return num_overlapping_records;
+
+ fail:
+
+ SAFE_FREE(dbuf.dptr);
+ return -1;
+}
+
+/****************************************************************************
+ Utility function to map a lock type correctly depending on the open
+ mode of a file.
+****************************************************************************/
+
+static int map_posix_lock_type( files_struct *fsp, enum brl_type lock_type)
+{
+ if((lock_type == WRITE_LOCK) && !fsp->can_write) {
+ /*
+		 * Many UNIXes cannot set a write lock on a file opened read-only.
+ * Win32 locking semantics allow this.
+ * Do the best we can and attempt a read-only lock.
+ */
+ DEBUG(10,("map_posix_lock_type: Downgrading write lock to read due to read-only file.\n"));
+ return F_RDLCK;
+ } else if((lock_type == READ_LOCK) && !fsp->can_read) {
+ /*
+ * Ditto for read locks on write only files.
+ */
+ DEBUG(10,("map_posix_lock_type: Changing read lock to write due to write-only file.\n"));
+ return F_WRLCK;
+ }
+
+ /*
+ * This return should be the most normal, as we attempt
+ * to always open files read/write.
+ */
+
+ return (lock_type == READ_LOCK) ? F_RDLCK : F_WRLCK;
+}
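+
+/* Example: on a file opened read-only (fsp->can_write is False), a Win32
+   WRITE_LOCK request is mapped to F_RDLCK, so the later fcntl() call can
+   succeed where a write lock would typically fail with EBADF. */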
+
+/****************************************************************************
+ Check to see if the given unsigned lock range is within the possible POSIX
+ range. Modifies the given args to be in range if possible, just returns
+ False if not.
+****************************************************************************/
+
+static BOOL posix_lock_in_range(SMB_OFF_T *offset_out, SMB_OFF_T *count_out,
+ SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count)
+{
+ SMB_OFF_T offset = (SMB_OFF_T)u_offset;
+ SMB_OFF_T count = (SMB_OFF_T)u_count;
+
+ /*
+ * For the type of system we are, attempt to
+ * find the maximum positive lock offset as an SMB_OFF_T.
+ */
+
+#if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */
+
+ SMB_OFF_T max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET);
+
+#elif defined(LARGE_SMB_OFF_T) && !defined(HAVE_BROKEN_FCNTL64_LOCKS)
+
+ /*
+ * In this case SMB_OFF_T is 64 bits,
+ * and the underlying system can handle 64 bit signed locks.
+ */
+
+ SMB_OFF_T mask2 = ((SMB_OFF_T)0x4) << (SMB_OFF_T_BITS-4);
+ SMB_OFF_T mask = (mask2<<1);
+ SMB_OFF_T max_positive_lock_offset = ~mask;
+
+#else /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
+
+ /*
+ * In this case either SMB_OFF_T is 32 bits,
+ * or the underlying system cannot handle 64 bit signed locks.
+ * All offsets & counts must be 2^31 or less.
+ */
+
+ SMB_OFF_T max_positive_lock_offset = 0x7FFFFFFF;
+
+#endif /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
+
+ /*
+ * POSIX locks of length zero mean lock to end-of-file.
+ * Win32 locks of length zero are point probes. Ignore
+ * any Win32 locks of length zero. JRA.
+ */
+
+ if (count == (SMB_OFF_T)0) {
+ DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n"));
+ return False;
+ }
+
+ /*
+	 * If the given offset is greater than max_positive_lock_offset then we cannot
+	 * map this lock at all and must ignore it.
+ */
+
+ if (u_offset & ~((SMB_BIG_UINT)max_positive_lock_offset)) {
+ DEBUG(10,("posix_lock_in_range: (offset = %.0f) offset > %.0f and we cannot handle this. Ignoring lock.\n",
+ (double)u_offset, (double)((SMB_BIG_UINT)max_positive_lock_offset) ));
+ return False;
+ }
+
+ /*
+	 * Clamp the count so it cannot exceed max_positive_lock_offset.
+ */
+
+ if (u_count & ~((SMB_BIG_UINT)max_positive_lock_offset))
+ count = max_positive_lock_offset;
+
+ /*
+ * Truncate count to end at max lock offset.
+ */
+
+ if (offset + count < 0 || offset + count > max_positive_lock_offset)
+ count = max_positive_lock_offset - offset;
+
+ /*
+ * If we ate all the count, ignore this lock.
+ */
+
+ if (count == 0) {
+ DEBUG(10,("posix_lock_in_range: Count = 0. Ignoring lock u_offset = %.0f, u_count = %.0f\n",
+ (double)u_offset, (double)u_count ));
+ return False;
+ }
+
+ /*
+ * The mapping was successful.
+ */
+
+ DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n",
+ (double)offset, (double)count ));
+
+ *offset_out = offset;
+ *count_out = count;
+
+ return True;
+}
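+
+/* Worked example of the clamping above, assuming a 32 bit SMB_OFF_T
+   (max_positive_lock_offset == 0x7FFFFFFF):
+
+	u_offset = 0x7FFFFFF0, u_count = 0x100
+		-> offset + count would exceed the maximum, so
+		   count = 0x7FFFFFFF - 0x7FFFFFF0 = 0xF
+	u_offset = 0x100000000 (any count)
+		-> the offset has bits above the maximum, so the lock
+		   cannot be mapped and False is returned. */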
+
+/****************************************************************************
+ Actual function that does POSIX locks. Copes with 64 -> 32 bit cruft and
+ broken NFS implementations.
+****************************************************************************/
+
+static BOOL posix_fcntl_lock(files_struct *fsp, int op, SMB_OFF_T offset, SMB_OFF_T count, int type)
+{
+ int ret;
+ struct tcon_context *conn = fsp->conn;
+
+ DEBUG(8,("posix_fcntl_lock %d %d %.0f %.0f %d\n",fsp->fd,op,(double)offset,(double)count,type));
+
+ ret = conn->vfs_ops.lock(fsp,fsp->fd,op,offset,count,type);
+
+ if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
+
+ DEBUG(0,("posix_fcntl_lock: WARNING: lock request at offset %.0f, length %.0f returned\n",
+ (double)offset,(double)count));
+ DEBUG(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno)));
+ DEBUG(0,("on 32 bit NFS mounted file systems.\n"));
+
+ /*
+ * If the offset is > 0x7FFFFFFF then this will cause problems on
+ * 32 bit NFS mounted filesystems. Just ignore it.
+ */
+
+ if (offset & ~((SMB_OFF_T)0x7fffffff)) {
+ DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
+ return True;
+ }
+
+ if (count & ~((SMB_OFF_T)0x7fffffff)) {
+ /* 32 bit NFS file system, retry with smaller offset */
+ DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
+ errno = 0;
+ count &= 0x7fffffff;
+ ret = conn->vfs_ops.lock(fsp,fsp->fd,op,offset,count,type);
+ }
+ }
+
+ DEBUG(8,("posix_fcntl_lock: Lock call %s\n", ret ? "successful" : "failed"));
+
+ return ret;
+}
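+
+/* Example of the NFS fallback above: a request at offset = 0 with
+   count = 0xFFFFFFFF that fails with EINVAL on a 32 bit NFS mount is
+   retried with count = 0x7FFFFFFF, while a request at
+   offset = 0x100000000 that fails the same way is reported as
+   successful, as a 32 bit filesystem cannot represent it at all. */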
+
+/****************************************************************************
+ POSIX function to see if a file region is locked. Returns True if the
+ region is locked, False otherwise.
+****************************************************************************/
+
+BOOL is_posix_locked(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count, enum brl_type lock_type)
+{
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+
+ DEBUG(10,("is_posix_locked: File %s, offset = %.0f, count = %.0f, type = %s\n",
+ fsp->fsp_name, (double)u_offset, (double)u_count, posix_lock_type_name(lock_type) ));
+
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * never set it, so presume it is not locked.
+ */
+
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
+ return False;
+
+ /*
+	 * Note that most UNIXes can *test* for a write lock on
+ * a read-only fd, just not *set* a write lock on a read-only
+ * fd. So we don't need to use map_lock_type here.
+ */
+
+ return posix_fcntl_lock(fsp,SMB_F_GETLK,offset,count,posix_lock_type);
+}
+
+/*
+ * Structure used when splitting a lock range
+ * into a POSIX lock range. Doubly linked list.
+ */
+
+struct lock_list {
+ struct lock_list *next;
+ struct lock_list *prev;
+ SMB_OFF_T start;
+ SMB_OFF_T size;
+};
+
+/****************************************************************************
+ Create a list of lock ranges that don't overlap a given range. Used in calculating
+ POSIX locks and unlocks. This is a difficult function that requires ASCII art to
+ understand it :-).
+****************************************************************************/
+
+static struct lock_list *posix_lock_list(TALLOC_CTX *ctx, struct lock_list *lhead, files_struct *fsp)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+ TDB_DATA dbuf;
+ struct posix_lock *locks;
+ size_t num_locks, i;
+
+ dbuf.dptr = NULL;
+
+ dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+
+ if (!dbuf.dptr)
+ return lhead;
+
+ locks = (struct posix_lock *)dbuf.dptr;
+ num_locks = (size_t)(dbuf.dsize / sizeof(*locks));
+
+ /*
+ * Check the current lock list on this dev/inode pair.
+ * Quit if the list is deleted.
+ */
+
+ DEBUG(10,("posix_lock_list: curr: start=%.0f,size=%.0f\n",
+ (double)lhead->start, (double)lhead->size ));
+
+ for (i=0; i<num_locks && lhead; i++) {
+
+ struct posix_lock *lock = &locks[i];
+ struct lock_list *l_curr;
+
+ /*
+ * Walk the lock list, checking for overlaps. Note that
+ * the lock list can expand within this loop if the current
+ * range being examined needs to be split.
+ */
+
+ for (l_curr = lhead; l_curr;) {
+
+ DEBUG(10,("posix_lock_list: lock: fd=%d: start=%.0f,size=%.0f:type=%s", lock->fd,
+ (double)lock->start, (double)lock->size, posix_lock_type_name(lock->lock_type) ));
+
+ if ( (l_curr->start >= (lock->start + lock->size)) ||
+ (lock->start >= (l_curr->start + l_curr->size))) {
+
+ /* No overlap with this lock - leave this range alone. */
+/*********************************************
+ +---------+
+ | l_curr |
+ +---------+
+ +-------+
+ | lock |
+ +-------+
+OR....
+ +---------+
+ | l_curr |
+ +---------+
+**********************************************/
+
+ DEBUG(10,("no overlap case.\n" ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
+
+ /*
+ * This unlock is completely overlapped by this existing lock range
+ * and thus should have no effect (not be unlocked). Delete it from the list.
+ */
+/*********************************************
+ +---------+
+ | l_curr |
+ +---------+
+ +---------------------------+
+ | lock |
+ +---------------------------+
+**********************************************/
+ /* Save the next pointer */
+ struct lock_list *ul_next = l_curr->next;
+
+ DEBUG(10,("delete case.\n" ));
+
+ DLIST_REMOVE(lhead, l_curr);
+ if(lhead == NULL)
+ break; /* No more list... */
+
+ l_curr = ul_next;
+
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start < lock->start + lock->size) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
+
+ /*
+ * This unlock overlaps the existing lock range at the high end.
+ * Truncate by moving start to existing range end and reducing size.
+ */
+/*********************************************
+ +---------------+
+ | l_curr |
+ +---------------+
+ +---------------+
+ | lock |
+ +---------------+
+BECOMES....
+ +-------+
+ | l_curr|
+ +-------+
+**********************************************/
+
+ l_curr->size = (l_curr->start + l_curr->size) - (lock->start + lock->size);
+ l_curr->start = lock->start + lock->size;
+
+ DEBUG(10,("truncate high case: start=%.0f,size=%.0f\n",
+ (double)l_curr->start, (double)l_curr->size ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
+
+ /*
+ * This unlock overlaps the existing lock range at the low end.
+ * Truncate by reducing size.
+ */
+/*********************************************
+ +---------------+
+ | l_curr |
+ +---------------+
+ +---------------+
+ | lock |
+ +---------------+
+BECOMES....
+ +-------+
+ | l_curr|
+ +-------+
+**********************************************/
+
+ l_curr->size = lock->start - l_curr->start;
+
+ DEBUG(10,("truncate low case: start=%.0f,size=%.0f\n",
+ (double)l_curr->start, (double)l_curr->size ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
+ /*
+ * Worst case scenario. Unlock request completely overlaps an existing
+ * lock range. Split the request into two, push the new (upper) request
+			 * into the dlink list, and continue with the entry after l_new (as we
+			 * know that l_new will not overlap with this lock).
+ */
+/*********************************************
+ +---------------------------+
+ | l_curr |
+ +---------------------------+
+ +---------+
+ | lock |
+ +---------+
+BECOMES.....
+ +-------+ +---------+
+ | l_curr| | l_new |
+ +-------+ +---------+
+**********************************************/
+ struct lock_list *l_new = (struct lock_list *)talloc(ctx,
+ sizeof(struct lock_list));
+
+ if(l_new == NULL) {
+ DEBUG(0,("posix_lock_list: talloc fail.\n"));
+ return NULL; /* The talloc_destroy takes care of cleanup. */
+ }
+
+ ZERO_STRUCTP(l_new);
+ l_new->start = lock->start + lock->size;
+ l_new->size = l_curr->start + l_curr->size - l_new->start;
+
+ /* Truncate the l_curr. */
+ l_curr->size = lock->start - l_curr->start;
+
+ DEBUG(10,("split case: curr: start=%.0f,size=%.0f \
+new: start=%.0f,size=%.0f\n", (double)l_curr->start, (double)l_curr->size,
+ (double)l_new->start, (double)l_new->size ));
+
+ /*
+ * Add into the dlink list after the l_curr point - NOT at lhead.
+ * Note we can't use DLINK_ADD here as this inserts at the head of the given list.
+ */
+
+ l_new->prev = l_curr;
+ l_new->next = l_curr->next;
+ l_curr->next = l_new;
+
+ /* And move after the link we added. */
+ l_curr = l_new->next;
+
+ } else {
+
+ /*
+ * This logic case should never happen. Ensure this is the
+ * case by forcing an abort.... Remove in production.
+ */
+ pstring msg;
+
+ slprintf(msg, sizeof(msg)-1, "logic flaw in cases: l_curr: start = %.0f, size = %.0f : \
+lock: start = %.0f, size = %.0f\n", (double)l_curr->start, (double)l_curr->size, (double)lock->start, (double)lock->size );
+
+ smb_panic(msg);
+ }
+ } /* end for ( l_curr = lhead; l_curr;) */
+	} /* end for (i=0; i<num_locks && lhead; i++) */
+
+ SAFE_FREE(dbuf.dptr);
+
+ return lhead;
+}
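+
+/* Worked example of the calculation above: if the tdb already holds one
+   lock on this fd covering [10, 20) and the caller passes in the single
+   range [0, 30), the "split case" applies:
+
+	initial list : start=0,  size=30
+	result       : start=0,  size=10   (l_curr, truncated low part)
+	               start=20, size=10   (l_new, inserted high part)
+
+   The returned ranges cover exactly the bytes not already protected by
+   an existing POSIX lock, so they can be locked or unlocked directly. */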
+
+/****************************************************************************
+ POSIX function to acquire a lock. Returns True if the
+ lock could be granted, False if not.
+****************************************************************************/
+
+BOOL set_posix_lock(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count, enum brl_type lock_type)
+{
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ BOOL ret = True;
+ size_t entry_num = 0;
+ size_t lock_count;
+ TALLOC_CTX *l_ctx = NULL;
+ struct lock_list *llist = NULL;
+ struct lock_list *ll = NULL;
+ int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+
+ DEBUG(5,("set_posix_lock: File %s, offset = %.0f, count = %.0f, type = %s\n",
+ fsp->fsp_name, (double)u_offset, (double)u_count, posix_lock_type_name(lock_type) ));
+
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * pretend it was successful.
+ */
+
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
+ return True;
+
+ /*
+	 * Windows is very strange. It allows read locks to be overlaid
+	 * (even over a write lock), but leaves the write lock in force until the first
+	 * unlock. It also reference counts the locks. This means the following sequence :
+	 *
+	 * process1                                      process2
+	 * ------------------------------------------------------------------------
+	 * WRITE LOCK : start = 2, len = 10
+	 *                                               READ LOCK: start = 0, len = 10 - FAIL
+	 * READ LOCK : start = 0, len = 14
+	 *                                               READ LOCK: start = 0, len = 10 - FAIL
+	 * UNLOCK : start = 2, len = 10
+	 *                                               READ LOCK: start = 0, len = 10 - OK
+	 *
+	 * Under POSIX, the same sequence of process1 requests would not be reference
+	 * counted, but would leave a single read lock over the 0-14 region. In order to
+	 * re-create Windows semantics mapped to POSIX locks, we create multiple TDB
+	 * entries, one for each overlaid lock request. We are guaranteed by the brlock
+	 * semantics that if a write lock is added, then it will be first in the array.
+ */
+
+ if ((l_ctx = talloc_init("set_posix_lock")) == NULL) {
+ DEBUG(0,("set_posix_lock: unable to init talloc context.\n"));
+ return True; /* Not a fatal error. */
+ }
+
+ if ((ll = (struct lock_list *)talloc(l_ctx, sizeof(struct lock_list))) == NULL) {
+ DEBUG(0,("set_posix_lock: unable to talloc unlock list.\n"));
+ talloc_destroy(l_ctx);
+ return True; /* Not a fatal error. */
+ }
+
+ /*
+ * Create the initial list entry containing the
+ * lock we want to add.
+ */
+
+ ZERO_STRUCTP(ll);
+ ll->start = offset;
+ ll->size = count;
+
+ DLIST_ADD(llist, ll);
+
+ /*
+ * The following call calculates if there are any
+ * overlapping locks held by this process on
+ * fd's open on the same file and splits this list
+ * into a list of lock ranges that do not overlap with existing
+ * POSIX locks.
+ */
+
+ llist = posix_lock_list(l_ctx, llist, fsp);
+
+ /*
+ * Now we have the list of ranges to lock it is safe to add the
+ * entry into the POSIX lock tdb. We take note of the entry we
+ * added here in case we have to remove it on POSIX lock fail.
+ */
+
+ if (!add_posix_lock_entry(fsp,offset,count,posix_lock_type,&entry_num)) {
+ DEBUG(0,("set_posix_lock: Unable to create posix lock entry !\n"));
+ talloc_destroy(l_ctx);
+ return False;
+ }
+
+ /*
+ * Add the POSIX locks on the list of ranges returned.
+ * As the lock is supposed to be added atomically, we need to
+ * back out all the locks if any one of these calls fail.
+ */
+
+ for (lock_count = 0, ll = llist; ll; ll = ll->next, lock_count++) {
+ offset = ll->start;
+ count = ll->size;
+
+ DEBUG(5,("set_posix_lock: Real lock: Type = %s: offset = %.0f, count = %.0f\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
+
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,posix_lock_type)) {
+ DEBUG(5,("set_posix_lock: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) ));
+ ret = False;
+ break;
+ }
+ }
+
+ if (!ret) {
+
+ /*
+ * Back out all the POSIX locks we have on fail.
+ */
+
+ for (ll = llist; lock_count; ll = ll->next, lock_count--) {
+ offset = ll->start;
+ count = ll->size;
+
+ DEBUG(5,("set_posix_lock: Backing out locks: Type = %s: offset = %.0f, count = %.0f\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
+
+ posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK);
+ }
+
+ /*
+ * Remove the tdb entry for this lock.
+ */
+
+ delete_posix_lock_entry_by_index(fsp,entry_num);
+ }
+
+ talloc_destroy(l_ctx);
+ return ret;
+}
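+
+/* Example of the Windows-to-POSIX mapping performed above: a process
+   already holding a write lock on [2, 12) that requests a read lock on
+   [0, 14) results in two real fcntl() read locks, on [0, 2) and
+   [12, 14), leaving the write lock intact, while the tdb records the
+   full [0, 14) entry so the later unlock is accounted for correctly. */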
+
+/****************************************************************************
+ POSIX function to release a lock. Returns True if the
+ lock could be released, False if not.
+****************************************************************************/
+
+BOOL release_posix_lock(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count)
+{
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ BOOL ret = True;
+ TALLOC_CTX *ul_ctx = NULL;
+ struct lock_list *ulist = NULL;
+ struct lock_list *ul = NULL;
+ struct posix_lock deleted_lock;
+ int num_overlapped_entries;
+
+ DEBUG(5,("release_posix_lock: File %s, offset = %.0f, count = %.0f\n",
+ fsp->fsp_name, (double)u_offset, (double)u_count ));
+
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * pretend it was successful.
+ */
+
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
+ return True;
+
+ /*
+ * We treat this as one unlock request for POSIX accounting purposes even
+ * if it may later be split into multiple smaller POSIX unlock ranges.
+ * num_overlapped_entries is the number of existing locks that have any
+ * overlap with this unlock request.
+ */
+
+ num_overlapped_entries = delete_posix_lock_entry(fsp, offset, count, &deleted_lock);
+
+ if (num_overlapped_entries == -1) {
+		smb_panic("release_posix_lock: unable to find entry to delete !\n");
+ }
+
+ /*
+ * If num_overlapped_entries is > 0, and the lock_type we just deleted from the tdb was
+ * a POSIX write lock, then before doing the unlock we need to downgrade
+ * the POSIX lock to a read lock. This allows any overlapping read locks
+ * to be atomically maintained.
+ */
+
+ if (num_overlapped_entries > 0 && deleted_lock.lock_type == F_WRLCK) {
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_RDLCK)) {
+ DEBUG(0,("release_posix_lock: downgrade of lock failed with error %s !\n", strerror(errno) ));
+ return False;
+ }
+ }
+
+ if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
+ DEBUG(0,("release_posix_lock: unable to init talloc context.\n"));
+ return True; /* Not a fatal error. */
+ }
+
+ if ((ul = (struct lock_list *)talloc(ul_ctx, sizeof(struct lock_list))) == NULL) {
+ DEBUG(0,("release_posix_lock: unable to talloc unlock list.\n"));
+ talloc_destroy(ul_ctx);
+ return True; /* Not a fatal error. */
+ }
+
+ /*
+ * Create the initial list entry containing the
+ * lock we want to remove.
+ */
+
+ ZERO_STRUCTP(ul);
+ ul->start = offset;
+ ul->size = count;
+
+ DLIST_ADD(ulist, ul);
+
+ /*
+ * The following call calculates if there are any
+ * overlapping locks held by this process on
+ * fd's open on the same file and creates a
+ * list of unlock ranges that will allow
+ * POSIX lock ranges to remain on the file whilst the
+ * unlocks are performed.
+ */
+
+ ulist = posix_lock_list(ul_ctx, ulist, fsp);
+
+ /*
+ * Release the POSIX locks on the list of ranges returned.
+ */
+
+ for(; ulist; ulist = ulist->next) {
+ offset = ulist->start;
+ count = ulist->size;
+
+ DEBUG(5,("release_posix_lock: Real unlock: offset = %.0f, count = %.0f\n",
+ (double)offset, (double)count ));
+
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK))
+ ret = False;
+ }
+
+ talloc_destroy(ul_ctx);
+
+ return ret;
+}
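+
+/* Example of the downgrade logic above: with a write lock on [0, 10) and
+   a read lock on [0, 14) both recorded in the tdb, releasing the write
+   lock finds one overlapping entry, so the fcntl() lock on [0, 10) is
+   downgraded to F_RDLCK instead of being unlocked. The unlock list then
+   comes back empty, leaving all of [0, 14) read-locked, exactly as the
+   Windows reference-counting semantics require. */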
+
+/****************************************************************************
+ Remove all lock entries for a specific dev/inode pair from the tdb.
+****************************************************************************/
+
+static void delete_posix_lock_entries(files_struct *fsp)
+{
+ TDB_DATA kbuf = locking_key_fsp(fsp);
+
+ if (tdb_delete(posix_lock_tdb, kbuf) == -1)
+		DEBUG(0,("delete_posix_lock_entries: tdb_delete failed !\n"));
+}
+
+/****************************************************************************
+ Debug function.
+****************************************************************************/
+
+static void dump_entry(struct posix_lock *pl)
+{
+ DEBUG(10,("entry: start=%.0f, size=%.0f, type=%d, fd=%i\n",
+ (double)pl->start, (double)pl->size, (int)pl->lock_type, pl->fd ));
+}
+
+/****************************************************************************
+ Remove any locks on this fd. Called from file_close().
+****************************************************************************/
+
+void posix_locking_close_file(files_struct *fsp)
+{
+ struct posix_lock *entries = NULL;
+ size_t count, i;
+
+ /*
+ * Optimization for the common case where we are the only
+ * opener of a file. If all fd entries are our own, we don't
+ * need to explicitly release all the locks via the POSIX functions,
+ * we can just remove all the entries in the tdb and allow the
+ * close to remove the real locks.
+ */
+
+ count = get_posix_lock_entries(fsp, &entries);
+
+ if (count == 0) {
+ DEBUG(10,("posix_locking_close_file: file %s has no outstanding locks.\n", fsp->fsp_name ));
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (entries[i].fd != fsp->fd )
+ break;
+
+ dump_entry(&entries[i]);
+ }
+
+ if (i == count) {
+ /* All locks are ours. */
+ DEBUG(10,("posix_locking_close_file: file %s has %u outstanding locks, but all on one fd.\n",
+ fsp->fsp_name, (unsigned int)count ));
+ SAFE_FREE(entries);
+ delete_posix_lock_entries(fsp);
+ return;
+ }
+
+ /*
+ * Difficult case. We need to delete all our locks, whilst leaving
+ * all other POSIX locks in place.
+ */
+
+ for (i = 0; i < count; i++) {
+ struct posix_lock *pl = &entries[i];
+ if (pl->fd == fsp->fd)
+ release_posix_lock(fsp, (SMB_BIG_UINT)pl->start, (SMB_BIG_UINT)pl->size );
+ }
+ SAFE_FREE(entries);
+}
+
+/*******************************************************************
+ Create the in-memory POSIX lock databases.
+********************************************************************/
+
+BOOL posix_locking_init(int read_only)
+{
+ if (posix_lock_tdb && posix_pending_close_tdb)
+ return True;
+
+ if (!posix_lock_tdb)
+ posix_lock_tdb = tdb_open_log(NULL, 0, TDB_INTERNAL,
+ read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644);
+ if (!posix_lock_tdb) {
+ DEBUG(0,("Failed to open POSIX byte range locking database.\n"));
+ return False;
+ }
+ if (!posix_pending_close_tdb)
+ posix_pending_close_tdb = tdb_open_log(NULL, 0, TDB_INTERNAL,
+ read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644);
+ if (!posix_pending_close_tdb) {
+ DEBUG(0,("Failed to open POSIX pending close database.\n"));
+ return False;
+ }
+
+ return True;
+}
+
+/*******************************************************************
+ Delete the in-memory POSIX lock databases.
+********************************************************************/
+
+BOOL posix_locking_end(void)
+{
+ if (posix_lock_tdb && tdb_close(posix_lock_tdb) != 0)
+ return False;
+ if (posix_pending_close_tdb && tdb_close(posix_pending_close_tdb) != 0)
+ return False;
+ return True;
+}