author     Volker Lendecke <vl@samba.org>        2013-10-18 15:12:35 +0000
committer  Stefan Metzmacher <metze@samba.org>   2013-10-23 11:44:37 +0200
commit     f892bdb2047c86e53f875e8f2c58858d2dacc363 (patch)
tree       35a6683616f8908fba56cd66f0b35ed813cdc6b6
parent     4e06c61b41fdde21fafaeee3c4fb3366744de9d9 (diff)
smbd: Fix bug 10216
While refactoring find_oplock_types to validate_oplock_types I forgot that
stat opens will end up in locking.tdb. So even with a batch oplock around we
can have more than one entry. This means the consistency check in
validate_oplock_types was wrong and too strict.

Signed-off-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Stefan Metzmacher <metze@samba.org>
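To illustrate the reasoning behind the fix, here is a minimal standalone C
sketch of the corrected consistency check. The types and names used here
(sketch_entry, OP_NONE, OP_BATCH, batch_is_exclusive) are simplified stand-ins
for illustration only, not Samba's real share_mode_entry structures or oplock
macros from source3/smbd/open.c; the point is merely that stat opens have to
be skipped before testing that a batch/exclusive oplock holder is alone.

/* Hypothetical stand-in types for illustration; not Samba's real types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum sketch_op_type { OP_NONE, OP_LEVEL2, OP_EXCLUSIVE, OP_BATCH };

struct sketch_entry {
	enum sketch_op_type op_type;
	bool is_stat_open;		/* attribute-only open */
};

/*
 * A batch or exclusive oplock holder must be the only "real" opener, but
 * stat opens also end up in the share mode data, so they are skipped
 * before counting. Comparing against the raw entry count (the old check)
 * would reject the legal "batch holder plus stat open" case.
 */
static bool batch_is_exclusive(const struct sketch_entry *e, uint32_t n)
{
	uint32_t num_non_stat_opens = 0;
	bool have_batch_or_ex = false;
	uint32_t i;

	for (i = 0; i < n; i++) {
		if (e[i].op_type == OP_NONE && e[i].is_stat_open) {
			continue;	/* stat open: does not count */
		}
		num_non_stat_opens += 1;
		if (e[i].op_type == OP_BATCH ||
		    e[i].op_type == OP_EXCLUSIVE) {
			have_batch_or_ex = true;
		}
	}

	return !have_batch_or_ex || (num_non_stat_opens == 1);
}

int main(void)
{
	struct sketch_entry entries[] = {
		{ .op_type = OP_BATCH, .is_stat_open = false },
		{ .op_type = OP_NONE,  .is_stat_open = true  },
	};

	/* Two entries, yet consistent: the second one is a stat open. */
	printf("consistent: %s\n",
	       batch_is_exclusive(entries, 2) ? "yes" : "no");
	return 0;
}

With the pre-fix check, which compared the raw share mode entry count against
one, this perfectly valid situation would have been flagged as an error;
counting only the non-stat opens accepts it.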
-rw-r--r--   source3/smbd/open.c   32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/source3/smbd/open.c b/source3/smbd/open.c
index fa52fccc08..c3e1a76ff8 100644
--- a/source3/smbd/open.c
+++ b/source3/smbd/open.c
@@ -1211,6 +1211,7 @@ static bool validate_oplock_types(files_struct *fsp,
 	bool ex_or_batch = false;
 	bool level2 = false;
 	bool no_oplock = false;
+	uint32_t num_non_stat_opens = 0;
 	uint32_t i;
 
 	/* Ignore stat or internal opens, as is done in
@@ -1235,6 +1236,8 @@ static bool validate_oplock_types(files_struct *fsp,
 			continue;
 		}
 
+		num_non_stat_opens += 1;
+
 		if (BATCH_OPLOCK_TYPE(e->op_type)) {
 			/* batch - can only be one. */
 			if (share_mode_stale_pid(d, i)) {
@@ -1294,7 +1297,7 @@ static bool validate_oplock_types(files_struct *fsp,
 
 	remove_stale_share_mode_entries(d);
 
-	if ((batch || ex_or_batch) && (d->num_share_modes != 1)) {
+	if ((batch || ex_or_batch) && (num_non_stat_opens != 1)) {
 		DEBUG(1, ("got batch (%d) or ex (%d) non-exclusively (%d)\n",
 			  (int)batch, (int)ex_or_batch,
 			  (int)d->num_share_modes));
@@ -1312,17 +1315,38 @@ static bool delay_for_oplock(files_struct *fsp,
 {
 	struct share_mode_data *d = lck->data;
 	struct share_mode_entry *entry;
+	uint32_t num_non_stat_opens = 0;
+	uint32_t i;
 
 	if ((oplock_request & INTERNAL_OPEN_ONLY) || is_stat_open(fsp->access_mask)) {
 		return false;
 	}
-	if (lck->data->num_share_modes != 1) {
+	for (i=0; i<d->num_share_modes; i++) {
+		struct share_mode_entry *e = &d->share_modes[i];
+		if (e->op_type == NO_OPLOCK && is_stat_open(e->access_mask)) {
+			continue;
+		}
+		num_non_stat_opens += 1;
+
+		/*
+		 * We found a non-stat open, which in the exclusive/batch
+		 * case will be inspected further down.
+		 */
+		entry = e;
+	}
+	if (num_non_stat_opens == 0) {
+		/*
+		 * Nothing to wait for around
+		 */
+		return false;
+	}
+	if (num_non_stat_opens != 1) {
 		/*
-		 * More than one. There can't be any exclusive or batch left.
+		 * More than one open around. There can't be any exclusive or
+		 * batch left, this is all level2.
 		 */
 		return false;
 	}
-	entry = &d->share_modes[0];
 
 	if (server_id_is_disconnected(&entry->pid)) {
 		/*