summaryrefslogtreecommitdiff
path: root/source3/smbd
diff options
context:
space:
mode:
Diffstat (limited to 'source3/smbd')
-rw-r--r--source3/smbd/blocking.c21
-rw-r--r--source3/smbd/globals.h14
-rw-r--r--source3/smbd/smb2_lock.c472
3 files changed, 477 insertions, 30 deletions
diff --git a/source3/smbd/blocking.c b/source3/smbd/blocking.c
index c10899f5e6..83898a3098 100644
--- a/source3/smbd/blocking.c
+++ b/source3/smbd/blocking.c
@@ -33,14 +33,20 @@ static void received_unlock_msg(struct messaging_context *msg,
struct server_id server_id,
DATA_BLOB *data);
-static void brl_timeout_fn(struct event_context *event_ctx,
+void brl_timeout_fn(struct event_context *event_ctx,
struct timed_event *te,
struct timeval now,
void *private_data)
{
struct smbd_server_connection *sconn = smbd_server_conn;
- SMB_ASSERT(sconn->smb1.locks.brl_timeout == te);
- TALLOC_FREE(sconn->smb1.locks.brl_timeout);
+
+ if (sconn->allow_smb2) {
+ SMB_ASSERT(sconn->smb2.locks.brl_timeout == te);
+ TALLOC_FREE(sconn->smb2.locks.brl_timeout);
+ } else {
+ SMB_ASSERT(sconn->smb1.locks.brl_timeout == te);
+ TALLOC_FREE(sconn->smb1.locks.brl_timeout);
+ }
change_to_root_user(); /* TODO: Possibly run all timed events as
* root */
@@ -52,7 +58,7 @@ static void brl_timeout_fn(struct event_context *event_ctx,
We need a version of timeval_min that treats zero timval as infinite.
****************************************************************************/
-static struct timeval timeval_brl_min(const struct timeval *tv1,
+struct timeval timeval_brl_min(const struct timeval *tv1,
const struct timeval *tv2)
{
if (timeval_is_zero(tv1)) {
@@ -699,9 +705,14 @@ static void received_unlock_msg(struct messaging_context *msg,
void process_blocking_lock_queue(void)
{
struct smbd_server_connection *sconn = smbd_server_conn;
- struct timeval tv_curr = timeval_current();
+ struct timeval tv_curr;
struct blocking_lock_record *blr, *next = NULL;
+ if (sconn->allow_smb2) {
+ return process_blocking_lock_queue_smb2();
+ }
+
+ tv_curr = timeval_current();
/*
* Go through the queue and see if we can get any of the locks.
*/
diff --git a/source3/smbd/globals.h b/source3/smbd/globals.h
index be140ba445..7d1776d4df 100644
--- a/source3/smbd/globals.h
+++ b/source3/smbd/globals.h
@@ -152,13 +152,6 @@ NTSTATUS smb2_signing_check_pdu(DATA_BLOB session_key,
const struct iovec *vector,
int count);
-struct smbd_lock_element {
- uint32_t smbpid;
- enum brl_type brltype;
- uint64_t offset;
- uint64_t count;
-};
-
NTSTATUS smbd_do_locking(struct smb_request *req,
files_struct *fsp,
uint8_t type,
@@ -313,6 +306,7 @@ void smbd_smb2_request_dispatch_immediate(struct tevent_context *ctx,
/* SMB1 -> SMB2 glue. */
void send_break_message_smb2(files_struct *fsp, int level);
+struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req);
bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
struct smb_request *req,
files_struct *fsp,
@@ -324,6 +318,7 @@ bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
uint64_t offset,
uint64_t count,
uint32_t blocking_pid);
+void process_blocking_lock_queue_smb2(void);
void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
struct byte_range_lock *br_lck);
/* From smbd/smb2_create.c */
@@ -570,6 +565,11 @@ struct smbd_server_connection {
struct smbd_smb2_session *list;
} sessions;
+ struct {
+ /* The event that makes us process our blocking lock queue */
+ struct timed_event *brl_timeout;
+ bool blocking_lock_unlock_state;
+ } locks;
struct smbd_smb2_request *requests;
} smb2;
};
diff --git a/source3/smbd/smb2_lock.c b/source3/smbd/smb2_lock.c
index d7a6cb1376..ba5b03f573 100644
--- a/source3/smbd/smb2_lock.c
+++ b/source3/smbd/smb2_lock.c
@@ -135,6 +135,22 @@ static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
NTSTATUS status;
NTSTATUS error; /* transport error */
+ if (req->cancelled) {
+ const uint8_t *inhdr = (const uint8_t *)
+ req->in.vector[req->current_idx].iov_base;
+ uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
+
+ DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
+ (unsigned long long)mid ));
+ error = smbd_smb2_request_error(req, NT_STATUS_CANCELLED);
+ if (!NT_STATUS_IS_OK(error)) {
+ smbd_server_connection_terminate(req->sconn,
+ nt_errstr(error));
+ return;
+ }
+ return;
+ }
+
status = smbd_smb2_lock_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
@@ -171,6 +187,10 @@ static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
struct smbd_smb2_lock_state {
struct smbd_smb2_request *smb2req;
+ struct smb_request *smb1req;
+ struct blocking_lock_record *blr;
+ uint16_t lock_count;
+ struct smbd_lock_element *locks;
};
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
@@ -183,7 +203,7 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
{
struct tevent_req *req;
struct smbd_smb2_lock_state *state;
- struct smb_request *smbreq;
+ struct smb_request *smb1req;
connection_struct *conn = smb2req->tcon->compat_conn;
files_struct *fsp;
int32_t timeout = -1;
@@ -194,21 +214,21 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
bool async = false;
req = tevent_req_create(mem_ctx, &state,
- struct smbd_smb2_lock_state);
+ struct smbd_smb2_lock_state);
if (req == NULL) {
return NULL;
}
state->smb2req = smb2req;
+ smb1req = smbd_smb2_fake_smb_request(smb2req);
+ if (tevent_req_nomem(smb1req, req)) {
+ return tevent_req_post(req, ev);
+ }
+ state->smb1req = smb1req;
DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
(unsigned long long)in_file_id_volatile));
- smbreq = smbd_smb2_fake_smb_request(smb2req);
- if (tevent_req_nomem(smbreq, req)) {
- return tevent_req_post(req, ev);
- }
-
- fsp = file_fsp(smbreq, (uint16_t)in_file_id_volatile);
+ fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
if (fsp == NULL) {
tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
return tevent_req_post(req, ev);
@@ -333,8 +353,11 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
}
}
+ state->locks = locks;
+ state->lock_count = in_lock_count;
+
if (isunlock) {
- status = smbd_do_locking(smbreq, fsp,
+ status = smbd_do_locking(smb1req, fsp,
0,
timeout,
in_lock_count,
@@ -343,7 +366,7 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
NULL,
&async);
} else {
- status = smbd_do_locking(smbreq, fsp,
+ status = smbd_do_locking(smb1req, fsp,
0,
timeout,
0,
@@ -361,8 +384,7 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
}
if (async) {
- tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
- return tevent_req_post(req, ev);
+ return req;
}
tevent_req_done(req);
@@ -382,13 +404,163 @@ static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
return NT_STATUS_OK;
}
-/*
- * Dummy (for now) function to cope with SMB2 blocking lock
- * requests.
- */
+/****************************************************************
+ Cancel an outstanding blocking lock request.
+*****************************************************************/
+
+static bool smbd_smb2_lock_cancel(struct tevent_req *req)
+{
+ struct smbd_smb2_request *smb2req = NULL;
+ struct smbd_smb2_lock_state *state = tevent_req_data(req,
+ struct smbd_smb2_lock_state);
+ if (!state) {
+ return false;
+ }
+
+ if (!state->smb2req) {
+ return false;
+ }
+
+ smb2req = state->smb2req;
+ smb2req->cancelled = true;
+
+ tevent_req_done(req);
+ return true;
+}
+
+/****************************************************************
+ Got a message saying someone unlocked a file. Re-schedule all
+ blocking lock requests as we don't know if anything overlapped.
+*****************************************************************/
+
+static void received_unlock_msg(struct messaging_context *msg,
+ void *private_data,
+ uint32_t msg_type,
+ struct server_id server_id,
+ DATA_BLOB *data)
+{
+ DEBUG(10,("received_unlock_msg (SMB2)\n"));
+ process_blocking_lock_queue_smb2();
+}
+
+/****************************************************************
+ Function to get the blr on a pending record.
+*****************************************************************/
+
+struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
+{
+ struct smbd_smb2_lock_state *state = NULL;
+ const uint8_t *inhdr;
+
+ if (!smb2req) {
+ return NULL;
+ }
+ if (smb2req->subreq == NULL) {
+ return NULL;
+ }
+ if (!tevent_req_is_in_progress(smb2req->subreq)) {
+ return NULL;
+ }
+ inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
+ if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
+ return NULL;
+ }
+ state = tevent_req_data(smb2req->subreq,
+ struct smbd_smb2_lock_state);
+ if (!state) {
+ return NULL;
+ }
+ return state->blr;
+}
+/****************************************************************
+ Set up the next brl timeout.
+*****************************************************************/
+
+static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
+{
+ struct smbd_smb2_request *smb2req;
+ struct timeval next_timeout = timeval_zero();
+ int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
+
+ /*
+ * If we already have a timeout event, don't replace it.
+ * It will fire before this one anyway.
+ */
+
+ if (sconn->smb2.locks.brl_timeout) {
+ DEBUG(10,("recalc_smb2_brl_timeout: timeout already exists\n"));
+ return true;
+ }
+
+ for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
+ struct blocking_lock_record *blr =
+ get_pending_smb2req_blr(smb2req);
+ if (blr && blr->blocking_pid == 0xFFFFFFFF) {
+ /*
+ * If we're blocked on pid 0xFFFFFFFF this is
+ * a POSIX lock, so calculate a timeout of
+ * 10 seconds into the future.
+ */
+ next_timeout = timeval_current_ofs(10, 0);
+ break;
+ }
+ }
+
+ /*
+ * To account for unclean shutdowns by clients we need a
+ * maximum timeout that we use for checking pending locks. If
+ * we have any pending locks at all, then check if the pending
+ * lock can continue at least every brl:recalctime seconds
+ * (default 5 seconds).
+ *
+ * This saves us needing to do a message_send_all() in the
+ * SIGCHLD handler in the parent daemon. That
+ * message_send_all() caused O(n^2) work to be done when IP
+ * failovers happened in clustered Samba, which could make the
+ * entire system unusable for many minutes.
+ */
+
+ if (max_brl_timeout > 0) {
+ struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
+ next_timeout = timeval_brl_min(&next_timeout, &min_to);
+ }
+
+ if (timeval_is_zero(&next_timeout)) {
+ /* Infinite timeout - return. */
+ DEBUG(10, ("push_blocking_lock_request_smb2: Next "
+ "timeout = INFINITY\n"));
+ return true;
+ }
+
+ if (DEBUGLVL(10)) {
+ struct timeval cur, from_now;
+
+ cur = timeval_current();
+ from_now = timeval_until(&cur, &next_timeout);
+ DEBUG(10, ("push_blocking_lock_request_smb2: Next "
+ "timeout = %d.%d seconds from now.\n",
+ (int)from_now.tv_sec, (int)from_now.tv_usec));
+ }
+
+ sconn->smb2.locks.brl_timeout = event_add_timed(
+ smbd_event_context(),
+ NULL,
+ next_timeout,
+ brl_timeout_fn,
+ NULL);
+ if (!sconn->smb2.locks.brl_timeout) {
+ return false;
+ }
+ return true;
+}
+
+/****************************************************************
+ Get an SMB2 lock request to go async. lock_timeout should
+ always be -1 here.
+*****************************************************************/
bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
- struct smb_request *req,
+ struct smb_request *smb1req,
files_struct *fsp,
int lock_timeout,
int lock_num,
@@ -399,10 +571,274 @@ bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
uint64_t count,
uint32_t blocking_pid)
{
- return false;
+ struct smbd_server_connection *sconn = smbd_server_conn;
+ struct smbd_smb2_request *smb2req = smb1req->smb2req;
+ struct tevent_req *req = NULL;
+ struct smbd_smb2_lock_state *state = NULL;
+ NTSTATUS status = NT_STATUS_OK;
+
+ SMB_ASSERT(lock_timeout == -1);
+
+ if (!smb2req) {
+ return false;
+ }
+ req = smb2req->subreq;
+ if (!req) {
+ return false;
+ }
+ state = tevent_req_data(req, struct smbd_smb2_lock_state);
+ if (!state) {
+ return false;
+ }
+
+ if (!state->blr) {
+ struct blocking_lock_record *blr = talloc_zero(state,
+ struct blocking_lock_record);
+ if (!blr) {
+ return false;
+ }
+ blr = talloc_zero(state, struct blocking_lock_record);
+ blr->fsp = fsp;
+ blr->expire_time.tv_sec = 0;
+ blr->expire_time.tv_usec = 0; /* Never expire. */
+ blr->lock_num = lock_num;
+ blr->lock_pid = lock_pid;
+ blr->blocking_pid = blocking_pid;
+ blr->lock_flav = lock_flav;
+ blr->lock_type = lock_type;
+ blr->offset = offset;
+ blr->count = count;
+
+ /* Specific brl_lock() implementations can fill this in. */
+ blr->blr_private = NULL;
+
+ /* Add a pending lock record for this. */
+ status = brl_lock(smbd_messaging_context(),
+ br_lck,
+ lock_pid,
+ procid_self(),
+ offset,
+ count,
+ lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
+ blr->lock_flav,
+ true,
+ NULL,
+ blr);
+
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0,("push_blocking_lock_request_smb2: "
+ "failed to add PENDING_LOCK record.\n"));
+ TALLOC_FREE(blr);
+ return false;
+ }
+ state->blr = blr;
+ }
+
+ recalc_smb2_brl_timeout(sconn);
+
+ /* Ensure we'll receive messages when this is unlocked. */
+ if (!sconn->smb2.locks.blocking_lock_unlock_state) {
+ messaging_register(smbd_messaging_context(), NULL,
+ MSG_SMB_UNLOCK, received_unlock_msg);
+ sconn->smb2.locks.blocking_lock_unlock_state = true;
+ }
+
+ /* allow this request to be canceled */
+ tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
+
+ return true;
+}
+
+/****************************************************************
+ Re-process a blocking lock request.
+ This is equivalent to process_lockingX() inside smbd/blocking.c
+*****************************************************************/
+
+static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req)
+{
+ NTSTATUS status;
+ struct blocking_lock_record *blr = NULL;
+ struct smbd_smb2_lock_state *state = NULL;
+ files_struct *fsp = NULL;
+
+ if (!smb2req->subreq) {
+ return;
+ }
+ state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
+ if (!state) {
+ return;
+ }
+
+ blr = state->blr;
+ fsp = blr->fsp;
+
+ /* Try and finish off getting all the outstanding locks. */
+
+ for (; blr->lock_num < state->lock_count; blr->lock_num++) {
+ struct byte_range_lock *br_lck = NULL;
+ struct smbd_lock_element *e = &state->locks[blr->lock_num];
+
+ br_lck = do_lock(smbd_messaging_context(),
+ fsp,
+ e->smbpid,
+ e->count,
+ e->offset,
+ e->brltype,
+ WINDOWS_LOCK,
+ true,
+ &status,
+ &blr->blocking_pid,
+ blr);
+
+ TALLOC_FREE(br_lck);
+
+ if (NT_STATUS_IS_ERR(status)) {
+ break;
+ }
+ }
+
+ if(blr->lock_num == state->lock_count) {
+ /*
+ * Success - we got all the locks.
+ */
+
+ DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
+ "fnum=%d num_locks=%d\n",
+ fsp_str_dbg(fsp),
+ fsp->fnum,
+ (int)state->lock_count));
+
+ tevent_req_done(smb2req->subreq);
+ return;
+ }
+
+ if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
+ !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
+ /*
+ * We have other than a "can't get lock"
+ * error. Return an error.
+ */
+ tevent_req_nterror(smb2req->subreq, status);
+ return;
+ }
+
+ /*
+ * Still can't get all the locks - keep waiting.
+ */
+
+ DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
+ "for file %s, fnum = %d. Waiting....\n",
+ (int)blr->lock_num,
+ (int)state->lock_count,
+ fsp_str_dbg(fsp),
+ (int)fsp->fnum));
+
+ return;
+
+}
+
+/****************************************************************
+ Attempt to process all outstanding blocking locks pending on
+ the request queue.
+*****************************************************************/
+
+void process_blocking_lock_queue_smb2(void)
+{
+ struct smbd_server_connection *sconn = smbd_server_conn;
+ struct smbd_smb2_request *smb2req, *nextreq;
+
+ for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
+ const uint8_t *inhdr;
+
+ nextreq = smb2req->next;
+
+ if (smb2req->subreq == NULL) {
+ /* This message has been processed. */
+ continue;
+ }
+ if (!tevent_req_is_in_progress(smb2req->subreq)) {
+ /* This message has been processed. */
+ continue;
+ }
+
+ inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
+ if (IVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
+ reprocess_blocked_smb2_lock(smb2req);
+ }
+ }
+
+ recalc_smb2_brl_timeout(sconn);
}
+/****************************************************************************
+ Remove any locks on this fd. Called from file_close().
+****************************************************************************/
+
void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
struct byte_range_lock *br_lck)
{
+ struct smbd_server_connection *sconn = smbd_server_conn;
+ struct smbd_smb2_request *smb2req, *nextreq;
+
+ for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
+ struct smbd_smb2_lock_state *state = NULL;
+ files_struct *fsp_curr = NULL;
+ int i = smb2req->current_idx;
+ uint64_t in_file_id_volatile;
+ struct blocking_lock_record *blr = NULL;
+ const uint8_t *inhdr;
+ const uint8_t *inbody;
+
+ nextreq = smb2req->next;
+
+ if (smb2req->subreq == NULL) {
+ /* This message has been processed. */
+ continue;
+ }
+ if (!tevent_req_is_in_progress(smb2req->subreq)) {
+ /* This message has been processed. */
+ continue;
+ }
+
+ inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
+ if (IVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
+ /* Not a lock call. */
+ continue;
+ }
+
+ inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
+ in_file_id_volatile = BVAL(inbody, 0x10);
+
+ state = tevent_req_data(smb2req->subreq,
+ struct smbd_smb2_lock_state);
+ if (!state) {
+ /* Strange - is this even possible ? */
+ continue;
+ }
+
+ fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
+ if (fsp_curr == NULL) {
+ /* Strange - is this even possible ? */
+ continue;
+ }
+
+ if (fsp_curr != fsp) {
+ /* It's not our fid */
+ continue;
+ }
+
+ blr = state->blr;
+
+ /* Remove the entries from the lock db. */
+ brl_lock_cancel(br_lck,
+ blr->lock_pid,
+ procid_self(),
+ blr->offset,
+ blr->count,
+ blr->lock_flav,
+ blr);
+
+ /* Finally cancel the request. */
+ tevent_req_cancel(smb2req->subreq);
+ }
}