author | Stefan Metzmacher <metze@samba.org> | 2009-07-08 13:13:32 +0200 |
---|---|---|
committer | Stefan Metzmacher <metze@samba.org> | 2009-07-10 12:24:25 +0200 |
commit | 67c7b7f90bd1bad265e892bb00abf649b22345b6 (patch) | |
tree | 8589408c78083e067d00cf7ed834ca7983bf82db | |
parent | 8d534d45a5d21967e2d9bca1ac34a55f1518fe42 (diff) | |
download | samba-67c7b7f90bd1bad265e892bb00abf649b22345b6.tar.gz samba-67c7b7f90bd1bad265e892bb00abf649b22345b6.tar.bz2 samba-67c7b7f90bd1bad265e892bb00abf649b22345b6.zip |
s3:smbd: add support for SMB2 Lock
metze
-rw-r--r-- | source3/smbd/smb2_lock.c | 152 |
1 file changed, 151 insertions(+), 1 deletion(-)
diff --git a/source3/smbd/smb2_lock.c b/source3/smbd/smb2_lock.c
index 06e97b97b0..325c789320 100644
--- a/source3/smbd/smb2_lock.c
+++ b/source3/smbd/smb2_lock.c
@@ -190,6 +190,13 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
 	struct smb_request *smbreq;
 	connection_struct *conn = smb2req->tcon->compat_conn;
 	files_struct *fsp;
+	int32_t timeout = -1;
+	uint8_t type = 0;
+	bool isunlock = false;
+	uint16_t i;
+	struct smbd_lock_element *locks;
+	NTSTATUS status;
+	bool async = false;
 
 	req = tevent_req_create(mem_ctx, &state,
 				struct smbd_smb2_lock_state);
@@ -220,7 +227,150 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
 		return tevent_req_post(req, ev);
 	}
 
-	tevent_req_nterror(req, NT_STATUS_NOT_IMPLEMENTED);
+	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
+	if (locks == NULL) {
+		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
+		return tevent_req_post(req, ev);
+	}
+
+	switch (in_locks[0].flags) {
+	case SMB2_LOCK_FLAG_SHARED:
+	case SMB2_LOCK_FLAG_EXCLUSIVE:
+		if (in_lock_count > 1) {
+			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+			return tevent_req_post(req, ev);
+		}
+		timeout = -1;
+		break;
+
+	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
+	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
+		timeout = 0;
+		break;
+
+	case SMB2_LOCK_FLAG_UNLOCK:
+		/* only the first lock element gives the UNLOCK bit - see
+		   MS-SMB2 3.3.5.14 */
+		isunlock = true;
+		timeout = 0;
+		break;
+
+	default:
+		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+		return tevent_req_post(req, ev);
+	}
+
+	for (i=0; i<in_lock_count; i++) {
+		uint64_t max_count;
+		bool invalid = false;
+
+		switch (in_locks[i].flags) {
+		case SMB2_LOCK_FLAG_SHARED:
+		case SMB2_LOCK_FLAG_EXCLUSIVE:
+			if (i > 0) {
+				tevent_req_nterror(req,
+						   NT_STATUS_INVALID_PARAMETER);
+				return tevent_req_post(req, ev);
+			}
+			if (isunlock) {
+				tevent_req_nterror(req,
+						   NT_STATUS_INVALID_PARAMETER);
+				return tevent_req_post(req, ev);
+			}
+			break;
+
+		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
+		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
+			if (isunlock) {
+				tevent_req_nterror(req,
+						   NT_STATUS_INVALID_PARAMETER);
+				return tevent_req_post(req, ev);
+			}
+			break;
+
+		case SMB2_LOCK_FLAG_UNLOCK:
+			if (!isunlock) {
+				tevent_req_nterror(req,
+						   NT_STATUS_INVALID_PARAMETER);
+				return tevent_req_post(req, ev);
+			}
+			break;
+
+		default:
+			if (isunlock) {
+				/*
+				 * if the first element was an UNLOCK
+				 * we need to defer the error response
+				 * to the backend, because we need to process
+				 * all unlock elements before
+				 */
+				invalid = true;
+				break;
+			}
+			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+			return tevent_req_post(req, ev);
+		}
+
+		locks[i].smbpid = in_smbpid;
+		locks[i].offset = in_locks[i].offset;
+		locks[i].count = in_locks[i].length;
+
+		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
+			locks[i].brltype = WRITE_LOCK;
+		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
+			locks[i].brltype = READ_LOCK;
+		} else if (invalid) {
+			/*
+			 * this is an invalid UNLOCK element
+			 * and the backend needs to test for
+			 * brltype != UNLOCK_LOCK and return
+			 * NT_STATUS_INVALID_PARAMETER
+			 */
+			locks[i].brltype = READ_LOCK;
+		} else {
+			locks[i].brltype = UNLOCK_LOCK;
+		}
+
+		max_count = UINT64_MAX - locks[i].offset;
+		if (locks[i].count > max_count) {
+			tevent_req_nterror(req, NT_STATUS_INVALID_LOCK_RANGE);
+			return tevent_req_post(req, ev);
+		}
+	}
+
+	if (isunlock) {
+		status = smbd_do_locking(smbreq, fsp,
+					 0,
+					 timeout,
+					 in_lock_count,
+					 locks,
+					 0,
+					 NULL,
+					 &async);
+	} else {
+		status = smbd_do_locking(smbreq, fsp,
+					 0,
+					 timeout,
+					 0,
+					 NULL,
+					 in_lock_count,
+					 locks,
+					 &async);
+	}
+	if (!NT_STATUS_IS_OK(status)) {
+		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
+			status = NT_STATUS_LOCK_NOT_GRANTED;
+		}
+		tevent_req_nterror(req, status);
+		return tevent_req_post(req, ev);
+	}
+
+	if (async) {
+		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+		return tevent_req_post(req, ev);
+	}
+
+	tevent_req_done(req);
 	return tevent_req_post(req, ev);
 }
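The flag validation in this patch can be read independently of the smbd plumbing. Below is a minimal, standalone sketch (not Samba code) of the MS-SMB2 3.3.5.14 rules the patch enforces: a request is either a single shared/exclusive lock (optionally with SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) or a batch of unlock elements announced by the UNLOCK flag on the first element, and no byte range may wrap past UINT64_MAX. The flag values are taken from MS-SMB2 2.2.26; the struct and function names here are hypothetical. Note one simplification: the patch defers an invalid element inside an unlock batch to the locking backend so earlier unlock elements are still processed, while this sketch simply reports the whole batch as invalid.

```c
/* Standalone sketch of the SMB2_LOCK flag validation above.
 * Flag values follow MS-SMB2 2.2.26; struct/function names are
 * hypothetical and exist only for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB2_LOCK_FLAG_SHARED            0x01
#define SMB2_LOCK_FLAG_EXCLUSIVE         0x02
#define SMB2_LOCK_FLAG_UNLOCK            0x04
#define SMB2_LOCK_FLAG_FAIL_IMMEDIATELY  0x10

struct lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

/* Returns true if the element array is acceptable per MS-SMB2 3.3.5.14:
 * either lock elements (a plain shared/exclusive lock must be the only
 * element) or a batch of unlock elements flagged by the first element. */
static bool smb2_lock_elements_valid(const struct lock_element *locks,
				     uint16_t count)
{
	bool isunlock;
	uint16_t i;

	if (count == 0) {
		return false;
	}

	isunlock = (locks[0].flags == SMB2_LOCK_FLAG_UNLOCK);

	for (i = 0; i < count; i++) {
		switch (locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			/* a blocking lock must be the only element */
			if (i > 0 || count > 1 || isunlock) {
				return false;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				return false;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				return false;
			}
			break;

		default:
			/* unknown flag combination; the patch defers this
			 * to the backend when inside an unlock batch */
			return false;
		}

		/* reject byte ranges that wrap past UINT64_MAX */
		if (locks[i].length > UINT64_MAX - locks[i].offset) {
			return false;
		}
	}

	return true;
}

int main(void)
{
	struct lock_element one_exclusive = {
		.offset = 0,
		.length = 4096,
		.flags = SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY,
	};

	printf("valid: %d\n", smb2_lock_elements_valid(&one_exclusive, 1));
	return 0;
}
```

As the diff shows, the patch then maps SMB2_LOCK_FLAG_FAIL_IMMEDIATELY to a zero lock timeout, a plain shared/exclusive lock to timeout -1 (blocking), shared/exclusive/unlock flags to READ_LOCK/WRITE_LOCK/UNLOCK_LOCK byte-range lock types, and rewrites NT_STATUS_FILE_LOCK_CONFLICT from smbd_do_locking() into NT_STATUS_LOCK_NOT_GRANTED before replying.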