author | Stefan Metzmacher <metze@samba.org> | 2009-08-11 14:50:36 +0200
---|---|---
committer | Stefan Metzmacher <metze@samba.org> | 2009-09-18 20:34:43 +0200
commit | cda0849ebc6c3bc2cc6c7666e4e4e16093b2683b (patch) |
tree | 6cfede95f3af159769f510efe1337a1ee205900d /source4 |
parent | f0704f5abcec0317afd0dae29c096498206b1136 (diff) |
download | samba-cda0849ebc6c3bc2cc6c7666e4e4e16093b2683b.tar.gz samba-cda0849ebc6c3bc2cc6c7666e4e4e16093b2683b.tar.bz2 samba-cda0849ebc6c3bc2cc6c7666e4e4e16093b2683b.zip |
s4:ntvfs_ipc: add real named pipe support
We now open a named pipe via the named_pipe_auth
code and process IO via the tstream interface.
This means we support both byte mode and message mode
named pipes.
We also correctly issue NT_STATUS_PIPE_BUSY
when an smb_trans request comes in while a read or another
smb_trans is still pending.
We also have support for async dcerpc over ncacn_np now,
and we can now remove the ncacn_np-specific hacks from the
rpc_server/ code.
metze
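
The NT_STATUS_PIPE_BUSY behaviour described above falls out of serializing all pipe IO through per-handle tevent queues. The following is a hedged, simplified C sketch of that check (the real code lives inline in ipc_dcerpc_cmd() and ipc_ioctl_smb2() in the diff below; the helper name here is hypothetical, while `struct pipe_state` and `read_queue` are the fields the patch introduces):

```c
/*
 * Simplified sketch of the busy check added by this patch.  Every
 * read on a pipe is queued on p->read_queue, so a non-empty queue
 * means a read or another trans is still in flight and a new trans
 * must be rejected with NT_STATUS_PIPE_BUSY.
 */
static NTSTATUS ipc_trans_check_busy(struct pipe_state *p)
{
	if (tevent_queue_length(p->read_queue) > 0) {
		/* a read or another trans is already pending */
		return NT_STATUS_PIPE_BUSY;
	}
	return NT_STATUS_OK;
}
```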
Diffstat (limited to 'source4')
-rw-r--r-- | source4/ntvfs/config.mk | 4 |
-rw-r--r-- | source4/ntvfs/ipc/vfs_ipc.c | 884 |
2 files changed, 652 insertions, 236 deletions
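
The central IO change in the diff is that reads are driven by tstream_readv_pdu_queue_send() with a "next vector" callback that hands the stream one iovec at a time. Below is a hedged sketch of that callback contract, simplified from ipc_readv_next_vector() in the patch; the struct and function names here are illustrative only, and it omits the tstream_pending_bytes() logic the patch uses to allow short reads:

```c
/*
 * Sketch of the next-vector contract used with
 * tstream_readv_pdu_queue_send(): return one iovec per call until
 * *count is set to 0, at which point the queued read completes.
 * Unlike the real ipc_readv_next_vector() this version always asks
 * for the whole remaining buffer, i.e. it never returns early with
 * a short read.
 */
struct sketch_read_state {
	uint8_t *buf;
	size_t len;
	size_t ofs;
};

static int sketch_next_vector(struct tstream_context *stream,
			      void *private_data,
			      TALLOC_CTX *mem_ctx,
			      struct iovec **_vector,
			      size_t *count)
{
	struct sketch_read_state *s =
		(struct sketch_read_state *)private_data;
	struct iovec *v;

	if (s->ofs == s->len) {
		/* buffer filled: signal completion of the read */
		*_vector = NULL;
		*count = 0;
		return 0;
	}

	v = talloc_array(mem_ctx, struct iovec, 1);
	if (v == NULL) {
		return -1;
	}
	v[0].iov_base = s->buf + s->ofs;
	v[0].iov_len = s->len - s->ofs;
	s->ofs = s->len;

	*_vector = v;
	*count = 1;
	return 0;
}
```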
diff --git a/source4/ntvfs/config.mk b/source4/ntvfs/config.mk index 593c526edb..060ee19115 100644 --- a/source4/ntvfs/config.mk +++ b/source4/ntvfs/config.mk @@ -71,7 +71,9 @@ ntvfs_print_OBJ_FILES = $(ntvfssrcdir)/print/vfs_print.o [MODULE::ntvfs_ipc] SUBSYSTEM = ntvfs INIT_FUNCTION = ntvfs_ipc_init -PRIVATE_DEPENDENCIES = dcerpc_server DCERPC_COMMON +PRIVATE_DEPENDENCIES = \ + NDR_NAMED_PIPE_AUTH NAMED_PIPE_AUTH_TSTREAM \ + HEIMDAL_GSSAPI CREDENTIALS # End MODULE ntvfs_ipc ################################################ diff --git a/source4/ntvfs/ipc/vfs_ipc.c b/source4/ntvfs/ipc/vfs_ipc.c index 95ad1c51fb..0cd909e351 100644 --- a/source4/ntvfs/ipc/vfs_ipc.c +++ b/source4/ntvfs/ipc/vfs_ipc.c @@ -29,9 +29,16 @@ #include "ntvfs/ntvfs.h" #include "libcli/rap/rap.h" #include "ntvfs/ipc/proto.h" -#include "rpc_server/dcerpc_server.h" #include "libcli/raw/ioctl.h" #include "param/param.h" +#include "../lib/tsocket/tsocket.h" +#include "../libcli/named_pipe_auth/npa_tstream.h" +#include "auth/auth.h" +#include "auth/auth_sam_reply.h" +#include "lib/socket/socket.h" +#include "auth/credentials/credentials.h" +#include "auth/credentials/credentials_krb5.h" +#include <gssapi/gssapi.h> /* this is the private structure used to keep the state of an open ipc$ connection. It needs to keep information about all open @@ -39,16 +46,18 @@ struct ipc_private { struct ntvfs_module_context *ntvfs; - struct dcesrv_context *dcesrv; - /* a list of open pipes */ struct pipe_state { struct pipe_state *next, *prev; struct ipc_private *ipriv; const char *pipe_name; struct ntvfs_handle *handle; - struct dcesrv_connection *dce_conn; - uint16_t ipc_state; + struct tstream_context *npipe; + uint16_t file_type; + uint16_t device_state; + uint64_t allocation_size; + struct tevent_queue *write_queue; + struct tevent_queue *read_queue; } *pipe_list; }; @@ -91,7 +100,6 @@ static NTSTATUS ipc_connect(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_tcon* tcon) { - NTSTATUS status; struct ipc_private *ipriv; const char *sharename; @@ -136,10 +144,6 @@ static NTSTATUS ipc_connect(struct ntvfs_module_context *ntvfs, ipriv->ntvfs = ntvfs; ipriv->pipe_list = NULL; - /* setup the DCERPC server subsystem */ - status = dcesrv_init_ipc_context(ipriv, ntvfs->ctx->lp_ctx, &ipriv->dcesrv); - NT_STATUS_NOT_OK_RETURN(status); - return NT_STATUS_OK; } @@ -207,32 +211,61 @@ static int ipc_fd_destructor(struct pipe_state *p) return 0; } -static struct socket_address *ipc_get_my_addr(struct dcesrv_connection *dce_conn, TALLOC_CTX *mem_ctx) -{ - struct ipc_private *ipriv = dce_conn->transport.private_data; - - return ntvfs_get_my_addr(ipriv->ntvfs, mem_ctx); -} - -static struct socket_address *ipc_get_peer_addr(struct dcesrv_connection *dce_conn, TALLOC_CTX *mem_ctx) -{ - struct ipc_private *ipriv = dce_conn->transport.private_data; +struct ipc_open_state { + struct ipc_private *ipriv; + struct pipe_state *p; + struct ntvfs_request *req; + union smb_open *oi; + struct netr_SamInfo3 *info3; +}; - return ntvfs_get_peer_addr(ipriv->ntvfs, mem_ctx); -} +static void ipc_open_done(struct tevent_req *subreq); /* - open a file backend - used for MSRPC pipes + open a file - used for MSRPC pipes */ -static NTSTATUS ipc_open_generic(struct ntvfs_module_context *ntvfs, - struct ntvfs_request *req, const char *fname, - struct pipe_state **ps) +static NTSTATUS ipc_open(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_open *oi) { - struct pipe_state *p; NTSTATUS status; - struct dcerpc_binding 
*ep_description; - struct ipc_private *ipriv = ntvfs->private_data; + struct pipe_state *p; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); + struct smb_iconv_convenience *smb_ic + = lp_iconv_convenience(ipriv->ntvfs->ctx->lp_ctx); struct ntvfs_handle *h; + struct ipc_open_state *state; + struct tevent_req *subreq; + const char *fname; + const char *directory; + struct socket_address *client_sa; + struct tsocket_address *client_addr; + struct socket_address *server_sa; + struct tsocket_address *server_addr; + int ret; + DATA_BLOB delegated_creds = data_blob_null; + + switch (oi->generic.level) { + case RAW_OPEN_NTCREATEX: + fname = oi->ntcreatex.in.fname; + break; + case RAW_OPEN_OPENX: + fname = oi->openx.in.fname; + break; + case RAW_OPEN_SMB2: + fname = oi->smb2.in.fname; + break; + default: + status = NT_STATUS_NOT_SUPPORTED; + break; + } + + directory = talloc_asprintf(req, "%s/np", + lp_ncalrpc_dir(ipriv->ntvfs->ctx->lp_ctx)); + NT_STATUS_HAVE_NO_MEMORY(directory); + + state = talloc(req, struct ipc_open_state); + NT_STATUS_HAVE_NO_MEMORY(state); status = ntvfs_handle_new(ntvfs, req, &h); NT_STATUS_NOT_OK_RETURN(status); @@ -240,156 +273,192 @@ static NTSTATUS ipc_open_generic(struct ntvfs_module_context *ntvfs, p = talloc(h, struct pipe_state); NT_STATUS_HAVE_NO_MEMORY(p); - ep_description = talloc(req, struct dcerpc_binding); - NT_STATUS_HAVE_NO_MEMORY(ep_description); - while (fname[0] == '\\') fname++; p->pipe_name = talloc_asprintf(p, "\\pipe\\%s", fname); NT_STATUS_HAVE_NO_MEMORY(p->pipe_name); p->handle = h; - p->ipc_state = 0x5ff; - - /* - we're all set, now ask the dcerpc server subsystem to open the - endpoint. At this stage the pipe isn't bound, so we don't - know what interface the user actually wants, just that they want - one of the interfaces attached to this pipe endpoint. 
- */ - ep_description->transport = NCACN_NP; - ep_description->endpoint = talloc_strdup(ep_description, p->pipe_name); - NT_STATUS_HAVE_NO_MEMORY(ep_description->endpoint); - - /* The session info is refcount-increased in the - * dcesrv_endpoint_search_connect() function - */ - status = dcesrv_endpoint_search_connect(ipriv->dcesrv, - p, - ep_description, - h->session_info, - ntvfs->ctx->event_ctx, - ntvfs->ctx->msg_ctx, - ntvfs->ctx->server_id, - 0, - &p->dce_conn); - NT_STATUS_NOT_OK_RETURN(status); + p->ipriv = ipriv; - p->dce_conn->transport.private_data = ipriv; - p->dce_conn->transport.report_output_data = NULL; - p->dce_conn->transport.get_my_addr = ipc_get_my_addr; - p->dce_conn->transport.get_peer_addr = ipc_get_peer_addr; - - DLIST_ADD(ipriv->pipe_list, p); + p->write_queue = tevent_queue_create(p, "ipc_write_queue"); + NT_STATUS_HAVE_NO_MEMORY(p->write_queue); - p->ipriv = ipriv; + p->read_queue = tevent_queue_create(p, "ipc_read_queue"); + NT_STATUS_HAVE_NO_MEMORY(p->read_queue); - talloc_set_destructor(p, ipc_fd_destructor); + state->ipriv = ipriv; + state->p = p; + state->req = req; + state->oi = oi; - status = ntvfs_handle_set_backend_data(h, ipriv->ntvfs, p); + status = auth_convert_server_info_saminfo3(state, + req->session_info->server_info, + &state->info3); NT_STATUS_NOT_OK_RETURN(status); - *ps = p; - return NT_STATUS_OK; -} + client_sa = ntvfs_get_peer_addr(ntvfs, state); + if (!client_sa) { + return NT_STATUS_INTERNAL_ERROR; + } -/* - open a file with ntcreatex - used for MSRPC pipes -*/ -static NTSTATUS ipc_open_ntcreatex(struct ntvfs_module_context *ntvfs, - struct ntvfs_request *req, union smb_open *oi) -{ - struct pipe_state *p; - NTSTATUS status; + server_sa = ntvfs_get_my_addr(ntvfs, state); + if (!server_sa) { + return NT_STATUS_INTERNAL_ERROR; + } - status = ipc_open_generic(ntvfs, req, oi->ntcreatex.in.fname, &p); - if (!NT_STATUS_IS_OK(status)) { + ret = tsocket_address_inet_from_strings(state, "ip", + client_sa->addr, + client_sa->port, + &client_addr); + if (ret == -1) { + status = map_nt_error_from_unix(errno); return status; } - ZERO_STRUCT(oi->ntcreatex.out); - oi->ntcreatex.out.file.ntvfs= p->handle; - oi->ntcreatex.out.ipc_state = p->ipc_state; - oi->ntcreatex.out.file_type = FILE_TYPE_MESSAGE_MODE_PIPE; + ret = tsocket_address_inet_from_strings(state, "ip", + server_sa->addr, + server_sa->port, + &server_addr); + if (ret == -1) { + status = map_nt_error_from_unix(errno); + return status; + } - return status; -} + if (req->session_info->credentials) { + struct gssapi_creds_container *gcc; + OM_uint32 gret; + OM_uint32 minor_status; + gss_buffer_desc cred_token; + + ret = cli_credentials_get_client_gss_creds(req->session_info->credentials, + ipriv->ntvfs->ctx->event_ctx, + ipriv->ntvfs->ctx->lp_ctx, + &gcc); + if (ret) { + goto skip; + } -/* - open a file with openx - used for MSRPC pipes -*/ -static NTSTATUS ipc_open_openx(struct ntvfs_module_context *ntvfs, - struct ntvfs_request *req, union smb_open *oi) -{ - struct pipe_state *p; - NTSTATUS status; - const char *fname = oi->openx.in.fname; + gret = gss_export_cred(&minor_status, + gcc->creds, + &cred_token); + if (gret != GSS_S_COMPLETE) { + return NT_STATUS_INTERNAL_ERROR; + } - status = ipc_open_generic(ntvfs, req, fname, &p); - if (!NT_STATUS_IS_OK(status)) { - return status; + if (cred_token.length) { + delegated_creds = data_blob_talloc(req, + cred_token.value, + cred_token.length); + gss_release_buffer(&minor_status, &cred_token); + NT_STATUS_HAVE_NO_MEMORY(delegated_creds.data); + } } - 
ZERO_STRUCT(oi->openx.out); - oi->openx.out.file.ntvfs= p->handle; - oi->openx.out.ftype = 2; - oi->openx.out.devstate = p->ipc_state; - - return status; +skip: + + subreq = tstream_npa_connect_send(p, + ipriv->ntvfs->ctx->event_ctx, + smb_ic, + directory, + fname, + client_addr, + NULL, + server_addr, + NULL, + state->info3, + req->session_info->session_key, + delegated_creds); + NT_STATUS_HAVE_NO_MEMORY(subreq); + tevent_req_set_callback(subreq, ipc_open_done, state); + + req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; + return NT_STATUS_OK; } -/* - open a file with SMB2 Create - used for MSRPC pipes -*/ -static NTSTATUS ipc_open_smb2(struct ntvfs_module_context *ntvfs, - struct ntvfs_request *req, union smb_open *oi) +static void ipc_open_done(struct tevent_req *subreq) { - struct pipe_state *p; + struct ipc_open_state *state = tevent_req_callback_data(subreq, + struct ipc_open_state); + struct ipc_private *ipriv = state->ipriv; + struct pipe_state *p = state->p; + struct ntvfs_request *req = state->req; + union smb_open *oi = state->oi; + int ret; + int sys_errno; NTSTATUS status; - status = ipc_open_generic(ntvfs, req, oi->smb2.in.fname, &p); - NT_STATUS_NOT_OK_RETURN(status); - - ZERO_STRUCT(oi->smb2.out); - oi->smb2.out.file.ntvfs = p->handle; - oi->smb2.out.oplock_level = oi->smb2.in.oplock_level; - oi->smb2.out.create_action = NTCREATEX_ACTION_EXISTED; - oi->smb2.out.create_time = 0; - oi->smb2.out.access_time = 0; - oi->smb2.out.write_time = 0; - oi->smb2.out.change_time = 0; - oi->smb2.out.alloc_size = 4096; - oi->smb2.out.size = 0; - oi->smb2.out.file_attr = FILE_ATTRIBUTE_NORMAL; - oi->smb2.out.reserved2 = 0; + ret = tstream_npa_connect_recv(subreq, &sys_errno, + p, &p->npipe, + &p->file_type, + &p->device_state, + &p->allocation_size); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; + } - return status; -} + DLIST_ADD(ipriv->pipe_list, p); + talloc_set_destructor(p, ipc_fd_destructor); -/* - open a file - used for MSRPC pipes -*/ -static NTSTATUS ipc_open(struct ntvfs_module_context *ntvfs, - struct ntvfs_request *req, union smb_open *oi) -{ - NTSTATUS status; + status = ntvfs_handle_set_backend_data(p->handle, ipriv->ntvfs, p); + if (!NT_STATUS_IS_OK(status)) { + goto reply; + } switch (oi->generic.level) { case RAW_OPEN_NTCREATEX: - status = ipc_open_ntcreatex(ntvfs, req, oi); + ZERO_STRUCT(oi->ntcreatex.out); + oi->ntcreatex.out.file.ntvfs = p->handle; + oi->ntcreatex.out.oplock_level = 0; + oi->ntcreatex.out.create_action = NTCREATEX_ACTION_EXISTED; + oi->ntcreatex.out.create_time = 0; + oi->ntcreatex.out.access_time = 0; + oi->ntcreatex.out.write_time = 0; + oi->ntcreatex.out.change_time = 0; + oi->ntcreatex.out.attrib = FILE_ATTRIBUTE_NORMAL; + oi->ntcreatex.out.alloc_size = p->allocation_size; + oi->ntcreatex.out.size = 0; + oi->ntcreatex.out.file_type = p->file_type; + oi->ntcreatex.out.ipc_state = p->device_state; + oi->ntcreatex.out.is_directory = 0; break; case RAW_OPEN_OPENX: - status = ipc_open_openx(ntvfs, req, oi); + ZERO_STRUCT(oi->openx.out); + oi->openx.out.file.ntvfs = p->handle; + oi->openx.out.attrib = FILE_ATTRIBUTE_NORMAL; + oi->openx.out.write_time = 0; + oi->openx.out.size = 0; + oi->openx.out.access = 0; + oi->openx.out.ftype = p->file_type; + oi->openx.out.devstate = p->device_state; + oi->openx.out.action = 0; + oi->openx.out.unique_fid = 0; + oi->openx.out.access_mask = 0; + oi->openx.out.unknown = 0; break; case RAW_OPEN_SMB2: - status = ipc_open_smb2(ntvfs, req, oi); + 
ZERO_STRUCT(oi->smb2.out); + oi->smb2.out.file.ntvfs = p->handle; + oi->smb2.out.oplock_level = oi->smb2.in.oplock_level; + oi->smb2.out.create_action = NTCREATEX_ACTION_EXISTED; + oi->smb2.out.create_time = 0; + oi->smb2.out.access_time = 0; + oi->smb2.out.write_time = 0; + oi->smb2.out.change_time = 0; + oi->smb2.out.alloc_size = p->allocation_size; + oi->smb2.out.size = 0; + oi->smb2.out.file_attr = FILE_ATTRIBUTE_NORMAL; + oi->smb2.out.reserved2 = 0; break; default: - status = NT_STATUS_NOT_SUPPORTED; break; } - return status; +reply: + req->async_states->status = status; + req->async_states->send_fn(req); } /* @@ -428,28 +497,108 @@ static NTSTATUS ipc_copy(struct ntvfs_module_context *ntvfs, return NT_STATUS_ACCESS_DENIED; } -static NTSTATUS ipc_readx_dcesrv_output(void *private_data, DATA_BLOB *out, size_t *nwritten) +struct ipc_readv_next_vector_state { + uint8_t *buf; + size_t len; + off_t ofs; + size_t remaining; +}; + +static void ipc_readv_next_vector_init(struct ipc_readv_next_vector_state *s, + uint8_t *buf, size_t len) +{ + ZERO_STRUCTP(s); + + s->buf = buf; + s->len = MIN(len, UINT16_MAX); + //DEBUG(0,("readv_next_vector_init[%u 0x%04X]\n", s->len, s->len)); +} + +static int ipc_readv_next_vector(struct tstream_context *stream, + void *private_data, + TALLOC_CTX *mem_ctx, + struct iovec **_vector, + size_t *count) { - DATA_BLOB *blob = private_data; + struct ipc_readv_next_vector_state *state = + (struct ipc_readv_next_vector_state *)private_data; + struct iovec *vector; + ssize_t pending; + size_t wanted; + + if (state->ofs == state->len) { + *_vector = NULL; + *count = 0; +// DEBUG(0,("readv_next_vector done ofs[%u 0x%04X]\n", +// state->ofs, state->ofs)); + return 0; + } - if (out->length < blob->length) { - blob->length = out->length; + pending = tstream_pending_bytes(stream); + if (pending == -1) { + return -1; } - memcpy(blob->data, out->data, blob->length); - *nwritten = blob->length; - return NT_STATUS_OK; + + if (pending == 0 && state->ofs != 0) { + /* return a short read */ + *_vector = NULL; + *count = 0; +// DEBUG(0,("readv_next_vector short read ofs[%u 0x%04X]\n", +// state->ofs, state->ofs)); + return 0; + } + + if (pending == 0) { + /* we want at least one byte and recheck again */ + wanted = 1; + } else { + size_t missing = state->len - state->ofs; + if (pending > missing) { + /* there's more available */ + state->remaining = pending - missing; + wanted = missing; + } else { + /* read what we can get and recheck in the next cycle */ + wanted = pending; + } + } + + vector = talloc_array(mem_ctx, struct iovec, 1); + if (!vector) { + return -1; + } + + vector[0].iov_base = state->buf + state->ofs; + vector[0].iov_len = wanted; + + state->ofs += wanted; + + *_vector = vector; + *count = 1; + return 0; } +struct ipc_read_state { + struct ipc_private *ipriv; + struct pipe_state *p; + struct ntvfs_request *req; + union smb_read *rd; + struct ipc_readv_next_vector_state next_vector; +}; + +static void ipc_read_done(struct tevent_req *subreq); + /* read from a file */ static NTSTATUS ipc_read(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_read *rd) { - struct ipc_private *ipriv = ntvfs->private_data; - DATA_BLOB data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; - NTSTATUS status = NT_STATUS_OK; + struct ipc_read_state *state; + struct tevent_req *subreq; if (rd->generic.level != RAW_READ_GENERIC) { return ntvfs_map_read(ntvfs, req, rd); @@ -460,58 +609,143 @@ static 
NTSTATUS ipc_read(struct ntvfs_module_context *ntvfs, return NT_STATUS_INVALID_HANDLE; } - data.length = rd->readx.in.maxcnt; - data.data = rd->readx.out.data; - if (data.length > UINT16_MAX) { - data.length = UINT16_MAX; + state = talloc(req, struct ipc_read_state); + NT_STATUS_HAVE_NO_MEMORY(state); + + state->ipriv = ipriv; + state->p = p; + state->req = req; + state->rd = rd; + + /* rd->readx.out.data is already allocated */ + ipc_readv_next_vector_init(&state->next_vector, + rd->readx.out.data, + rd->readx.in.maxcnt); + + subreq = tstream_readv_pdu_queue_send(req, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->read_queue, + ipc_readv_next_vector, + &state->next_vector); + NT_STATUS_HAVE_NO_MEMORY(subreq); + tevent_req_set_callback(subreq, ipc_read_done, state); + + req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; + return NT_STATUS_OK; +} + +static void ipc_read_done(struct tevent_req *subreq) +{ + struct ipc_read_state *state = + tevent_req_callback_data(subreq, + struct ipc_read_state); + struct ntvfs_request *req = state->req; + union smb_read *rd = state->rd; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_readv_pdu_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; } - if (data.length != 0) { - status = dcesrv_output(p->dce_conn, &data, ipc_readx_dcesrv_output); - if (NT_STATUS_IS_ERR(status)) { - return status; - } + status = NT_STATUS_OK; + if (state->next_vector.remaining > 0) { + status = STATUS_BUFFER_OVERFLOW; } - rd->readx.out.remaining = 0; + rd->readx.out.remaining = state->next_vector.remaining; rd->readx.out.compaction_mode = 0; - rd->readx.out.nread = data.length; + rd->readx.out.nread = ret; - return status; +reply: + req->async_states->status = status; + req->async_states->send_fn(req); } +struct ipc_write_state { + struct ipc_private *ipriv; + struct pipe_state *p; + struct ntvfs_request *req; + union smb_write *wr; + struct iovec iov; +}; + +static void ipc_write_done(struct tevent_req *subreq); + /* write to a file */ static NTSTATUS ipc_write(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_write *wr) { - struct ipc_private *ipriv = ntvfs->private_data; - DATA_BLOB data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; - NTSTATUS status; + struct tevent_req *subreq; + struct ipc_write_state *state; if (wr->generic.level != RAW_WRITE_GENERIC) { return ntvfs_map_write(ntvfs, req, wr); } - data.data = discard_const_p(void, wr->writex.in.data); - data.length = wr->writex.in.count; - p = pipe_state_find(ipriv, wr->writex.in.file.ntvfs); if (!p) { return NT_STATUS_INVALID_HANDLE; } - status = dcesrv_input(p->dce_conn, &data); - if (!NT_STATUS_IS_OK(status)) { - return status; + state = talloc(req, struct ipc_write_state); + NT_STATUS_HAVE_NO_MEMORY(state); + + state->ipriv = ipriv; + state->p = p; + state->req = req; + state->wr = wr; + state->iov.iov_base = discard_const_p(void, wr->writex.in.data); + state->iov.iov_len = wr->writex.in.count; + + subreq = tstream_writev_queue_send(state, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->write_queue, + &state->iov, 1); + NT_STATUS_HAVE_NO_MEMORY(subreq); + tevent_req_set_callback(subreq, ipc_write_done, state); + + req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; + return NT_STATUS_OK; +} + +static void ipc_write_done(struct tevent_req *subreq) +{ + struct ipc_write_state *state = + tevent_req_callback_data(subreq, + 
struct ipc_write_state); + struct ntvfs_request *req = state->req; + union smb_write *wr = state->wr; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_writev_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; } - wr->writex.out.nwritten = data.length; + status = NT_STATUS_OK; + + wr->writex.out.nwritten = ret; wr->writex.out.remaining = 0; - return NT_STATUS_OK; +reply: + req->async_states->status = status; + req->async_states->send_fn(req); } /* @@ -540,7 +774,8 @@ static NTSTATUS ipc_flush(struct ntvfs_module_context *ntvfs, static NTSTATUS ipc_close(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_close *io) { - struct ipc_private *ipriv = ntvfs->private_data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; if (io->generic.level != RAW_CLOSE_CLOSE) { @@ -563,7 +798,8 @@ static NTSTATUS ipc_close(struct ntvfs_module_context *ntvfs, static NTSTATUS ipc_exit(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req) { - struct ipc_private *ipriv = ntvfs->private_data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p, *next; for (p=ipriv->pipe_list; p; p=next) { @@ -583,7 +819,8 @@ static NTSTATUS ipc_exit(struct ntvfs_module_context *ntvfs, static NTSTATUS ipc_logoff(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req) { - struct ipc_private *ipriv = ntvfs->private_data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p, *next; for (p=ipriv->pipe_list; p; p=next) { @@ -639,7 +876,8 @@ static NTSTATUS ipc_setfileinfo(struct ntvfs_module_context *ntvfs, static NTSTATUS ipc_qfileinfo(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_fileinfo *info) { - struct ipc_private *ipriv = ntvfs->private_data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p = pipe_state_find(ipriv, info->generic.in.file.ntvfs); if (!p) { return NT_STATUS_INVALID_HANDLE; @@ -724,32 +962,29 @@ static NTSTATUS ipc_search_close(struct ntvfs_module_context *ntvfs, return NT_STATUS_ACCESS_DENIED; } -static NTSTATUS ipc_trans_dcesrv_output(void *private_data, DATA_BLOB *out, size_t *nwritten) -{ - NTSTATUS status = NT_STATUS_OK; - DATA_BLOB *blob = private_data; - - if (out->length > blob->length) { - status = STATUS_BUFFER_OVERFLOW; - } +struct ipc_trans_state { + struct ipc_private *ipriv; + struct pipe_state *p; + struct ntvfs_request *req; + struct smb_trans2 *trans; + struct iovec writev_iov; + struct ipc_readv_next_vector_state next_vector; +}; - if (out->length < blob->length) { - blob->length = out->length; - } - memcpy(blob->data, out->data, blob->length); - *nwritten = blob->length; - return status; -} +static void ipc_trans_writev_done(struct tevent_req *subreq); +static void ipc_trans_readv_done(struct tevent_req *subreq); /* SMBtrans - handle a DCERPC command */ static NTSTATUS ipc_dcerpc_cmd(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, struct smb_trans2 *trans) { + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; - struct ipc_private *ipriv = ntvfs->private_data; - NTSTATUS status; DATA_BLOB fnum_key; uint16_t fnum; + struct ipc_trans_state *state; + struct tevent_req *subreq; /* * the fnum is in 
setup[1], a 16 bit value @@ -765,43 +1000,122 @@ static NTSTATUS ipc_dcerpc_cmd(struct ntvfs_module_context *ntvfs, return NT_STATUS_INVALID_HANDLE; } - trans->out.data = data_blob_talloc(req, NULL, trans->in.max_data); - if (!trans->out.data.data) { - return NT_STATUS_NO_MEMORY; - } - - /* pass the data to the dcerpc server. Note that we don't - expect this to fail, and things like NDR faults are not - reported at this stage. Those sorts of errors happen in the - dcesrv_output stage */ - status = dcesrv_input(p->dce_conn, &trans->in.data); - if (!NT_STATUS_IS_OK(status)) { - return status; - } - /* - now ask the dcerpc system for some output. This doesn't yet handle - async calls. Again, we only expect NT_STATUS_OK. If the call fails then - the error is encoded at the dcerpc level - */ - status = dcesrv_output(p->dce_conn, &trans->out.data, ipc_trans_dcesrv_output); - if (NT_STATUS_IS_ERR(status)) { - return status; + * Trans requests are only allowed + * if no other Trans or Read is active + */ + if (tevent_queue_length(p->read_queue) > 0) { + return NT_STATUS_PIPE_BUSY; } + state = talloc(req, struct ipc_trans_state); + NT_STATUS_HAVE_NO_MEMORY(state); + trans->out.setup_count = 0; trans->out.setup = NULL; trans->out.params = data_blob(NULL, 0); + trans->out.data = data_blob_talloc(req, NULL, trans->in.max_data); + NT_STATUS_HAVE_NO_MEMORY(trans->out.data.data); + + state->ipriv = ipriv; + state->p = p; + state->req = req; + state->trans = trans; + state->writev_iov.iov_base = trans->in.data.data; + state->writev_iov.iov_len = trans->in.data.length; + + ipc_readv_next_vector_init(&state->next_vector, + trans->out.data.data, + trans->out.data.length); + + subreq = tstream_writev_queue_send(state, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->write_queue, + &state->writev_iov, 1); + NT_STATUS_HAVE_NO_MEMORY(subreq); + tevent_req_set_callback(subreq, ipc_trans_writev_done, state); + + req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; + return NT_STATUS_OK; +} - return status; +static void ipc_trans_writev_done(struct tevent_req *subreq) +{ + struct ipc_trans_state *state = + tevent_req_callback_data(subreq, + struct ipc_trans_state); + struct ipc_private *ipriv = state->ipriv; + struct pipe_state *p = state->p; + struct ntvfs_request *req = state->req; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_writev_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == 0) { + status = NT_STATUS_PIPE_DISCONNECTED; + goto reply; + } else if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; + } + + subreq = tstream_readv_pdu_queue_send(state, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->read_queue, + ipc_readv_next_vector, + &state->next_vector); + if (!subreq) { + status = NT_STATUS_NO_MEMORY; + goto reply; + } + tevent_req_set_callback(subreq, ipc_trans_readv_done, state); + return; + +reply: + req->async_states->status = status; + req->async_states->send_fn(req); } +static void ipc_trans_readv_done(struct tevent_req *subreq) +{ + struct ipc_trans_state *state = + tevent_req_callback_data(subreq, + struct ipc_trans_state); + struct ntvfs_request *req = state->req; + struct smb_trans2 *trans = state->trans; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_readv_pdu_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; + } + + status = NT_STATUS_OK; + if (state->next_vector.remaining > 0) { + status = STATUS_BUFFER_OVERFLOW; + } + + 
trans->out.data.length = ret; + +reply: + req->async_states->status = status; + req->async_states->send_fn(req); +} /* SMBtrans - set named pipe state */ static NTSTATUS ipc_set_nm_pipe_state(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, struct smb_trans2 *trans) { - struct ipc_private *ipriv = ntvfs->private_data; + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; DATA_BLOB fnum_key; @@ -816,7 +1130,11 @@ static NTSTATUS ipc_set_nm_pipe_state(struct ntvfs_module_context *ntvfs, if (trans->in.params.length != 2) { return NT_STATUS_INVALID_PARAMETER; } - p->ipc_state = SVAL(trans->in.params.data, 0); + + /* + * TODO: pass this to the tstream_npa logic + */ + p->device_state = SVAL(trans->in.params.data, 0); trans->out.setup_count = 0; trans->out.setup = NULL; @@ -855,12 +1173,26 @@ static NTSTATUS ipc_trans(struct ntvfs_module_context *ntvfs, return status; } +struct ipc_ioctl_state { + struct ipc_private *ipriv; + struct pipe_state *p; + struct ntvfs_request *req; + union smb_ioctl *io; + struct iovec writev_iov; + struct ipc_readv_next_vector_state next_vector; +}; + +static void ipc_ioctl_writev_done(struct tevent_req *subreq); +static void ipc_ioctl_readv_done(struct tevent_req *subreq); + static NTSTATUS ipc_ioctl_smb2(struct ntvfs_module_context *ntvfs, struct ntvfs_request *req, union smb_ioctl *io) { + struct ipc_private *ipriv = talloc_get_type_abort(ntvfs->private_data, + struct ipc_private); struct pipe_state *p; - struct ipc_private *ipriv = ntvfs->private_data; - NTSTATUS status; + struct ipc_ioctl_state *state; + struct tevent_req *subreq; switch (io->smb2.in.function) { case FSCTL_NAMED_PIPE_READ_WRITE: @@ -875,31 +1207,113 @@ static NTSTATUS ipc_ioctl_smb2(struct ntvfs_module_context *ntvfs, return NT_STATUS_INVALID_HANDLE; } - io->smb2.out.out = data_blob_talloc(req, NULL, io->smb2.in.max_response_size); - NT_STATUS_HAVE_NO_MEMORY(io->smb2.out.out.data); - - /* pass the data to the dcerpc server. Note that we don't - expect this to fail, and things like NDR faults are not - reported at this stage. Those sorts of errors happen in the - dcesrv_output stage */ - status = dcesrv_input(p->dce_conn, &io->smb2.in.out); - NT_STATUS_NOT_OK_RETURN(status); - /* - now ask the dcerpc system for some output. This doesn't yet handle - async calls. Again, we only expect NT_STATUS_OK. 
If the call fails then - the error is encoded at the dcerpc level - */ - status = dcesrv_output(p->dce_conn, &io->smb2.out.out, ipc_trans_dcesrv_output); - NT_STATUS_IS_ERR_RETURN(status); + * Trans requests are only allowed + * if no other Trans or Read is active + */ + if (tevent_queue_length(p->read_queue) > 0) { + return NT_STATUS_PIPE_BUSY; + } + + state = talloc(req, struct ipc_ioctl_state); + NT_STATUS_HAVE_NO_MEMORY(state); io->smb2.out._pad = 0; io->smb2.out.function = io->smb2.in.function; io->smb2.out.unknown2 = 0; io->smb2.out.unknown3 = 0; io->smb2.out.in = io->smb2.in.out; + io->smb2.out.out = data_blob_talloc(req, NULL, io->smb2.in.max_response_size); + NT_STATUS_HAVE_NO_MEMORY(io->smb2.out.out.data); - return status; + state->ipriv = ipriv; + state->p = p; + state->req = req; + state->io = io; + state->writev_iov.iov_base = io->smb2.in.out.data; + state->writev_iov.iov_len = io->smb2.in.out.length; + + ipc_readv_next_vector_init(&state->next_vector, + io->smb2.out.out.data, + io->smb2.out.out.length); + + subreq = tstream_writev_queue_send(state, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->write_queue, + &state->writev_iov, 1); + NT_STATUS_HAVE_NO_MEMORY(subreq); + tevent_req_set_callback(subreq, ipc_ioctl_writev_done, state); + + req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; + return NT_STATUS_OK; +} + +static void ipc_ioctl_writev_done(struct tevent_req *subreq) +{ + struct ipc_ioctl_state *state = + tevent_req_callback_data(subreq, + struct ipc_ioctl_state); + struct ipc_private *ipriv = state->ipriv; + struct pipe_state *p = state->p; + struct ntvfs_request *req = state->req; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_writev_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; + } + + subreq = tstream_readv_pdu_queue_send(state, + ipriv->ntvfs->ctx->event_ctx, + p->npipe, + p->read_queue, + ipc_readv_next_vector, + &state->next_vector); + if (!subreq) { + status = NT_STATUS_NO_MEMORY; + goto reply; + } + tevent_req_set_callback(subreq, ipc_ioctl_readv_done, state); + return; + +reply: + req->async_states->status = status; + req->async_states->send_fn(req); +} + +static void ipc_ioctl_readv_done(struct tevent_req *subreq) +{ + struct ipc_ioctl_state *state = + tevent_req_callback_data(subreq, + struct ipc_ioctl_state); + struct ntvfs_request *req = state->req; + union smb_ioctl *io = state->io; + int ret; + int sys_errno; + NTSTATUS status; + + ret = tstream_readv_pdu_queue_recv(subreq, &sys_errno); + TALLOC_FREE(subreq); + if (ret == -1) { + status = map_nt_error_from_unix(sys_errno); + goto reply; + } + + status = NT_STATUS_OK; + if (state->next_vector.remaining > 0) { + status = STATUS_BUFFER_OVERFLOW; + } + + io->smb2.out.out.length = ret; + +reply: + req->async_states->status = status; + req->async_states->send_fn(req); } /* |