From 5ddf678e0113f81aa2b5f99134cda4fe8c01afb7 Mon Sep 17 00:00:00 2001
From: Andrew Tridgell
Date: Fri, 23 Jul 2004 06:40:49 +0000
Subject: r1578: the first stage of the async client rewrite.

Up to now the client code has had an async API and operated asynchronously
at the packet level, but it was not truly async: it assumed that it could
always write to the socket, and that when a partial packet came in it could
block waiting for the rest of the packet.

This change makes the SMB client library fully async by adding a separate
outgoing packet queue, using non-blocking socket IO, and keeping an input
buffer that can fill asynchronously until the full packet has arrived.

The main complexity was in dealing with the events structure when using the
CIFS proxy backend. In that case the same events structure needs to be used
by both the client library and the main smbd server, so that while the
client library is waiting for a reply the main server keeps processing
packets. This required some changes in the events library code.

The next step is to make the generated rpc client code use these new
capabilities.
(This used to be commit 96bf4da3edc4d64b0f58ef520269f3b385b8da02)
---
 source4/ntvfs/cifs/vfs_cifs.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

(limited to 'source4/ntvfs')

diff --git a/source4/ntvfs/cifs/vfs_cifs.c b/source4/ntvfs/cifs/vfs_cifs.c
index b6d3486ad8..fd94a923c9 100644
--- a/source4/ntvfs/cifs/vfs_cifs.c
+++ b/source4/ntvfs/cifs/vfs_cifs.c
@@ -68,17 +68,17 @@ static BOOL oplock_handler(struct cli_transport *transport, uint16_t tid, uint16
 	return req_send_oplock_break(private->tcon, fnum, level);
 }
 
-/*
+ /*
   a handler for read events on a connection to a backend server
 */
 static void cifs_socket_handler(struct event_context *ev, struct fd_event *fde, time_t t, uint16_t flags)
 {
 	struct cvfs_private *private = fde->private;
 	struct smbsrv_tcon *tcon = private->tcon;
-	
+
 	DEBUG(5,("cifs_socket_handler event on fd %d\n", fde->fd));
-	
-	if (!cli_request_receive_next(private->transport)) {
+
+	if (!cli_transport_process(private->transport)) {
 		/* the connection to our server is dead */
 		close_cnum(tcon);
 	}
@@ -93,7 +93,6 @@ static NTSTATUS cvfs_connect(struct smbsrv_request *req, const char *sharename)
 	NTSTATUS status;
 	struct cvfs_private *private;
 	const char *map_calls;
-	struct fd_event fde;
 	const char *host, *user, *pass, *domain, *remote_share;
 
 	/* Here we need to determine which server to connect to.
@@ -157,18 +156,17 @@ static NTSTATUS cvfs_connect(struct smbsrv_request *req, const char *sharename)
 		tcon->ntvfs_ops = ops;
 	}
 
-	/* we need to tell the event loop that we wish to receive read events
-	   on our SMB connection to the server */
-	fde.fd = private->transport->socket->fd;
-	fde.flags = EVENT_FD_READ;
-	fde.private = private;
-	fde.handler = cifs_socket_handler;
-
-	event_add_fd(tcon->smb_conn->connection->event.ctx, &fde);
-
 	/* we need to receive oplock break requests from the server */
 	cli_oplock_handler(private->transport, oplock_handler, private);
-	cli_transport_idle_handler(private->transport, idle_func, 100, private);
+	cli_transport_idle_handler(private->transport, idle_func, 1, private);
+
+	private->transport->event.fde->handler = cifs_socket_handler;
+	private->transport->event.fde->private = private;
+
+	event_context_merge(tcon->smb_conn->connection->event.ctx,
+			    private->transport->event.ctx);
+
+	private->transport->event.ctx = tcon->smb_conn->connection->event.ctx;
 
 	return NT_STATUS_OK;
 }
@@ -180,7 +178,6 @@ static NTSTATUS cvfs_disconnect(struct smbsrv_tcon *tcon)
 {
 	struct cvfs_private *private = tcon->ntvfs_private;
 
-	event_remove_fd_all(tcon->smb_conn->connection->event.ctx, private->transport->socket->fd);
 	smb_tree_disconnect(private->tree);
 	cli_tree_close(private->tree);
 
--
cgit
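
The input-buffer behaviour described in the commit message (non-blocking
socket IO with a buffer that fills asynchronously until the full packet has
arrived) follows a common pattern. Below is a minimal sketch of that pattern
in C. It is not the actual cli_transport code: the names (struct async_inbuf,
inbuf_fill) and the assumption of SMB-over-TCP style framing, where the low
24 bits of a 4-byte prefix give the body length, are illustrative only.

/*
 * Hypothetical sketch, not Samba code: accumulate a length-prefixed packet
 * from a non-blocking socket, returning to the event loop whenever the data
 * is not yet complete.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>

struct async_inbuf {
	uint8_t *data;     /* bytes accumulated so far */
	size_t received;   /* how many of them are valid */
	size_t wanted;     /* 4 until the prefix is in, then 4 + body length */
};

/*
 * Called whenever the event loop reports the fd as readable.  Returns 1 when
 * a complete packet is buffered, 0 when more data is still outstanding, and
 * -1 when the connection is dead.
 */
static int inbuf_fill(int fd, struct async_inbuf *in)
{
	if (in->wanted == 0) {
		in->wanted = 4;                 /* read the length prefix first */
		in->data = malloc(in->wanted);
		if (in->data == NULL) return -1;
	}

	while (in->received < in->wanted) {
		ssize_t n = recv(fd, in->data + in->received,
				 in->wanted - in->received, 0);
		if (n == 0) {
			return -1;              /* peer closed the socket */
		}
		if (n < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK) {
				return 0;       /* partial packet: try again later */
			}
			return -1;
		}
		in->received += n;

		if (in->received == 4 && in->wanted == 4) {
			/* assumed framing: low 24 bits of the prefix = body length */
			size_t body = ((size_t)in->data[1] << 16) |
				      ((size_t)in->data[2] << 8) |
				      (size_t)in->data[3];
			uint8_t *tmp = realloc(in->data, 4 + body);
			if (tmp == NULL) return -1;
			in->data = tmp;
			in->wanted = 4 + body;
		}
	}
	return 1;                               /* full packet is available */
}

A caller would invoke inbuf_fill() from its read-event handler, hand the
completed packet to the reply-matching code when it returns 1, and reset the
structure (free the buffer, zero the counters) before the next packet.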
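
The event-context handling in the patch (event_context_merge() followed by
pointing the transport's event.ctx at the server's context) can be pictured
as moving one context's fd events onto another so that a single loop services
both the proxy's client connection and the server's own sockets. The sketch
below shows only that idea; it is not the Samba events library, and every
name in it (struct ev_context, ev_context_merge, ev_loop_once) is
hypothetical.

/*
 * Hypothetical sketch, not Samba code: a tiny poll()-based event context
 * whose fd events can be merged into another context.
 */
#include <poll.h>
#include <stddef.h>

struct ev_context;

struct ev_fd {
	struct ev_fd *next;
	struct ev_context *ctx;
	int fd;
	short events;                   /* POLLIN and/or POLLOUT */
	void (*handler)(struct ev_fd *fde, short revents, void *private_data);
	void *private_data;
};

struct ev_context {
	struct ev_fd *fds;              /* singly linked list of fd events */
};

/* move every fd event from 'src' onto 'dst' so one loop services both */
static void ev_context_merge(struct ev_context *dst, struct ev_context *src)
{
	while (src->fds != NULL) {
		struct ev_fd *fde = src->fds;
		src->fds = fde->next;

		fde->ctx = dst;
		fde->next = dst->fds;
		dst->fds = fde;
	}
}

/* one iteration of the shared loop: wait for activity and dispatch it
   (capped at 64 fds to keep the sketch short) */
static int ev_loop_once(struct ev_context *ctx)
{
	struct pollfd pfds[64];
	struct ev_fd *map[64];
	struct ev_fd *fde;
	nfds_t n = 0;
	int ret, i;

	for (fde = ctx->fds; fde != NULL && n < 64; fde = fde->next) {
		pfds[n].fd = fde->fd;
		pfds[n].events = fde->events;
		pfds[n].revents = 0;
		map[n] = fde;
		n++;
	}

	ret = poll(pfds, n, -1);
	if (ret <= 0) {
		return ret;
	}

	for (i = 0; i < (int)n; i++) {
		if (pfds[i].revents != 0) {
			map[i]->handler(map[i], pfds[i].revents,
					map[i]->private_data);
		}
	}
	return ret;
}

With the contexts merged, a wait inside the client library simply runs the
shared loop, so the server's own fd handlers keep firing while the proxy
waits for its reply, which is the behaviour the commit message describes.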