summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/async_req/async_req.c42
-rw-r--r--lib/async_req/async_req.h9
-rw-r--r--lib/async_req/async_sock.c18
-rw-r--r--lib/replace/libreplace_network.m417
-rw-r--r--lib/talloc/configure.ac2
-rw-r--r--lib/talloc/talloc.h3
-rw-r--r--lib/tevent/configure.ac2
-rw-r--r--lib/tevent/libtevent.m43
-rw-r--r--lib/tevent/tevent.c93
-rw-r--r--lib/tevent/tevent.h41
-rw-r--r--lib/tevent/tevent_epoll.c62
-rw-r--r--lib/tevent/tevent_immediate.c139
-rw-r--r--lib/tevent/tevent_internal.h53
-rw-r--r--lib/tevent/tevent_queue.c99
-rw-r--r--lib/tevent/tevent_req.c74
-rw-r--r--lib/tevent/tevent_select.c60
-rw-r--r--lib/tevent/tevent_standard.c69
-rw-r--r--lib/tsocket/config.mk17
-rw-r--r--lib/tsocket/tsocket.c231
-rw-r--r--lib/tsocket/tsocket.h220
-rw-r--r--lib/tsocket/tsocket_bsd.c1126
-rw-r--r--lib/tsocket/tsocket_connect.c122
-rw-r--r--lib/tsocket/tsocket_guide.txt503
-rw-r--r--lib/tsocket/tsocket_helpers.c177
-rw-r--r--lib/tsocket/tsocket_internal.h157
-rw-r--r--lib/tsocket/tsocket_readv.c222
-rw-r--r--lib/tsocket/tsocket_recvfrom.c164
-rw-r--r--lib/tsocket/tsocket_sendto.c271
-rw-r--r--lib/tsocket/tsocket_writev.c316
-rw-r--r--lib/util/config.mk9
30 files changed, 4029 insertions, 292 deletions
diff --git a/lib/async_req/async_req.c b/lib/async_req/async_req.c
index 054c9f97cc..4dfe809738 100644
--- a/lib/async_req/async_req.c
+++ b/lib/async_req/async_req.c
@@ -194,48 +194,6 @@ bool async_req_is_error(struct async_req *req, enum async_req_state *state,
return true;
}
-static void async_req_timedout(struct tevent_context *ev,
- struct tevent_timer *te,
- struct timeval now,
- void *priv)
-{
- struct async_req *req = talloc_get_type_abort(priv, struct async_req);
- TALLOC_FREE(te);
- async_req_finish(req, ASYNC_REQ_TIMED_OUT);
-}
-
-bool async_req_set_timeout(struct async_req *req, struct tevent_context *ev,
- struct timeval to)
-{
- return (tevent_add_timer(
- ev, req,
- tevent_timeval_current_ofs(to.tv_sec, to.tv_usec),
- async_req_timedout, req)
- != NULL);
-}
-
-struct async_req *async_wait_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- struct timeval to)
-{
- struct async_req *result;
-
- result = async_req_new(mem_ctx);
- if (result == NULL) {
- return result;
- }
- if (!async_req_set_timeout(result, ev, to)) {
- TALLOC_FREE(result);
- return NULL;
- }
- return result;
-}
-
-bool async_wait_recv(struct async_req *req)
-{
- return true;
-}
-
struct async_queue_entry {
struct async_queue_entry *prev, *next;
struct async_req_queue *queue;
diff --git a/lib/async_req/async_req.h b/lib/async_req/async_req.h
index fc849880cd..fdec1b708e 100644
--- a/lib/async_req/async_req.h
+++ b/lib/async_req/async_req.h
@@ -139,15 +139,6 @@ bool async_post_error(struct async_req *req, struct tevent_context *ev,
bool async_req_is_error(struct async_req *req, enum async_req_state *state,
uint64_t *error);
-bool async_req_set_timeout(struct async_req *req, struct tevent_context *ev,
- struct timeval to);
-
-struct async_req *async_wait_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- struct timeval to);
-
-bool async_wait_recv(struct async_req *req);
-
struct async_req_queue;
struct async_req_queue *async_req_queue_init(TALLOC_CTX *mem_ctx);
diff --git a/lib/async_req/async_sock.c b/lib/async_req/async_sock.c
index be24bae6df..77df406044 100644
--- a/lib/async_req/async_sock.c
+++ b/lib/async_req/async_sock.c
@@ -389,7 +389,6 @@ struct tevent_req *writev_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
{
struct tevent_req *result;
struct writev_state *state;
- struct tevent_fd *fde;
result = tevent_req_create(mem_ctx, &state, struct writev_state);
if (result == NULL) {
@@ -405,22 +404,7 @@ struct tevent_req *writev_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
goto fail;
}
- /*
- * This if () should go away once our callers are converted to always
- * pass in a queue.
- */
-
- if (queue != NULL) {
- if (!tevent_queue_add(queue, ev, result, writev_trigger,
- NULL)) {
- goto fail;
- }
- return result;
- }
-
- fde = tevent_add_fd(ev, state, fd, TEVENT_FD_WRITE, writev_handler,
- result);
- if (fde == NULL) {
+ if (!tevent_queue_add(queue, ev, result, writev_trigger, NULL)) {
goto fail;
}
return result;
diff --git a/lib/replace/libreplace_network.m4 b/lib/replace/libreplace_network.m4
index 1dc1c44ed8..3bac72d136 100644
--- a/lib/replace/libreplace_network.m4
+++ b/lib/replace/libreplace_network.m4
@@ -8,12 +8,15 @@ LIBREPLACE_NETWORK_LIBS=""
AC_CHECK_HEADERS(sys/socket.h netinet/in.h netdb.h arpa/inet.h)
AC_CHECK_HEADERS(netinet/in_systm.h)
-AC_CHECK_HEADERS([netinet/ip.h], [], [],[#ifdef HAVE_NETINET_IN_H
-#include <netinet/in.h>
-#endif
-#ifdef HAVE_NETINET_IN_SYSTM_H
-#include <netinet/in_systm.h>
-#endif])
+AC_CHECK_HEADERS([netinet/ip.h], [], [],[
+ #include <sys/types.h>
+ #ifdef HAVE_NETINET_IN_H
+ #include <netinet/in.h>
+ #endif
+ #ifdef HAVE_NETINET_IN_SYSTM_H
+ #include <netinet/in_systm.h>
+ #endif
+])
AC_CHECK_HEADERS(netinet/tcp.h netinet/in_ip.h)
AC_CHECK_HEADERS(sys/sockio.h sys/un.h)
AC_CHECK_HEADERS(sys/uio.h)
@@ -242,7 +245,7 @@ AC_CHECK_MEMBERS([struct sockaddr.sa_len],
dnl test for getifaddrs and freeifaddrs
AC_CACHE_CHECK([for getifaddrs and freeifaddrs],libreplace_cv_HAVE_GETIFADDRS,[
-AC_TRY_COMPILE([
+AC_TRY_LINK([
#include <sys/types.h>
#if STDC_HEADERS
#include <stdlib.h>
diff --git a/lib/talloc/configure.ac b/lib/talloc/configure.ac
index 39cea393ce..00e8242d4e 100644
--- a/lib/talloc/configure.ac
+++ b/lib/talloc/configure.ac
@@ -1,5 +1,5 @@
AC_PREREQ(2.50)
-AC_INIT(talloc, 1.2.1)
+AC_INIT(talloc, 1.3.0)
AC_CONFIG_SRCDIR([talloc.c])
AC_SUBST(datarootdir)
AC_CONFIG_HEADER(config.h)
diff --git a/lib/talloc/talloc.h b/lib/talloc/talloc.h
index b62393494b..5c8d5c5fe2 100644
--- a/lib/talloc/talloc.h
+++ b/lib/talloc/talloc.h
@@ -94,6 +94,7 @@ typedef void TALLOC_CTX;
#define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type)
#define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__)
#define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count)
+#define talloc_array_length(ctx) ((ctx) ? talloc_get_size(ctx)/sizeof(*ctx) : 0)
#define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type)
#define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__)
@@ -115,6 +116,8 @@ typedef void TALLOC_CTX;
#define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a))
#endif
+#define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0)
+
/* The following definitions come from talloc.c */
void *_talloc(const void *context, size_t size);
void *talloc_pool(const void *context, size_t size);
diff --git a/lib/tevent/configure.ac b/lib/tevent/configure.ac
index 69f65c6df7..171a4088ba 100644
--- a/lib/tevent/configure.ac
+++ b/lib/tevent/configure.ac
@@ -1,5 +1,5 @@
AC_PREREQ(2.50)
-AC_INIT(tevent, 0.9.4)
+AC_INIT(tevent, 0.9.5)
AC_CONFIG_SRCDIR([tevent.c])
AC_CONFIG_HEADER(config.h)
diff --git a/lib/tevent/libtevent.m4 b/lib/tevent/libtevent.m4
index c316823a71..20730b17d6 100644
--- a/lib/tevent/libtevent.m4
+++ b/lib/tevent/libtevent.m4
@@ -26,7 +26,8 @@ AC_SUBST(TEVENT_LIBS)
TEVENT_CFLAGS="-I$teventdir"
-TEVENT_OBJ="tevent.o tevent_fd.o tevent_timed.o tevent_signal.o tevent_debug.o tevent_util.o"
+TEVENT_OBJ="tevent.o tevent_debug.o tevent_util.o"
+TEVENT_OBJ="$TEVENT_OBJ tevent_fd.o tevent_timed.o tevent_immediate.o tevent_signal.o"
TEVENT_OBJ="$TEVENT_OBJ tevent_req.o tevent_wakeup.o tevent_queue.o"
TEVENT_OBJ="$TEVENT_OBJ tevent_standard.o tevent_select.o"
diff --git a/lib/tevent/tevent.c b/lib/tevent/tevent.c
index 867cfc08fe..0c02e46f3c 100644
--- a/lib/tevent/tevent.c
+++ b/lib/tevent/tevent.c
@@ -143,6 +143,7 @@ int tevent_common_context_destructor(struct tevent_context *ev)
{
struct tevent_fd *fd, *fn;
struct tevent_timer *te, *tn;
+ struct tevent_immediate *ie, *in;
struct tevent_signal *se, *sn;
if (ev->pipe_fde) {
@@ -162,6 +163,13 @@ int tevent_common_context_destructor(struct tevent_context *ev)
DLIST_REMOVE(ev->timer_events, te);
}
+ for (ie = ev->immediate_events; ie; ie = in) {
+ in = ie->next;
+ ie->event_ctx = NULL;
+ ie->cancel_fn = NULL;
+ DLIST_REMOVE(ev->immediate_events, ie);
+ }
+
for (se = ev->signal_events; se; se = sn) {
sn = se->next;
se->event_ctx = NULL;
@@ -350,6 +358,47 @@ struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
}
/*
+ allocate an immediate event
+ return NULL on failure (memory allocation error)
+*/
+struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
+ const char *location)
+{
+ struct tevent_immediate *im;
+
+ im = talloc(mem_ctx, struct tevent_immediate);
+ if (im == NULL) return NULL;
+
+ im->prev = NULL;
+ im->next = NULL;
+ im->event_ctx = NULL;
+ im->create_location = location;
+ im->handler = NULL;
+ im->private_data = NULL;
+ im->handler_name = NULL;
+ im->schedule_location = NULL;
+ im->cancel_fn = NULL;
+ im->additional_data = NULL;
+
+ return im;
+}
+
+/*
+ schedule an immediate event
+ return NULL on failure
+*/
+void _tevent_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ev,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ ev->ops->schedule_immediate(im, ev, handler, private_data,
+ handler_name, location);
+}
+
+/*
add a signal event
sa_flags are flags to sigaction(2)
@@ -378,6 +427,14 @@ void tevent_loop_set_nesting_hook(struct tevent_context *ev,
tevent_nesting_hook hook,
void *private_data)
{
+ if (ev->nesting.hook_fn &&
+ (ev->nesting.hook_fn != hook ||
+ ev->nesting.hook_private != private_data)) {
+ /* the way the nesting hook code is currently written
+ we cannot support two different nesting hooks at the
+ same time. */
+ tevent_abort(ev, "tevent: Violation of nesting hook rules\n");
+ }
ev->nesting.hook_fn = hook;
ev->nesting.hook_private = private_data;
}
@@ -411,6 +468,8 @@ int _tevent_loop_once(struct tevent_context *ev, const char *location)
errno = ELOOP;
return -1;
}
+ }
+ if (ev->nesting.level > 0) {
if (ev->nesting.hook_fn) {
int ret2;
ret2 = ev->nesting.hook_fn(ev,
@@ -428,7 +487,7 @@ int _tevent_loop_once(struct tevent_context *ev, const char *location)
ret = ev->ops->loop_once(ev, location);
- if (ev->nesting.level > 1) {
+ if (ev->nesting.level > 0) {
if (ev->nesting.hook_fn) {
int ret2;
ret2 = ev->nesting.hook_fn(ev,
@@ -468,6 +527,8 @@ int _tevent_loop_until(struct tevent_context *ev,
errno = ELOOP;
return -1;
}
+ }
+ if (ev->nesting.level > 0) {
if (ev->nesting.hook_fn) {
int ret2;
ret2 = ev->nesting.hook_fn(ev,
@@ -490,7 +551,7 @@ int _tevent_loop_until(struct tevent_context *ev,
}
}
- if (ev->nesting.level > 1) {
+ if (ev->nesting.level > 0) {
if (ev->nesting.hook_fn) {
int ret2;
ret2 = ev->nesting.hook_fn(ev,
@@ -514,6 +575,34 @@ done:
/*
return on failure or (with 0) if all fd events are removed
*/
+int tevent_common_loop_wait(struct tevent_context *ev,
+ const char *location)
+{
+ /*
+ * loop as long as we have events pending
+ */
+ while (ev->fd_events ||
+ ev->timer_events ||
+ ev->immediate_events ||
+ ev->signal_events) {
+ int ret;
+ ret = _tevent_loop_once(ev, location);
+ if (ret != 0) {
+ tevent_debug(ev, TEVENT_DEBUG_FATAL,
+ "_tevent_loop_once() failed: %d - %s\n",
+ ret, strerror(errno));
+ return ret;
+ }
+ }
+
+ tevent_debug(ev, TEVENT_DEBUG_WARNING,
+ "tevent_common_loop_wait() out of events\n");
+ return 0;
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
int _tevent_loop_wait(struct tevent_context *ev, const char *location)
{
return ev->ops->loop_wait(ev, location);
diff --git a/lib/tevent/tevent.h b/lib/tevent/tevent.h
index 4a3f51ae5e..6c5df6321a 100644
--- a/lib/tevent/tevent.h
+++ b/lib/tevent/tevent.h
@@ -37,6 +37,7 @@ struct tevent_context;
struct tevent_ops;
struct tevent_fd;
struct tevent_timer;
+struct tevent_immediate;
struct tevent_signal;
/* event handler types */
@@ -52,6 +53,9 @@ typedef void (*tevent_timer_handler_t)(struct tevent_context *ev,
struct tevent_timer *te,
struct timeval current_time,
void *private_data);
+typedef void (*tevent_immediate_handler_t)(struct tevent_context *ctx,
+ struct tevent_immediate *im,
+ void *private_data);
typedef void (*tevent_signal_handler_t)(struct tevent_context *ev,
struct tevent_signal *se,
int signum,
@@ -87,6 +91,21 @@ struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
_tevent_add_timer(ev, mem_ctx, next_event, handler, private_data, \
#handler, __location__)
+struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
+ const char *location);
+#define tevent_create_immediate(mem_ctx) \
+ _tevent_create_immediate(mem_ctx, __location__)
+
+void _tevent_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ctx,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location);
+#define tevent_schedule_immediate(im, ctx, handler, private_data) \
+ _tevent_schedule_immediate(im, ctx, handler, private_data, \
+ #handler, __location__);
+
struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
int signum,
@@ -233,13 +252,22 @@ bool tevent_req_set_endtime(struct tevent_req *req,
struct tevent_context *ev,
struct timeval endtime);
-void tevent_req_done(struct tevent_req *req);
+void _tevent_req_done(struct tevent_req *req,
+ const char *location);
+#define tevent_req_done(req) \
+ _tevent_req_done(req, __location__)
-bool tevent_req_error(struct tevent_req *req,
- uint64_t error);
+bool _tevent_req_error(struct tevent_req *req,
+ uint64_t error,
+ const char *location);
+#define tevent_req_error(req, error) \
+ _tevent_req_error(req, error, __location__)
-bool tevent_req_nomem(const void *p,
- struct tevent_req *req);
+bool _tevent_req_nomem(const void *p,
+ struct tevent_req *req,
+ const char *location);
+#define tevent_req_nomem(p, req) \
+ _tevent_req_nomem(p, req, __location__)
struct tevent_req *tevent_req_post(struct tevent_req *req,
struct tevent_context *ev);
@@ -295,8 +323,7 @@ bool tevent_queue_add(struct tevent_queue *queue,
struct tevent_req *req,
tevent_queue_trigger_fn_t trigger,
void *private_data);
-bool tevent_queue_start(struct tevent_queue *queue,
- struct tevent_context *ev);
+void tevent_queue_start(struct tevent_queue *queue);
void tevent_queue_stop(struct tevent_queue *queue);
size_t tevent_queue_length(struct tevent_queue *queue);
diff --git a/lib/tevent/tevent_epoll.c b/lib/tevent/tevent_epoll.c
index b63d299d94..7c7f389d5b 100644
--- a/lib/tevent/tevent_epoll.c
+++ b/lib/tevent/tevent_epoll.c
@@ -35,14 +35,6 @@ struct epoll_event_context {
/* a pointer back to the generic event_context */
struct tevent_context *ev;
- /* this is changed by the destructors for the fd event
- type. It is used to detect event destruction by event
- handlers, which means the code that is calling the event
- handler needs to assume that the linked list is no longer
- valid
- */
- uint32_t destruction_count;
-
/* when using epoll this is the handle from epoll_create */
int epoll_fd;
@@ -242,9 +234,8 @@ static void epoll_change_event(struct epoll_event_context *epoll_ev, struct teve
static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
{
int ret, i;
-#define MAXEVENTS 32
+#define MAXEVENTS 1
struct epoll_event events[MAXEVENTS];
- uint32_t destruction_count = ++epoll_ev->destruction_count;
int timeout = -1;
if (epoll_ev->epoll_fd == -1) return -1;
@@ -305,9 +296,7 @@ static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval
if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
if (flags) {
fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
- if (destruction_count != epoll_ev->destruction_count) {
- break;
- }
+ break;
}
}
@@ -351,8 +340,6 @@ static int epoll_event_fd_destructor(struct tevent_fd *fde)
epoll_check_reopen(epoll_ev);
- epoll_ev->destruction_count++;
-
epoll_del_event(epoll_ev, fde);
}
@@ -417,6 +404,16 @@ static int epoll_event_loop_once(struct tevent_context *ev, const char *location
struct epoll_event_context);
struct timeval tval;
+ if (ev->signal_events &&
+ tevent_common_check_signal(ev)) {
+ return 0;
+ }
+
+ if (ev->immediate_events &&
+ tevent_common_loop_immediate(ev)) {
+ return 0;
+ }
+
tval = tevent_common_loop_timer_delay(ev);
if (tevent_timeval_is_zero(&tval)) {
return 0;
@@ -427,32 +424,17 @@ static int epoll_event_loop_once(struct tevent_context *ev, const char *location
return epoll_event_loop(epoll_ev, &tval);
}
-/*
- return on failure or (with 0) if all fd events are removed
-*/
-static int epoll_event_loop_wait(struct tevent_context *ev, const char *location)
-{
- struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
- struct epoll_event_context);
- while (epoll_ev->ev->fd_events) {
- if (epoll_event_loop_once(ev, location) != 0) {
- break;
- }
- }
-
- return 0;
-}
-
static const struct tevent_ops epoll_event_ops = {
- .context_init = epoll_event_context_init,
- .add_fd = epoll_event_add_fd,
- .set_fd_close_fn= tevent_common_fd_set_close_fn,
- .get_fd_flags = tevent_common_fd_get_flags,
- .set_fd_flags = epoll_event_set_fd_flags,
- .add_timer = tevent_common_add_timer,
- .add_signal = tevent_common_add_signal,
- .loop_once = epoll_event_loop_once,
- .loop_wait = epoll_event_loop_wait,
+ .context_init = epoll_event_context_init,
+ .add_fd = epoll_event_add_fd,
+ .set_fd_close_fn = tevent_common_fd_set_close_fn,
+ .get_fd_flags = tevent_common_fd_get_flags,
+ .set_fd_flags = epoll_event_set_fd_flags,
+ .add_timer = tevent_common_add_timer,
+ .schedule_immediate = tevent_common_schedule_immediate,
+ .add_signal = tevent_common_add_signal,
+ .loop_once = epoll_event_loop_once,
+ .loop_wait = tevent_common_loop_wait,
};
bool tevent_epoll_init(void)
diff --git a/lib/tevent/tevent_immediate.c b/lib/tevent/tevent_immediate.c
new file mode 100644
index 0000000000..1ac293e175
--- /dev/null
+++ b/lib/tevent/tevent_immediate.c
@@ -0,0 +1,139 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ common events code for immediate events
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "tevent.h"
+#include "tevent_internal.h"
+#include "tevent_util.h"
+
+static void tevent_common_immediate_cancel(struct tevent_immediate *im)
+{
+ if (!im->event_ctx) {
+ return;
+ }
+
+ tevent_debug(im->event_ctx, TEVENT_DEBUG_TRACE,
+ "Cancel immediate event %p \"%s\"\n",
+ im, im->handler_name);
+
+ /* let the backend free im->additional_data */
+ if (im->cancel_fn) {
+ im->cancel_fn(im);
+ }
+
+ DLIST_REMOVE(im->event_ctx->immediate_events, im);
+ im->event_ctx = NULL;
+ im->handler = NULL;
+ im->private_data = NULL;
+ im->handler_name = NULL;
+ im->schedule_location = NULL;
+ im->cancel_fn = NULL;
+ im->additional_data = NULL;
+
+ talloc_set_destructor(im, NULL);
+}
+
+/*
+ destroy an immediate event
+*/
+static int tevent_common_immediate_destructor(struct tevent_immediate *im)
+{
+ tevent_common_immediate_cancel(im);
+ return 0;
+}
+
+/*
+ * schedule an immediate event on
+ */
+void tevent_common_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ev,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ tevent_common_immediate_cancel(im);
+
+ if (!handler) {
+ return;
+ }
+
+ im->event_ctx = ev;
+ im->handler = handler;
+ im->private_data = private_data;
+ im->handler_name = handler_name;
+ im->schedule_location = location;
+ im->cancel_fn = NULL;
+ im->additional_data = NULL;
+
+ DLIST_ADD_END(ev->immediate_events, im, struct tevent_immediate *);
+ talloc_set_destructor(im, tevent_common_immediate_destructor);
+
+ tevent_debug(ev, TEVENT_DEBUG_TRACE,
+ "Schedule immediate event \"%s\": %p\n",
+ handler_name, im);
+}
+
+/*
+ trigger the first immediate event and return true
+ if no event was triggered return false
+*/
+bool tevent_common_loop_immediate(struct tevent_context *ev)
+{
+ struct tevent_immediate *im = ev->immediate_events;
+ tevent_immediate_handler_t handler;
+ void *private_data;
+
+ if (!im) {
+ return false;
+ }
+
+ tevent_debug(ev, TEVENT_DEBUG_TRACE,
+ "Run immediate event \"%s\": %p\n",
+ im->handler_name, im);
+
+ /*
+ * remember the handler and then clear the event
+ * the handler might reschedule the event
+ */
+ handler = im->handler;
+ private_data = im->private_data;
+
+ DLIST_REMOVE(im->event_ctx->immediate_events, im);
+ im->event_ctx = NULL;
+ im->handler = NULL;
+ im->private_data = NULL;
+ im->handler_name = NULL;
+ im->schedule_location = NULL;
+ im->cancel_fn = NULL;
+ im->additional_data = NULL;
+
+ talloc_set_destructor(im, NULL);
+
+ handler(ev, im, private_data);
+
+ return true;
+}
+
diff --git a/lib/tevent/tevent_internal.h b/lib/tevent/tevent_internal.h
index f10485398f..eebf767067 100644
--- a/lib/tevent/tevent_internal.h
+++ b/lib/tevent/tevent_internal.h
@@ -85,7 +85,17 @@ struct tevent_req {
*
* This for debugging only.
*/
- const char *location;
+ const char *create_location;
+
+ /**
+ * @brief The location where the request was finished
+ *
+ * This uses the __location__ macro via the tevent_req_done(),
+ * tevent_req_error() or tevent_req_nomem() macro.
+ *
+ * This for debugging only.
+ */
+ const char *finish_location;
/**
* @brief The external state - will be queried by the caller
@@ -105,10 +115,10 @@ struct tevent_req {
uint64_t error;
/**
- * @brief the timer event if tevent_req_post was used
+ * @brief the immediate event used by tevent_req_post
*
*/
- struct tevent_timer *trigger;
+ struct tevent_immediate *trigger;
/**
* @brief the timer event if tevent_req_set_timeout was used
@@ -143,6 +153,15 @@ struct tevent_ops {
void *private_data,
const char *handler_name,
const char *location);
+
+ /* immediate event functions */
+ void (*schedule_immediate)(struct tevent_immediate *im,
+ struct tevent_context *ev,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location);
+
/* signal functions */
struct tevent_signal *(*add_signal)(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
@@ -188,6 +207,21 @@ struct tevent_timer {
void *additional_data;
};
+struct tevent_immediate {
+ struct tevent_immediate *prev, *next;
+ struct tevent_context *event_ctx;
+ tevent_immediate_handler_t handler;
+ /* this is private for the specific handler */
+ void *private_data;
+ /* this is for debugging only! */
+ const char *handler_name;
+ const char *create_location;
+ const char *schedule_location;
+ /* this is private for the events_ops implementation */
+ void (*cancel_fn)(struct tevent_immediate *im);
+ void *additional_data;
+};
+
struct tevent_signal {
struct tevent_signal *prev, *next;
struct tevent_context *event_ctx;
@@ -222,6 +256,9 @@ struct tevent_context {
/* list of timed events - used by common code */
struct tevent_timer *timer_events;
+ /* list of immediate events - used by common code */
+ struct tevent_immediate *immediate_events;
+
/* list of signal events - used by common code */
struct tevent_signal *signal_events;
@@ -247,6 +284,8 @@ struct tevent_context {
bool tevent_register_backend(const char *name, const struct tevent_ops *ops);
int tevent_common_context_destructor(struct tevent_context *ev);
+int tevent_common_loop_wait(struct tevent_context *ev,
+ const char *location);
int tevent_common_fd_destructor(struct tevent_fd *fde);
struct tevent_fd *tevent_common_add_fd(struct tevent_context *ev,
@@ -271,6 +310,14 @@ struct tevent_timer *tevent_common_add_timer(struct tevent_context *ev,
const char *location);
struct timeval tevent_common_loop_timer_delay(struct tevent_context *);
+void tevent_common_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ev,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location);
+bool tevent_common_loop_immediate(struct tevent_context *ev);
+
struct tevent_signal *tevent_common_add_signal(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
int signum,
diff --git a/lib/tevent/tevent_queue.c b/lib/tevent/tevent_queue.c
index 6c8fbe4f95..3715c35e4f 100644
--- a/lib/tevent/tevent_queue.c
+++ b/lib/tevent/tevent_queue.c
@@ -34,6 +34,7 @@ struct tevent_queue_entry {
bool triggered;
struct tevent_req *req;
+ struct tevent_context *ev;
tevent_queue_trigger_fn_t trigger;
void *private_data;
@@ -44,12 +45,16 @@ struct tevent_queue {
const char *location;
bool running;
- struct tevent_timer *timer;
+ struct tevent_immediate *immediate;
size_t length;
struct tevent_queue_entry *list;
};
+static void tevent_queue_immediate_trigger(struct tevent_context *ev,
+ struct tevent_immediate *im,
+ void *private_data);
+
static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
{
struct tevent_queue *q = e->queue;
@@ -61,14 +66,23 @@ static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
DLIST_REMOVE(q->list, e);
q->length--;
- if (e->triggered &&
- q->running &&
- q->list) {
- q->list->triggered = true;
- q->list->trigger(q->list->req,
- q->list->private_data);
+ if (!q->running) {
+ return 0;
+ }
+
+ if (!q->list) {
+ return 0;
+ }
+
+ if (q->list->triggered) {
+ return 0;
}
+ tevent_schedule_immediate(q->immediate,
+ q->list->ev,
+ tevent_queue_immediate_trigger,
+ q);
+
return 0;
}
@@ -100,6 +114,11 @@ struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
talloc_free(queue);
return NULL;
}
+ queue->immediate = tevent_create_immediate(queue);
+ if (!queue->immediate) {
+ talloc_free(queue);
+ return NULL;
+ }
queue->location = location;
@@ -110,16 +129,16 @@ struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
return queue;
}
-static void tevent_queue_timer_start(struct tevent_context *ev,
- struct tevent_timer *te,
- struct timeval now,
- void *private_data)
+static void tevent_queue_immediate_trigger(struct tevent_context *ev,
+ struct tevent_immediate *im,
+ void *private_data)
{
struct tevent_queue *q = talloc_get_type(private_data,
struct tevent_queue);
- talloc_free(te);
- q->timer = NULL;
+ if (!q->running) {
+ return;
+ }
q->list->triggered = true;
q->list->trigger(q->list->req, q->list->private_data);
@@ -140,56 +159,56 @@ bool tevent_queue_add(struct tevent_queue *queue,
e->queue = queue;
e->req = req;
+ e->ev = ev;
e->trigger = trigger;
e->private_data = private_data;
- if (queue->running &&
- !queue->timer &&
- !queue->list) {
- queue->timer = tevent_add_timer(ev, queue, tevent_timeval_zero(),
- tevent_queue_timer_start,
- queue);
- if (!queue->timer) {
- talloc_free(e);
- return false;
- }
- }
-
DLIST_ADD_END(queue->list, e, struct tevent_queue_entry *);
queue->length++;
talloc_set_destructor(e, tevent_queue_entry_destructor);
+ if (!queue->running) {
+ return true;
+ }
+
+ if (queue->list->triggered) {
+ return true;
+ }
+
+ tevent_schedule_immediate(queue->immediate,
+ queue->list->ev,
+ tevent_queue_immediate_trigger,
+ queue);
+
return true;
}
-bool tevent_queue_start(struct tevent_queue *queue,
- struct tevent_context *ev)
+void tevent_queue_start(struct tevent_queue *queue)
{
if (queue->running) {
/* already started */
- return true;
+ return;
}
- if (!queue->timer &&
- queue->list) {
- queue->timer = tevent_add_timer(ev, queue, tevent_timeval_zero(),
- tevent_queue_timer_start,
- queue);
- if (!queue->timer) {
- return false;
- }
+ queue->running = true;
+
+ if (!queue->list) {
+ return;
}
- queue->running = true;
+ if (queue->list->triggered) {
+ return;
+ }
- return true;
+ tevent_schedule_immediate(queue->immediate,
+ queue->list->ev,
+ tevent_queue_immediate_trigger,
+ queue);
}
void tevent_queue_stop(struct tevent_queue *queue)
{
queue->running = false;
- talloc_free(queue->timer);
- queue->timer = NULL;
}
size_t tevent_queue_length(struct tevent_queue *queue)
diff --git a/lib/tevent/tevent_req.c b/lib/tevent/tevent_req.c
index 3832088b34..380a6388e2 100644
--- a/lib/tevent/tevent_req.c
+++ b/lib/tevent/tevent_req.c
@@ -43,7 +43,7 @@ char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
return talloc_asprintf(mem_ctx,
"tevent_req[%p/%s]: state[%d] error[%lld (0x%llX)] "
" state[%s (%p)] timer[%p]",
- req, req->internal.location,
+ req, req->internal.create_location,
req->internal.state,
(unsigned long long)req->internal.error,
(unsigned long long)req->internal.error,
@@ -95,8 +95,14 @@ struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
return NULL;
}
req->internal.private_type = type;
- req->internal.location = location;
+ req->internal.create_location = location;
+ req->internal.finish_location = NULL;
req->internal.state = TEVENT_REQ_IN_PROGRESS;
+ req->internal.trigger = tevent_create_immediate(req);
+ if (!req->internal.trigger) {
+ talloc_free(req);
+ return NULL;
+ }
data = talloc_size(req, data_size);
if (data == NULL) {
@@ -111,9 +117,12 @@ struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
return req;
}
-static void tevent_req_finish(struct tevent_req *req, enum tevent_req_state state)
+static void tevent_req_finish(struct tevent_req *req,
+ enum tevent_req_state state,
+ const char *location)
{
req->internal.state = state;
+ req->internal.finish_location = location;
if (req->async.fn != NULL) {
req->async.fn(req);
}
@@ -128,9 +137,10 @@ static void tevent_req_finish(struct tevent_req *req, enum tevent_req_state stat
* function.
*/
-void tevent_req_done(struct tevent_req *req)
+void _tevent_req_done(struct tevent_req *req,
+ const char *location)
{
- tevent_req_finish(req, TEVENT_REQ_DONE);
+ tevent_req_finish(req, TEVENT_REQ_DONE, location);
}
/**
@@ -161,14 +171,16 @@ void tevent_req_done(struct tevent_req *req)
* \endcode
*/
-bool tevent_req_error(struct tevent_req *req, uint64_t error)
+bool _tevent_req_error(struct tevent_req *req,
+ uint64_t error,
+ const char *location)
{
if (error == 0) {
return false;
}
req->internal.error = error;
- tevent_req_finish(req, TEVENT_REQ_USER_ERROR);
+ tevent_req_finish(req, TEVENT_REQ_USER_ERROR, location);
return true;
}
@@ -189,42 +201,39 @@ bool tevent_req_error(struct tevent_req *req, uint64_t error)
* \endcode
*/
-bool tevent_req_nomem(const void *p, struct tevent_req *req)
+bool _tevent_req_nomem(const void *p,
+ struct tevent_req *req,
+ const char *location)
{
if (p != NULL) {
return false;
}
- tevent_req_finish(req, TEVENT_REQ_NO_MEMORY);
+ tevent_req_finish(req, TEVENT_REQ_NO_MEMORY, location);
return true;
}
/**
- * @brief Timed event callback
+ * @brief Immediate event callback
* @param[in] ev Event context
- * @param[in] te The timed event
- * @param[in] now zero time
+ * @param[in] im The immediate event
* @param[in] priv The async request to be finished
*/
static void tevent_req_trigger(struct tevent_context *ev,
- struct tevent_timer *te,
- struct timeval zero,
+ struct tevent_immediate *im,
void *private_data)
{
struct tevent_req *req = talloc_get_type(private_data,
struct tevent_req);
- talloc_free(req->internal.trigger);
- req->internal.trigger = NULL;
-
- tevent_req_finish(req, req->internal.state);
+ tevent_req_finish(req, req->internal.state,
+ req->internal.finish_location);
}
/**
* @brief Finish a request before the caller had the change to set the callback
* @param[in] req The finished request
* @param[in] ev The tevent_context for the timed event
- * @retval On success req will be returned,
- * on failure req will be destroyed
+ * @retval req will be returned
*
* An implementation of an async request might find that it can either finish
* the request without waiting for an external event, or it can't even start
@@ -237,13 +246,8 @@ static void tevent_req_trigger(struct tevent_context *ev,
struct tevent_req *tevent_req_post(struct tevent_req *req,
struct tevent_context *ev)
{
- req->internal.trigger = tevent_add_timer(ev, req, tevent_timeval_zero(),
- tevent_req_trigger, req);
- if (!req->internal.trigger) {
- talloc_free(req);
- return NULL;
- }
-
+ tevent_schedule_immediate(req->internal.trigger,
+ ev, tevent_req_trigger, req);
return req;
}
@@ -265,14 +269,11 @@ bool tevent_req_is_in_progress(struct tevent_req *req)
*/
void tevent_req_received(struct tevent_req *req)
{
- talloc_free(req->data);
- req->data = NULL;
+ TALLOC_FREE(req->data);
req->private_print = NULL;
- talloc_free(req->internal.trigger);
- req->internal.trigger = NULL;
- talloc_free(req->internal.timer);
- req->internal.timer = NULL;
+ TALLOC_FREE(req->internal.trigger);
+ TALLOC_FREE(req->internal.timer);
req->internal.state = TEVENT_REQ_RECEIVED;
}
@@ -313,17 +314,16 @@ static void tevent_req_timedout(struct tevent_context *ev,
struct tevent_req *req = talloc_get_type(private_data,
struct tevent_req);
- talloc_free(req->internal.timer);
- req->internal.timer = NULL;
+ TALLOC_FREE(req->internal.timer);
- tevent_req_finish(req, TEVENT_REQ_TIMED_OUT);
+ tevent_req_finish(req, TEVENT_REQ_TIMED_OUT, __FUNCTION__);
}
bool tevent_req_set_endtime(struct tevent_req *req,
struct tevent_context *ev,
struct timeval endtime)
{
- talloc_free(req->internal.timer);
+ TALLOC_FREE(req->internal.timer);
req->internal.timer = tevent_add_timer(ev, req, endtime,
tevent_req_timedout,
diff --git a/lib/tevent/tevent_select.c b/lib/tevent/tevent_select.c
index cdddb601c4..d97418991a 100644
--- a/lib/tevent/tevent_select.c
+++ b/lib/tevent/tevent_select.c
@@ -38,10 +38,6 @@ struct select_event_context {
/* information for exiting from the event loop */
int exit_code;
-
- /* this is incremented when the loop over events causes something which
- could change the events yet to be processed */
- uint32_t destruction_count;
};
/*
@@ -95,8 +91,6 @@ static int select_event_fd_destructor(struct tevent_fd *fde)
if (select_ev->maxfd == fde->fd) {
select_ev->maxfd = EVENT_INVALID_MAXFD;
}
-
- select_ev->destruction_count++;
}
return tevent_common_fd_destructor(fde);
@@ -138,7 +132,6 @@ static int select_event_loop_select(struct select_event_context *select_ev, stru
fd_set r_fds, w_fds;
struct tevent_fd *fde;
int selrtn;
- uint32_t destruction_count = ++select_ev->destruction_count;
/* we maybe need to recalculate the maxfd */
if (select_ev->maxfd == EVENT_INVALID_MAXFD) {
@@ -200,15 +193,13 @@ static int select_event_loop_select(struct select_event_context *select_ev, stru
if (FD_ISSET(fde->fd, &w_fds)) flags |= TEVENT_FD_WRITE;
if (flags) {
fde->handler(select_ev->ev, fde, flags, fde->private_data);
- if (destruction_count != select_ev->destruction_count) {
- break;
- }
+ break;
}
}
}
return 0;
-}
+}
/*
do a single event loop using the events defined in ev
@@ -219,42 +210,35 @@ static int select_event_loop_once(struct tevent_context *ev, const char *locatio
struct select_event_context);
struct timeval tval;
- tval = tevent_common_loop_timer_delay(ev);
- if (tevent_timeval_is_zero(&tval)) {
+ if (ev->signal_events &&
+ tevent_common_check_signal(ev)) {
return 0;
}
- return select_event_loop_select(select_ev, &tval);
-}
-
-/*
- return on failure or (with 0) if all fd events are removed
-*/
-static int select_event_loop_wait(struct tevent_context *ev, const char *location)
-{
- struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
- struct select_event_context);
- select_ev->exit_code = 0;
+ if (ev->immediate_events &&
+ tevent_common_loop_immediate(ev)) {
+ return 0;
+ }
- while (ev->fd_events && select_ev->exit_code == 0) {
- if (select_event_loop_once(ev, location) != 0) {
- break;
- }
+ tval = tevent_common_loop_timer_delay(ev);
+ if (tevent_timeval_is_zero(&tval)) {
+ return 0;
}
- return select_ev->exit_code;
+ return select_event_loop_select(select_ev, &tval);
}
static const struct tevent_ops select_event_ops = {
- .context_init = select_event_context_init,
- .add_fd = select_event_add_fd,
- .set_fd_close_fn= tevent_common_fd_set_close_fn,
- .get_fd_flags = tevent_common_fd_get_flags,
- .set_fd_flags = tevent_common_fd_set_flags,
- .add_timer = tevent_common_add_timer,
- .add_signal = tevent_common_add_signal,
- .loop_once = select_event_loop_once,
- .loop_wait = select_event_loop_wait,
+ .context_init = select_event_context_init,
+ .add_fd = select_event_add_fd,
+ .set_fd_close_fn = tevent_common_fd_set_close_fn,
+ .get_fd_flags = tevent_common_fd_get_flags,
+ .set_fd_flags = tevent_common_fd_set_flags,
+ .add_timer = tevent_common_add_timer,
+ .schedule_immediate = tevent_common_schedule_immediate,
+ .add_signal = tevent_common_add_signal,
+ .loop_once = select_event_loop_once,
+ .loop_wait = tevent_common_loop_wait,
};
bool tevent_select_init(void)
diff --git a/lib/tevent/tevent_standard.c b/lib/tevent/tevent_standard.c
index 73a45e8c20..c3f8b36e84 100644
--- a/lib/tevent/tevent_standard.c
+++ b/lib/tevent/tevent_standard.c
@@ -48,14 +48,6 @@ struct std_event_context {
/* information for exiting from the event loop */
int exit_code;
- /* this is changed by the destructors for the fd event
- type. It is used to detect event destruction by event
- handlers, which means the code that is calling the event
- handler needs to assume that the linked list is no longer
- valid
- */
- uint32_t destruction_count;
-
/* when using epoll this is the handle from epoll_create */
int epoll_fd;
@@ -253,9 +245,8 @@ static void epoll_change_event(struct std_event_context *std_ev, struct tevent_f
static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tvalp)
{
int ret, i;
-#define MAXEVENTS 8
+#define MAXEVENTS 1
struct epoll_event events[MAXEVENTS];
- uint32_t destruction_count = ++std_ev->destruction_count;
int timeout = -1;
if (std_ev->epoll_fd == -1) return -1;
@@ -316,9 +307,7 @@ static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tv
if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
if (flags) {
fde->handler(std_ev->ev, fde, flags, fde->private_data);
- if (destruction_count != std_ev->destruction_count) {
- break;
- }
+ break;
}
}
@@ -390,8 +379,6 @@ static int std_event_fd_destructor(struct tevent_fd *fde)
std_ev->maxfd = EVENT_INVALID_MAXFD;
}
- std_ev->destruction_count++;
-
epoll_del_event(std_ev, fde);
}
@@ -459,7 +446,6 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
fd_set r_fds, w_fds;
struct tevent_fd *fde;
int selrtn;
- uint32_t destruction_count = ++std_ev->destruction_count;
/* we maybe need to recalculate the maxfd */
if (std_ev->maxfd == EVENT_INVALID_MAXFD) {
@@ -521,9 +507,7 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
if (FD_ISSET(fde->fd, &w_fds)) flags |= TEVENT_FD_WRITE;
if (flags) {
fde->handler(std_ev->ev, fde, flags, fde->private_data);
- if (destruction_count != std_ev->destruction_count) {
- break;
- }
+ break;
}
}
}
@@ -540,6 +524,16 @@ static int std_event_loop_once(struct tevent_context *ev, const char *location)
struct std_event_context);
struct timeval tval;
+ if (ev->signal_events &&
+ tevent_common_check_signal(ev)) {
+ return 0;
+ }
+
+ if (ev->immediate_events &&
+ tevent_common_loop_immediate(ev)) {
+ return 0;
+ }
+
tval = tevent_common_loop_timer_delay(ev);
if (tevent_timeval_is_zero(&tval)) {
return 0;
@@ -554,34 +548,17 @@ static int std_event_loop_once(struct tevent_context *ev, const char *location)
return std_event_loop_select(std_ev, &tval);
}
-/*
- return on failure or (with 0) if all fd events are removed
-*/
-static int std_event_loop_wait(struct tevent_context *ev, const char *location)
-{
- struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
- struct std_event_context);
- std_ev->exit_code = 0;
-
- while (ev->fd_events && std_ev->exit_code == 0) {
- if (std_event_loop_once(ev, location) != 0) {
- break;
- }
- }
-
- return std_ev->exit_code;
-}
-
static const struct tevent_ops std_event_ops = {
- .context_init = std_event_context_init,
- .add_fd = std_event_add_fd,
- .set_fd_close_fn= tevent_common_fd_set_close_fn,
- .get_fd_flags = tevent_common_fd_get_flags,
- .set_fd_flags = std_event_set_fd_flags,
- .add_timer = tevent_common_add_timer,
- .add_signal = tevent_common_add_signal,
- .loop_once = std_event_loop_once,
- .loop_wait = std_event_loop_wait,
+ .context_init = std_event_context_init,
+ .add_fd = std_event_add_fd,
+ .set_fd_close_fn = tevent_common_fd_set_close_fn,
+ .get_fd_flags = tevent_common_fd_get_flags,
+ .set_fd_flags = std_event_set_fd_flags,
+ .add_timer = tevent_common_add_timer,
+ .schedule_immediate = tevent_common_schedule_immediate,
+ .add_signal = tevent_common_add_signal,
+ .loop_once = std_event_loop_once,
+ .loop_wait = tevent_common_loop_wait,
};
diff --git a/lib/tsocket/config.mk b/lib/tsocket/config.mk
new file mode 100644
index 0000000000..c35f0afd6f
--- /dev/null
+++ b/lib/tsocket/config.mk
@@ -0,0 +1,17 @@
+[SUBSYSTEM::LIBTSOCKET]
+PRIVATE_DEPENDENCIES = LIBTALLOC LIBTEVENT LIBREPLACE_NETWORK
+
+LIBTSOCKET_OBJ_FILES = $(addprefix ../lib/tsocket/, \
+ tsocket.o \
+ tsocket_helpers.o \
+ tsocket_bsd.o \
+ tsocket_recvfrom.o \
+ tsocket_sendto.o \
+ tsocket_connect.o \
+ tsocket_writev.o \
+ tsocket_readv.o)
+
+PUBLIC_HEADERS += $(addprefix ../lib/tsocket/, \
+ tsocket.h\
+ tsocket_internal.h)
+
diff --git a/lib/tsocket/tsocket.c b/lib/tsocket/tsocket.c
new file mode 100644
index 0000000000..1a12e691a9
--- /dev/null
+++ b/lib/tsocket/tsocket.c
@@ -0,0 +1,231 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+static int tsocket_context_destructor(struct tsocket_context *sock)
+{
+ tsocket_disconnect(sock);
+ return 0;
+}
+
+struct tsocket_context *_tsocket_context_create(TALLOC_CTX *mem_ctx,
+ const struct tsocket_context_ops *ops,
+ void *pstate,
+ size_t psize,
+ const char *type,
+ const char *location)
+{
+ void **ppstate = (void **)pstate;
+ struct tsocket_context *sock;
+
+ sock = talloc_zero(mem_ctx, struct tsocket_context);
+ if (!sock) {
+ return NULL;
+ }
+ sock->ops = ops;
+ sock->location = location;
+ sock->private_data = talloc_size(sock, psize);
+ if (!sock->private_data) {
+ talloc_free(sock);
+ return NULL;
+ }
+ talloc_set_name_const(sock->private_data, type);
+
+ talloc_set_destructor(sock, tsocket_context_destructor);
+
+ *ppstate = sock->private_data;
+ return sock;
+}
+
+int tsocket_set_event_context(struct tsocket_context *sock,
+ struct tevent_context *ev)
+{
+ return sock->ops->set_event_context(sock, ev);
+}
+
+int tsocket_set_readable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data)
+{
+ return sock->ops->set_read_handler(sock, handler, private_data);
+}
+
+int tsocket_set_writeable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data)
+{
+ return sock->ops->set_write_handler(sock, handler, private_data);
+}
+
+int tsocket_connect(struct tsocket_context *sock,
+ const struct tsocket_address *remote_addr)
+{
+ return sock->ops->connect_to(sock, remote_addr);
+}
+
+int tsocket_listen(struct tsocket_context *sock,
+ int queue_size)
+{
+ return sock->ops->listen_on(sock, queue_size);
+}
+
+int _tsocket_accept(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **new_sock,
+ const char *location)
+{
+ return sock->ops->accept_new(sock, mem_ctx, new_sock, location);
+}
+
+ssize_t tsocket_pending(struct tsocket_context *sock)
+{
+ return sock->ops->pending_data(sock);
+}
+
+int tsocket_readv(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count)
+{
+ return sock->ops->readv_data(sock, vector, count);
+}
+
+int tsocket_writev(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count)
+{
+ return sock->ops->writev_data(sock, vector, count);
+}
+
+ssize_t tsocket_recvfrom(struct tsocket_context *sock,
+ uint8_t *data, size_t len,
+ TALLOC_CTX *addr_ctx,
+ struct tsocket_address **src_addr)
+{
+ return sock->ops->recvfrom_data(sock, data, len, addr_ctx, src_addr);
+}
+
+ssize_t tsocket_sendto(struct tsocket_context *sock,
+ const uint8_t *data, size_t len,
+ const struct tsocket_address *dest_addr)
+{
+ return sock->ops->sendto_data(sock, data, len, dest_addr);
+}
+
+int tsocket_get_status(const struct tsocket_context *sock)
+{
+ return sock->ops->get_status(sock);
+}
+
+int _tsocket_get_local_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **local_addr,
+ const char *location)
+{
+ return sock->ops->get_local_address(sock, mem_ctx,
+ local_addr, location);
+}
+
+int _tsocket_get_remote_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **remote_addr,
+ const char *location)
+{
+ return sock->ops->get_remote_address(sock, mem_ctx,
+ remote_addr, location);
+}
+
+int tsocket_get_option(const struct tsocket_context *sock,
+ const char *option,
+ TALLOC_CTX *mem_ctx,
+ char **value)
+{
+ return sock->ops->get_option(sock, option, mem_ctx, value);
+}
+
+int tsocket_set_option(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value)
+{
+ return sock->ops->set_option(sock, option, force, value);
+}
+
+void tsocket_disconnect(struct tsocket_context *sock)
+{
+ sock->ops->disconnect(sock);
+}
+
+struct tsocket_address *_tsocket_address_create(TALLOC_CTX *mem_ctx,
+ const struct tsocket_address_ops *ops,
+ void *pstate,
+ size_t psize,
+ const char *type,
+ const char *location)
+{
+ void **ppstate = (void **)pstate;
+ struct tsocket_address *addr;
+
+ addr = talloc_zero(mem_ctx, struct tsocket_address);
+ if (!addr) {
+ return NULL;
+ }
+ addr->ops = ops;
+ addr->location = location;
+ addr->private_data = talloc_size(addr, psize);
+ if (!addr->private_data) {
+ talloc_free(addr);
+ return NULL;
+ }
+ talloc_set_name_const(addr->private_data, type);
+
+ *ppstate = addr->private_data;
+ return addr;
+}
+
+char *tsocket_address_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx)
+{
+ if (!addr) {
+ return talloc_strdup(mem_ctx, "NULL");
+ }
+ return addr->ops->string(addr, mem_ctx);
+}
+
+struct tsocket_address *_tsocket_address_copy(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx,
+ const char *location)
+{
+ return addr->ops->copy(addr, mem_ctx, location);
+}
+
+int _tsocket_address_create_socket(const struct tsocket_address *addr,
+ enum tsocket_type type,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **sock,
+ const char *location)
+{
+ return addr->ops->create_socket(addr, type, mem_ctx, sock, location);
+}
+
diff --git a/lib/tsocket/tsocket.h b/lib/tsocket/tsocket.h
new file mode 100644
index 0000000000..9bcfb5cb7e
--- /dev/null
+++ b/lib/tsocket/tsocket.h
@@ -0,0 +1,220 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _TSOCKET_H
+#define _TSOCKET_H
+
+#include <talloc.h>
+#include <tevent.h>
+
+struct tsocket_context;
+struct tsocket_address;
+struct iovec;
+
+enum tsocket_type {
+ TSOCKET_TYPE_STREAM = 1,
+ TSOCKET_TYPE_DGRAM,
+ TSOCKET_TYPE_MESSAGE
+};
+
+typedef void (*tsocket_event_handler_t)(struct tsocket_context *, void *);
+int tsocket_set_event_context(struct tsocket_context *sock,
+ struct tevent_context *ev);
+int tsocket_set_readable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+int tsocket_set_writeable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+
+int tsocket_connect(struct tsocket_context *sock,
+ const struct tsocket_address *remote_addr);
+
+int tsocket_listen(struct tsocket_context *sock,
+ int queue_size);
+
+int _tsocket_accept(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **new_sock,
+ const char *location);
+#define tsocket_accept(sock, mem_ctx, new_sock) \
+ _tsocket_accept(sock, mem_ctx, new_sock, __location__)
+
+ssize_t tsocket_pending(struct tsocket_context *sock);
+
+int tsocket_readv(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+int tsocket_writev(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+
+ssize_t tsocket_recvfrom(struct tsocket_context *sock,
+ uint8_t *data, size_t len,
+ TALLOC_CTX *addr_ctx,
+ struct tsocket_address **src_addr);
+ssize_t tsocket_sendto(struct tsocket_context *sock,
+ const uint8_t *data, size_t len,
+ const struct tsocket_address *dest_addr);
+
+int tsocket_get_status(const struct tsocket_context *sock);
+
+int _tsocket_get_local_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **local_addr,
+ const char *location);
+#define tsocket_get_local_address(sock, mem_ctx, local_addr) \
+ _tsocket_get_local_address(sock, mem_ctx, local_addr, __location__)
+int _tsocket_get_remote_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **remote_addr,
+ const char *location);
+#define tsocket_get_remote_address(sock, mem_ctx, remote_addr) \
+ _tsocket_get_remote_address(sock, mem_ctx, remote_addr, __location__)
+
+int tsocket_get_option(const struct tsocket_context *sock,
+ const char *option,
+ TALLOC_CTX *mem_ctx,
+ char **value);
+int tsocket_set_option(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value);
+
+void tsocket_disconnect(struct tsocket_context *sock);
+
+char *tsocket_address_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+struct tsocket_address *_tsocket_address_copy(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx,
+ const char *location);
+
+#define tsocket_address_copy(addr, mem_ctx) \
+ _tsocket_address_copy(addr, mem_ctx, __location__)
+
+int _tsocket_address_create_socket(const struct tsocket_address *addr,
+ enum tsocket_type type,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **sock,
+ const char *location);
+#define tsocket_address_create_socket(addr, type, mem_ctx, sock) \
+ _tsocket_address_create_socket(addr, type, mem_ctx, sock,\
+ __location__)
+
+/*
+ * BSD sockets: inet, inet6 and unix
+ */
+
+int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
+ const char *fam,
+ const char *addr,
+ uint16_t port,
+ struct tsocket_address **_addr,
+ const char *location);
+#define tsocket_address_inet_from_strings(mem_ctx, fam, addr, port, _addr) \
+ _tsocket_address_inet_from_strings(mem_ctx, fam, addr, port, _addr, \
+ __location__)
+
+char *tsocket_address_inet_addr_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+uint16_t tsocket_address_inet_port(const struct tsocket_address *addr);
+int tsocket_address_inet_set_port(struct tsocket_address *addr,
+ uint16_t port);
+void tsocket_address_inet_set_broadcast(struct tsocket_address *addr,
+ bool broadcast);
+
+int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
+ const char *path,
+ struct tsocket_address **_addr,
+ const char *location);
+#define tsocket_address_unix_from_path(mem_ctx, path, _addr) \
+ _tsocket_address_unix_from_path(mem_ctx, path, _addr, \
+ __location__)
+char *tsocket_address_unix_path(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+int _tsocket_context_bsd_wrap_existing(TALLOC_CTX *mem_ctx,
+ int fd, bool close_on_disconnect,
+ struct tsocket_context **_sock,
+ const char *location);
+#define tsocket_context_bsd_wrap_existing(mem_ctx, fd, cod, _sock) \
+ _tsocket_context_bsd_wrap_existing(mem_ctx, fd, cod, _sock, \
+ __location__)
+
+/*
+ * Async helpers
+ */
+
+struct tevent_req *tsocket_recvfrom_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx);
+ssize_t tsocket_recvfrom_recv(struct tevent_req *req,
+ int *perrno,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **buf,
+ struct tsocket_address **src);
+
+struct tevent_req *tsocket_sendto_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const uint8_t *buf,
+ size_t len,
+ const struct tsocket_address *dst);
+ssize_t tsocket_sendto_recv(struct tevent_req *req, int *perrno);
+
+struct tevent_req *tsocket_sendto_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const uint8_t *buf,
+ size_t len,
+ struct tsocket_address *dst);
+ssize_t tsocket_sendto_queue_recv(struct tevent_req *req, int *perrno);
+
+struct tevent_req *tsocket_connect_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct tsocket_address *dst);
+int tsocket_connect_recv(struct tevent_req *req, int *perrno);
+
+struct tevent_req *tsocket_writev_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct iovec *vector,
+ size_t count);
+int tsocket_writev_recv(struct tevent_req *req, int *perrno);
+
+struct tevent_req *tsocket_writev_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const struct iovec *vector,
+ size_t count);
+int tsocket_writev_queue_recv(struct tevent_req *req, int *perrno);
+
+typedef int (*tsocket_readv_next_iovec_t)(struct tsocket_context *sock,
+ void *private_data,
+ TALLOC_CTX *mem_ctx,
+ struct iovec **vector,
+ size_t *count);
+struct tevent_req *tsocket_readv_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ tsocket_readv_next_iovec_t next_iovec_fn,
+ void *private_data);
+int tsocket_readv_recv(struct tevent_req *req, int *perrno);
+
+#endif /* _TSOCKET_H */
+
diff --git a/lib/tsocket/tsocket_bsd.c b/lib/tsocket/tsocket_bsd.c
new file mode 100644
index 0000000000..2811882fed
--- /dev/null
+++ b/lib/tsocket/tsocket_bsd.c
@@ -0,0 +1,1126 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+static const struct tsocket_context_ops tsocket_context_bsd_ops;
+static const struct tsocket_address_ops tsocket_address_bsd_ops;
+
+static int tsocket_context_bsd_set_option(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value);
+
+struct tsocket_context_bsd {
+ bool close_on_disconnect;
+ int fd;
+ struct tevent_fd *fde;
+};
+
+struct tsocket_address_bsd {
+ bool broadcast;
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+#ifdef HAVE_IPV6
+ struct sockaddr_in6 sin6;
+#endif
+ struct sockaddr_un sun;
+ struct sockaddr_storage ss;
+ } u;
+};
+
+static int _tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx,
+ struct sockaddr *sa,
+ socklen_t sa_len,
+ struct tsocket_address **_addr,
+ const char *location)
+{
+ struct tsocket_address *addr;
+ struct tsocket_address_bsd *bsda;
+
+ switch (sa->sa_family) {
+ case AF_UNIX:
+ if (sa_len < sizeof(struct sockaddr_un)) {
+ errno = EINVAL;
+ return -1;
+ }
+ break;
+ case AF_INET:
+ if (sa_len < sizeof(struct sockaddr_in)) {
+ errno = EINVAL;
+ return -1;
+ }
+ break;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ if (sa_len < sizeof(struct sockaddr_in6)) {
+ errno = EINVAL;
+ return -1;
+ }
+ break;
+#endif
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+
+ if (sa_len > sizeof(struct sockaddr_storage)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ addr = tsocket_address_create(mem_ctx,
+ &tsocket_address_bsd_ops,
+ &bsda,
+ struct tsocket_address_bsd,
+ location);
+ if (!addr) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ ZERO_STRUCTP(bsda);
+
+ memcpy(&bsda->u.ss, sa, sa_len);
+
+ *_addr = addr;
+ return 0;
+}
+
+int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
+ const char *fam,
+ const char *addr,
+ uint16_t port,
+ struct tsocket_address **_addr,
+ const char *location)
+{
+ struct addrinfo hints;
+ struct addrinfo *result = NULL;
+ char port_str[6];
+ int ret;
+
+ ZERO_STRUCT(hints);
+ /*
+ * we use SOCKET_STREAM here to get just one result
+ * back from getaddrinfo().
+ */
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+
+ if (strcasecmp(fam, "ip") == 0) {
+ hints.ai_family = AF_UNSPEC;
+ if (!addr) {
+#ifdef HAVE_IPV6
+ addr = "::";
+#else
+ addr = "0.0.0.0";
+#endif
+ }
+ } else if (strcasecmp(fam, "ipv4") == 0) {
+ hints.ai_family = AF_INET;
+ if (!addr) {
+ addr = "0.0.0.0";
+ }
+#ifdef HAVE_IPV6
+ } else if (strcasecmp(fam, "ipv6") == 0) {
+ hints.ai_family = AF_INET6;
+ if (!addr) {
+ addr = "::";
+ }
+#endif
+ } else {
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+
+ snprintf(port_str, sizeof(port_str) - 1, "%u", port);
+
+ ret = getaddrinfo(addr, port_str, &hints, &result);
+ if (ret != 0) {
+ switch (ret) {
+ case EAI_FAIL:
+ errno = EINVAL;
+ break;
+ }
+ ret = -1;
+ goto done;
+ }
+
+ if (result->ai_socktype != SOCK_STREAM) {
+ errno = EINVAL;
+ ret = -1;
+ goto done;
+ }
+
+ ret = _tsocket_address_bsd_from_sockaddr(mem_ctx,
+ result->ai_addr,
+ result->ai_addrlen,
+ _addr,
+ location);
+
+done:
+ if (result) {
+ freeaddrinfo(result);
+ }
+ return ret;
+}
+
+char *tsocket_address_inet_addr_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ char addr_str[INET6_ADDRSTRLEN+1];
+ const char *str;
+
+ if (!bsda) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_INET:
+ str = inet_ntop(bsda->u.sin.sin_family,
+ &bsda->u.sin.sin_addr,
+ addr_str, sizeof(addr_str));
+ break;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ str = inet_ntop(bsda->u.sin6.sin6_family,
+ &bsda->u.sin6.sin6_addr,
+ addr_str, sizeof(addr_str));
+ break;
+#endif
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (!str) {
+ return NULL;
+ }
+
+ return talloc_strdup(mem_ctx, str);
+}
+
+uint16_t tsocket_address_inet_port(const struct tsocket_address *addr)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ uint16_t port = 0;
+
+ if (!bsda) {
+ errno = EINVAL;
+ return 0;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_INET:
+ port = ntohs(bsda->u.sin.sin_port);
+ break;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ port = ntohs(bsda->u.sin6.sin6_port);
+ break;
+#endif
+ default:
+ errno = EINVAL;
+ return 0;
+ }
+
+ return port;
+}
+
+int tsocket_address_inet_set_port(struct tsocket_address *addr,
+ uint16_t port)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+
+ if (!bsda) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_INET:
+ bsda->u.sin.sin_port = htons(port);
+ break;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ bsda->u.sin6.sin6_port = htons(port);
+ break;
+#endif
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+void tsocket_address_inet_set_broadcast(struct tsocket_address *addr,
+ bool broadcast)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+
+ if (!bsda) {
+ return;
+ }
+
+ bsda->broadcast = broadcast;
+}
+
+int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
+ const char *path,
+ struct tsocket_address **_addr,
+ const char *location)
+{
+ struct sockaddr_un sun;
+ void *p = &sun;
+ int ret;
+
+ if (!path) {
+ path = "";
+ }
+
+ ZERO_STRUCT(sun);
+ sun.sun_family = AF_UNIX;
+ strncpy(sun.sun_path, path, sizeof(sun.sun_path));
+
+ ret = _tsocket_address_bsd_from_sockaddr(mem_ctx,
+ (struct sockaddr *)p,
+ sizeof(sun),
+ _addr,
+ location);
+
+ return ret;
+}
+
+char *tsocket_address_unix_path(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ const char *str;
+
+ if (!bsda) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_UNIX:
+ str = bsda->u.sun.sun_path;
+ break;
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+
+ return talloc_strdup(mem_ctx, str);
+}
+
+static char *tsocket_address_bsd_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ char *str;
+ char *addr_str;
+ const char *prefix = NULL;
+ uint16_t port;
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_UNIX:
+ return talloc_asprintf(mem_ctx, "unix:%s",
+ bsda->u.sun.sun_path);
+ case AF_INET:
+ prefix = "ipv4";
+ break;
+ case AF_INET6:
+ prefix = "ipv6";
+ break;
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+
+ addr_str = tsocket_address_inet_addr_string(addr, mem_ctx);
+ if (!addr_str) {
+ return NULL;
+ }
+
+ port = tsocket_address_inet_port(addr);
+
+ str = talloc_asprintf(mem_ctx, "%s:%s:%u",
+ prefix, addr_str, port);
+ talloc_free(addr_str);
+
+ return str;
+}
+
+static struct tsocket_address *tsocket_address_bsd_copy(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx,
+ const char *location)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ struct tsocket_address *copy;
+ int ret;
+
+ ret = _tsocket_address_bsd_from_sockaddr(mem_ctx,
+ &bsda->u.sa,
+ sizeof(bsda->u.ss),
+ &copy,
+ location);
+ if (ret != 0) {
+ return NULL;
+ }
+
+ tsocket_address_inet_set_broadcast(copy, bsda->broadcast);
+ return copy;
+}
+
+int _tsocket_context_bsd_wrap_existing(TALLOC_CTX *mem_ctx,
+ int fd, bool close_on_disconnect,
+ struct tsocket_context **_sock,
+ const char *location)
+{
+ struct tsocket_context *sock;
+ struct tsocket_context_bsd *bsds;
+
+ sock = tsocket_context_create(mem_ctx,
+ &tsocket_context_bsd_ops,
+ &bsds,
+ struct tsocket_context_bsd,
+ location);
+ if (!sock) {
+ return -1;
+ }
+
+ bsds->close_on_disconnect = close_on_disconnect;
+ bsds->fd = fd;
+ bsds->fde = NULL;
+
+ *_sock = sock;
+ return 0;
+}
+
+static int tsocket_address_bsd_create_socket(const struct tsocket_address *addr,
+ enum tsocket_type type,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **_sock,
+ const char *location)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+ struct tsocket_context *sock;
+ int bsd_type;
+ int fd;
+ int ret;
+ bool do_bind = false;
+ bool do_reuseaddr = false;
+
+ switch (type) {
+ case TSOCKET_TYPE_STREAM:
+ if (bsda->broadcast) {
+ errno = EINVAL;
+ return -1;
+ }
+ bsd_type = SOCK_STREAM;
+ break;
+ case TSOCKET_TYPE_DGRAM:
+ bsd_type = SOCK_DGRAM;
+ break;
+ default:
+ errno = EPROTONOSUPPORT;
+ return -1;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_UNIX:
+ if (bsda->broadcast) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (bsda->u.sun.sun_path[0] != 0) {
+ do_bind = true;
+ }
+ break;
+ case AF_INET:
+ if (bsda->u.sin.sin_port != 0) {
+ do_reuseaddr = true;
+ do_bind = true;
+ }
+ if (bsda->u.sin.sin_addr.s_addr == INADDR_ANY) {
+ do_bind = true;
+ }
+ break;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ if (bsda->u.sin6.sin6_port != 0) {
+ do_reuseaddr = true;
+ do_bind = true;
+ }
+ if (memcmp(&in6addr_any,
+ &bsda->u.sin6.sin6_addr,
+ sizeof(in6addr_any)) != 0) {
+ do_bind = true;
+ }
+ break;
+#endif
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ fd = socket(bsda->u.sa.sa_family, bsd_type, 0);
+ if (fd < 0) {
+ return fd;
+ }
+
+ fd = tsocket_common_prepare_fd(fd, true);
+ if (fd < 0) {
+ return fd;
+ }
+
+ ret = _tsocket_context_bsd_wrap_existing(mem_ctx, fd, true,
+ &sock, location);
+ if (ret != 0) {
+ int saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ return ret;
+ }
+
+ if (bsda->broadcast) {
+ ret = tsocket_context_bsd_set_option(sock, "SO_BROADCAST", true, "1");
+ if (ret != 0) {
+ int saved_errno = errno;
+ talloc_free(sock);
+ errno = saved_errno;
+ return ret;
+ }
+ }
+
+ if (do_reuseaddr) {
+ ret = tsocket_context_bsd_set_option(sock, "SO_REUSEADDR", true, "1");
+ if (ret != 0) {
+ int saved_errno = errno;
+ talloc_free(sock);
+ errno = saved_errno;
+ return ret;
+ }
+ }
+
+ if (do_bind) {
+ ret = bind(fd, &bsda->u.sa, sizeof(bsda->u.ss));
+ if (ret != 0) {
+ int saved_errno = errno;
+ talloc_free(sock);
+ errno = saved_errno;
+ return ret;
+ }
+ }
+
+ *_sock = sock;
+ return 0;
+}
+
+static const struct tsocket_address_ops tsocket_address_bsd_ops = {
+ .name = "bsd",
+ .string = tsocket_address_bsd_string,
+ .copy = tsocket_address_bsd_copy,
+ .create_socket = tsocket_address_bsd_create_socket
+};
+
+static void tsocket_context_bsd_fde_handler(struct tevent_context *ev,
+ struct tevent_fd *fde,
+ uint16_t flags,
+ void *private_data)
+{
+ struct tsocket_context *sock = talloc_get_type(private_data,
+ struct tsocket_context);
+
+ if (flags & TEVENT_FD_WRITE) {
+ sock->event.write_handler(sock, sock->event.write_private);
+ return;
+ }
+ if (flags & TEVENT_FD_READ) {
+ sock->event.read_handler(sock, sock->event.read_private);
+ return;
+ }
+}
+
+static int tsocket_context_bsd_set_event_context(struct tsocket_context *sock,
+ struct tevent_context *ev)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+
+ talloc_free(bsds->fde);
+ bsds->fde = NULL;
+ ZERO_STRUCT(sock->event);
+
+ if (!ev) {
+ return 0;
+ }
+
+ bsds->fde = tevent_add_fd(ev, bsds,
+ bsds->fd,
+ 0,
+ tsocket_context_bsd_fde_handler,
+ sock);
+ if (!bsds->fde) {
+ if (errno == 0) {
+ errno = ENOMEM;
+ }
+ return -1;
+ }
+
+ sock->event.ctx = ev;
+
+ return 0;
+}
+
+static int tsocket_context_bsd_set_read_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+
+ if (sock->event.read_handler && !handler) {
+ TEVENT_FD_NOT_READABLE(bsds->fde);
+ } else if (!sock->event.read_handler && handler) {
+ TEVENT_FD_READABLE(bsds->fde);
+ }
+
+ sock->event.read_handler = handler;
+ sock->event.read_private = private_data;
+
+ return 0;
+}
+
+static int tsocket_context_bsd_set_write_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+
+ if (sock->event.write_handler && !handler) {
+ TEVENT_FD_NOT_WRITEABLE(bsds->fde);
+ } else if (!sock->event.write_handler && handler) {
+ TEVENT_FD_WRITEABLE(bsds->fde);
+ }
+
+ sock->event.write_handler = handler;
+ sock->event.write_private = private_data;
+
+ return 0;
+}
+
+static int tsocket_context_bsd_connect_to(struct tsocket_context *sock,
+ const struct tsocket_address *remote)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ struct tsocket_address_bsd *bsda = talloc_get_type(remote->private_data,
+ struct tsocket_address_bsd);
+ int ret;
+
+ ret = connect(bsds->fd, &bsda->u.sa,
+ sizeof(bsda->u.ss));
+
+ return ret;
+}
+
+static int tsocket_context_bsd_listen_on(struct tsocket_context *sock,
+ int queue_size)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int ret;
+
+ ret = listen(bsds->fd, queue_size);
+
+ return ret;
+}
+
+static int tsocket_context_bsd_accept_new(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **_new_sock,
+ const char *location)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int new_fd;
+ struct tsocket_context *new_sock;
+ struct tsocket_context_bsd *new_bsds;
+ struct sockaddr_storage ss;
+ void *p = &ss;
+ socklen_t ss_len = sizeof(ss);
+
+ new_fd = accept(bsds->fd, (struct sockaddr *)p, &ss_len);
+ if (new_fd < 0) {
+ return new_fd;
+ }
+
+ new_fd = tsocket_common_prepare_fd(new_fd, true);
+ if (new_fd < 0) {
+ return new_fd;
+ }
+
+ new_sock = tsocket_context_create(mem_ctx,
+ &tsocket_context_bsd_ops,
+ &new_bsds,
+ struct tsocket_context_bsd,
+ location);
+ if (!new_sock) {
+ int saved_errno = errno;
+ close(new_fd);
+ errno = saved_errno;
+ return -1;
+ }
+
+ new_bsds->close_on_disconnect = true;
+ new_bsds->fd = new_fd;
+ new_bsds->fde = NULL;
+
+ *_new_sock = new_sock;
+ return 0;
+}
+
+static ssize_t tsocket_context_bsd_pending_data(struct tsocket_context *sock)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int ret;
+ int value = 0;
+
+ ret = ioctl(bsds->fd, FIONREAD, &value);
+ if (ret == -1) {
+ return ret;
+ }
+
+ if (ret == 0) {
+ if (value == 0) {
+ int error=0;
+ socklen_t len = sizeof(error);
+ /*
+ * if no data is available check if the socket
+ * is in error state. For dgram sockets
+ * it's the way to return ICMP error messages
+ * of connected sockets to the caller.
+ */
+ ret = getsockopt(bsds->fd, SOL_SOCKET, SO_ERROR,
+ &error, &len);
+ if (ret == -1) {
+ return ret;
+ }
+ if (error != 0) {
+ errno = error;
+ return -1;
+ }
+ }
+ return value;
+ }
+
+ /* this should not be reached */
+ errno = EIO;
+ return -1;
+}
+
+static int tsocket_context_bsd_readv_data(struct tsocket_context *sock,
+ const struct iovec *vector,
+ size_t count)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int ret;
+
+ ret = readv(bsds->fd, vector, count);
+
+ return ret;
+}
+
+static int tsocket_context_bsd_writev_data(struct tsocket_context *sock,
+ const struct iovec *vector,
+ size_t count)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int ret;
+
+ ret = writev(bsds->fd, vector, count);
+
+ return ret;
+}
+
+static ssize_t tsocket_context_bsd_recvfrom_data(struct tsocket_context *sock,
+ uint8_t *data, size_t len,
+ TALLOC_CTX *addr_ctx,
+ struct tsocket_address **remote)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ struct tsocket_address *addr = NULL;
+ struct tsocket_address_bsd *bsda;
+ ssize_t ret;
+ struct sockaddr *sa = NULL;
+ socklen_t sa_len = 0;
+
+ if (remote) {
+ addr = tsocket_address_create(addr_ctx,
+ &tsocket_address_bsd_ops,
+ &bsda,
+ struct tsocket_address_bsd,
+ __location__ "recvfrom");
+ if (!addr) {
+ return -1;
+ }
+
+ ZERO_STRUCTP(bsda);
+
+ sa = &bsda->u.sa;
+ sa_len = sizeof(bsda->u.ss);
+ }
+
+ ret = recvfrom(bsds->fd, data, len, 0, sa, &sa_len);
+ if (ret < 0) {
+ int saved_errno = errno;
+ talloc_free(addr);
+ errno = saved_errno;
+ return ret;
+ }
+
+ if (remote) {
+ *remote = addr;
+ }
+ return ret;
+}
+
+static ssize_t tsocket_context_bsd_sendto_data(struct tsocket_context *sock,
+ const uint8_t *data, size_t len,
+ const struct tsocket_address *remote)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ struct sockaddr *sa = NULL;
+ socklen_t sa_len = 0;
+ ssize_t ret;
+
+ if (remote) {
+ struct tsocket_address_bsd *bsda =
+ talloc_get_type(remote->private_data,
+ struct tsocket_address_bsd);
+
+ sa = &bsda->u.sa;
+ sa_len = sizeof(bsda->u.ss);
+ }
+
+ ret = sendto(bsds->fd, data, len, 0, sa, sa_len);
+
+ return ret;
+}
+
+static int tsocket_context_bsd_get_status(const struct tsocket_context *sock)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ int ret;
+ int error=0;
+ socklen_t len = sizeof(error);
+
+ if (bsds->fd == -1) {
+ errno = EPIPE;
+ return -1;
+ }
+
+ ret = getsockopt(bsds->fd, SOL_SOCKET, SO_ERROR, &error, &len);
+ if (ret == -1) {
+ return ret;
+ }
+ if (error != 0) {
+ errno = error;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tsocket_context_bsd_get_local_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **_addr,
+ const char *location)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ struct tsocket_address *addr;
+ struct tsocket_address_bsd *bsda;
+ ssize_t ret;
+ socklen_t sa_len;
+
+ addr = tsocket_address_create(mem_ctx,
+ &tsocket_address_bsd_ops,
+ &bsda,
+ struct tsocket_address_bsd,
+ location);
+ if (!addr) {
+ return -1;
+ }
+
+ ZERO_STRUCTP(bsda);
+
+ sa_len = sizeof(bsda->u.ss);
+ ret = getsockname(bsds->fd, &bsda->u.sa, &sa_len);
+ if (ret < 0) {
+ int saved_errno = errno;
+ talloc_free(addr);
+ errno = saved_errno;
+ return ret;
+ }
+
+ *_addr = addr;
+ return 0;
+}
+
+static int tsocket_context_bsd_get_remote_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **_addr,
+ const char *location)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ struct tsocket_address *addr;
+ struct tsocket_address_bsd *bsda;
+ ssize_t ret;
+ socklen_t sa_len;
+
+ addr = tsocket_address_create(mem_ctx,
+ &tsocket_address_bsd_ops,
+ &bsda,
+ struct tsocket_address_bsd,
+ location);
+ if (!addr) {
+ return -1;
+ }
+
+ ZERO_STRUCTP(bsda);
+
+ sa_len = sizeof(bsda->u.ss);
+ ret = getpeername(bsds->fd, &bsda->u.sa, &sa_len);
+ if (ret < 0) {
+ int saved_errno = errno;
+ talloc_free(addr);
+ errno = saved_errno;
+ return ret;
+ }
+
+ *_addr = addr;
+ return 0;
+}
+
+static const struct tsocket_context_bsd_option {
+ const char *name;
+ int level;
+ int optnum;
+ int optval;
+} tsocket_context_bsd_options[] = {
+#define TSOCKET_OPTION(_level, _optnum, _optval) { \
+ .name = #_optnum, \
+ .level = _level, \
+ .optnum = _optnum, \
+ .optval = _optval \
+}
+ TSOCKET_OPTION(SOL_SOCKET, SO_REUSEADDR, 0),
+ TSOCKET_OPTION(SOL_SOCKET, SO_BROADCAST, 0)
+};
+
+static int tsocket_context_bsd_get_option(const struct tsocket_context *sock,
+ const char *option,
+ TALLOC_CTX *mem_ctx,
+ char **_value)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ const struct tsocket_context_bsd_option *opt = NULL;
+ uint32_t i;
+ int optval;
+ socklen_t optval_len = sizeof(optval);
+ char *value;
+ int ret;
+
+ for (i=0; i < ARRAY_SIZE(tsocket_context_bsd_options); i++) {
+ if (strcmp(option, tsocket_context_bsd_options[i].name) != 0) {
+ continue;
+ }
+
+ opt = &tsocket_context_bsd_options[i];
+ break;
+ }
+
+ if (!opt) {
+ goto nosys;
+ }
+
+ ret = getsockopt(bsds->fd, opt->level, opt->optnum,
+ (void *)&optval, &optval_len);
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (optval_len != sizeof(optval)) {
+ value = NULL;
+ } if (opt->optval != 0) {
+ if (optval == opt->optval) {
+ value = talloc_strdup(mem_ctx, "1");
+ } else {
+ value = talloc_strdup(mem_ctx, "0");
+ }
+ if (!value) {
+ goto nomem;
+ }
+ } else {
+ value = talloc_asprintf(mem_ctx, "%d", optval);
+ if (!value) {
+ goto nomem;
+ }
+ }
+
+ *_value = value;
+ return 0;
+
+ nomem:
+ errno = ENOMEM;
+ return -1;
+ nosys:
+ errno = ENOSYS;
+ return -1;
+}
+
+static int tsocket_context_bsd_set_option(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+ const struct tsocket_context_bsd_option *opt = NULL;
+ uint32_t i;
+ int optval;
+ int ret;
+
+ for (i=0; i < ARRAY_SIZE(tsocket_context_bsd_options); i++) {
+ if (strcmp(option, tsocket_context_bsd_options[i].name) != 0) {
+ continue;
+ }
+
+ opt = &tsocket_context_bsd_options[i];
+ break;
+ }
+
+ if (!opt) {
+ goto nosys;
+ }
+
+ if (value) {
+ if (opt->optval != 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ optval = atoi(value);
+ } else {
+ optval = opt->optval;
+ }
+
+ ret = setsockopt(bsds->fd, opt->level, opt->optnum,
+ (const void *)&optval, sizeof(optval));
+ if (ret != 0) {
+ if (!force) {
+ errno = 0;
+ return 0;
+ }
+ return ret;
+ }
+
+ return 0;
+
+ nosys:
+ if (!force) {
+ return 0;
+ }
+
+ errno = ENOSYS;
+ return -1;
+}
+
+static void tsocket_context_bsd_disconnect(struct tsocket_context *sock)
+{
+ struct tsocket_context_bsd *bsds = talloc_get_type(sock->private_data,
+ struct tsocket_context_bsd);
+
+ tsocket_context_bsd_set_event_context(sock, NULL);
+
+ if (bsds->fd != -1) {
+ if (bsds->close_on_disconnect) {
+ close(bsds->fd);
+ }
+ bsds->fd = -1;
+ }
+}
+
+static const struct tsocket_context_ops tsocket_context_bsd_ops = {
+ .name = "bsd",
+
+ .set_event_context = tsocket_context_bsd_set_event_context,
+ .set_read_handler = tsocket_context_bsd_set_read_handler,
+ .set_write_handler = tsocket_context_bsd_set_write_handler,
+
+ .connect_to = tsocket_context_bsd_connect_to,
+ .listen_on = tsocket_context_bsd_listen_on,
+ .accept_new = tsocket_context_bsd_accept_new,
+
+ .pending_data = tsocket_context_bsd_pending_data,
+ .readv_data = tsocket_context_bsd_readv_data,
+ .writev_data = tsocket_context_bsd_writev_data,
+ .recvfrom_data = tsocket_context_bsd_recvfrom_data,
+ .sendto_data = tsocket_context_bsd_sendto_data,
+
+ .get_status = tsocket_context_bsd_get_status,
+ .get_local_address = tsocket_context_bsd_get_local_address,
+ .get_remote_address = tsocket_context_bsd_get_remote_address,
+
+ .get_option = tsocket_context_bsd_get_option,
+ .set_option = tsocket_context_bsd_set_option,
+
+ .disconnect = tsocket_context_bsd_disconnect
+};
diff --git a/lib/tsocket/tsocket_connect.c b/lib/tsocket/tsocket_connect.c
new file mode 100644
index 0000000000..7a9d4b8381
--- /dev/null
+++ b/lib/tsocket/tsocket_connect.c
@@ -0,0 +1,122 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+struct tsocket_connect_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ const struct tsocket_address *dst;
+ } caller;
+};
+
+static void tsocket_connect_handler(struct tsocket_context *sock,
+ void *private_data);
+
+struct tevent_req *tsocket_connect_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct tsocket_address *dst)
+{
+ struct tevent_req *req;
+ struct tsocket_connect_state *state;
+ int ret;
+ int err;
+ bool retry;
+ bool dummy;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_connect_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.dst = dst;
+
+ ret = tsocket_connect(state->caller.sock,
+ state->caller.dst);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ goto async;
+ }
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ tevent_req_done(req);
+ goto post;
+
+ async:
+ ret = tsocket_set_readable_handler(state->caller.sock,
+ tsocket_connect_handler,
+ req);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_connect_handler(struct tsocket_context *sock,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type(private_data,
+ struct tevent_req);
+ struct tsocket_connect_state *state = tevent_req_data(req,
+ struct tsocket_connect_state);
+ int ret;
+ int err;
+ bool retry;
+
+ ret = tsocket_get_status(state->caller.sock);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+int tsocket_connect_recv(struct tevent_req *req, int *perrno)
+{
+ int ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+
+ tevent_req_received(req);
+ return ret;
+}
+
diff --git a/lib/tsocket/tsocket_guide.txt b/lib/tsocket/tsocket_guide.txt
new file mode 100644
index 0000000000..a02fa373fa
--- /dev/null
+++ b/lib/tsocket/tsocket_guide.txt
@@ -0,0 +1,503 @@
+
+Basic design of the tsocket abstraction
+=======================================
+
+The tsocket layer is designed to match more or less
+the bsd socket layer, but it hides the filedescriptor
+within a opaque 'tsocket_context' structure to make virtual
+sockets possible. The virtual sockets can be encrypted tunnels
+(like TLS, SASL or GSSAPI) or named pipes over smb.
+
+The tsocket layer is a bit like an abstract class, which defines
+common methods to work with sockets in a non blocking fashion.
+
+The whole library is based on the talloc(3) and 'tevent' libraries.
+
+The 'tsocket_address' structure is the 2nd abstracted class
+which represends the address of a socket endpoint.
+
+Each different type of socket has its own constructor.
+
+Typically the constructor for a tsocket_context is attached to
+the tsocket_address of the source endpoint. That means
+the tsocket_address_create_socket() function takes the
+tsocket_address of the local endpoint and creates a tsocket_context
+for the communication.
+
+For some usecases it's possible to wrap an existing socket into a
+tsocket_context, e.g. to wrap an existing pipe(2) into
+tsocket_context, so that you can use the same functions to
+communicate over the pipe.
+
+The tsocket_address abstraction
+===============================
+
+The tsocket_address represents an socket endpoint genericly.
+As it's like an abstract class it has no specific constructor.
+The specific constructors are descripted later sections.
+
+There's a function get the string representation of the
+endpoint for debugging. Callers should not try to parse
+the string! The should use additional methods of the specific
+tsocket_address implemention to get more details.
+
+ char *tsocket_address_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+There's a function to create a copy of the tsocket_address.
+This is useful when before doing modifications to a socket
+via additional methods of the specific tsocket_address implementation.
+
+ struct tsocket_address *tsocket_address_copy(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+There's a function to create a tsocket_context based on the given local
+socket endpoint. The return value is 0 on success and -1 on failure
+with errno holding the specific error. Specific details are descripted in later
+sections. Note not all specific implementation have to implement all socket
+types.
+
+ enum tsocket_type {
+ TSOCKET_TYPE_STREAM = 1,
+ TSOCKET_TYPE_DGRAM,
+ TSOCKET_TYPE_MESSAGE
+ };
+
+ int tsocket_address_create_socket(const struct tsocket_address *addr,
+ enum tsocket_type type,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **sock);
+
+The tsocket_context abstraction
+===============================
+
+The tsocket_context is like an abstract class and represents
+a socket similar to bsd style sockets. The methods are more
+or less equal to the bsd socket api, while the filedescriptor
+is replaced by tsocket_context and sockaddr, socklen_t pairs
+are replaced by tsocket_address. The 'bind' operation happens
+in the specific constructor as the constructor is typically based
+on tsocket_address of local socket endpoint.
+
+All operations are by design non blocking and can return error
+values like EAGAIN, EINPROGRESS, EWOULDBLOCK or EINTR which
+indicate that the caller should retry the operation later.
+Also read the "The glue to tevent" section.
+
+The socket can of types:
+ - TSOCKET_TYPE_STREAM is the equivalent to SOCK_STREAM in the bsd socket api.
+ - TSOCKET_TYPE_DGRAM is the equivalent to SOCK_DGRAM in the bsd socket api.
+ - TSOCKET_TYPE_MESSAGE operates on a connected socket and is therefore
+ like TSOCKET_TYPE_STREAM, but the consumer needs to first read all
+ data of a message, which was generated by one message 'write' on the sender,
+ before the consumer gets data of the next message. This matches a bit
+ like message mode pipes on windows. The concept is to transfer ordered
+ messages between to endpoints.
+
+There's a function to connect to a remote endpoint. The behavior
+and error codes match the connect(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_connect(struct tsocket_context *sock,
+ const struct tsocket_address *remote_addr);
+
+There's a function to listen for incoming connections. The behavior
+and error codes match the listen(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_listen(struct tsocket_context *sock,
+ int queue_size);
+
+There's a function to accept incoming connections. The behavior
+and error codes match the accept(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_accept(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **new_sock);
+
+There's a function to ask how many bytes are in input buffer
+of the connection. For sockets of type TSOCKET_TYPE_DGRAM or
+TSOCKET_TYPE_MESSAGE the size of the next available dgram/message
+is returned. A return value of -1 indicates a socket error
+and errno will hold the specific error code. If no data
+is available 0 is returned, but retry error codes like
+EINTR can also be returned.
+
+ ssize_t tsocket_pending(struct tsocket_context *sock);
+
+There's a function to read data from the socket. The behavior
+and error codes match the readv(3) function, also take a look
+at the recv(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_readv(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+
+There's a function to write data from the socket. The behavior
+and error codes match the writev(3) function, also take a look
+at the send(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_writev(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+
+There's a function to read a datagram from a remote endpoint.
+The behavior and error codes match the recvfrom(2) function of
+the bsd socket api. As TSOCKET_TYPE_DGRAM sockets can also be
+used in connected mode src_addr can be NULL, if the caller don't
+want to get the source address. Maybe the specific tsocket_context
+implementation speficied some further details.
+
+ ssize_t tsocket_recvfrom(struct tsocket_context *sock,
+ uint8_t *data, size_t len,
+ TALLOC_CTX *addr_ctx,
+ struct tsocket_address **src_addr);
+
+There's a function to send a datagram to a remote endpoint the socket.
+The behavior and error codes match the recvfrom(2) function of the
+bsd socket api. As TSOCKET_TYPE_DGRAM sockets can also be used in
+connected mode dest_addr must be NULL in connected mode and a valid
+tsocket_address otherwise. Maybe the specific tsocket_context
+implementation speficied some further details.
+
+ ssize_t tsocket_sendto(struct tsocket_context *sock,
+ const uint8_t *data, size_t len,
+ const struct tsocket_address *dest_addr);
+
+There's a function to get the current status of the socket.
+The behavior and error codes match the getsockopt(2) function
+of the bsd socket api, with SOL_SOCKET and SO_ERROR as arguments.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ int tsocket_get_status(const struct tsocket_context *sock);
+
+There's a function to get tsocket_address of the local endpoint.
+The behavior and error codes match the getsockname(2) function
+of the bsd socket api. Maybe the specific tsocket_context
+implementation speficied some further details.
+
+ int tsocket_get_local_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **local_addr);
+
+There's a function to get tsocket_address of the remote endpoint
+of a connected socket. The behavior and error codes match the
+getpeername(2) function of the bsd socket api. Maybe the specific
+tsocket_context implementation speficied some further details.
+
+ int tsocket_get_remote_address(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **remote_addr,
+ const char *location);
+
+There's a function to ask for specific options of the socket.
+The behavior and error codes match the getsockopt(2) function
+of the bsd socket api. The option and value are represented as string
+values, where the 'value' parameter can be NULL is the caller don't want to
+get the value. The supported options and values are up to the specific
+tsocket_context implementation.
+
+ int tsocket_get_option(const struct tsocket_context *sock,
+ const char *option,
+ TALLOC_CTX *mem_ctx,
+ char **value);
+
+There's a function to set specific options of the socket.
+The behavior and error codes match the setsockopt(2) function
+of the bsd socket api. The option and value are represented as string
+values, where the 'value' parameter can be NULL. The supported options
+and values are up to the specific tsocket_context implementation.
+The 'force' parameter specifies whether an error should be returned
+for unsupported options.
+
+ int tsocket_set_option(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value);
+
+There's a function to disconnect the socket. The behavior
+and error codes match the close(2) function of the bsd socket api.
+Maybe the specific tsocket_context implementation speficied some
+further details.
+
+ void tsocket_disconnect(struct tsocket_context *sock);
+
+The glue to tevent
+==================
+
+As the tsocket library is based on the tevent library,
+there need to be functions to let the caller register
+callback functions, which are triggered when the socket
+is writeable or readable. Typically one would use
+tevent fd events, but in order to hide the filedescriptor
+the tsocket_context abstraction has their own functions.
+
+There's a function to set the currently active tevent_context
+for the socket. It's important there's only one tevent_context
+actively used with the socket. A second call will cancel
+all low level events made on the old tevent_context, it will
+also resets the send and recv handlers to NULL. If the caller
+sets attaches a new event context to the socket, the callback
+function also need to be registered again. It's important
+that the caller keeps the given tevent_context in memory
+and actively calls tsocket_set_event_context(sock, NULL)
+before calling talloc_free(event_context).
+The function returns 0 on success and -1 together with an errno
+on failure.
+
+ int tsocket_set_event_context(struct tsocket_context *sock,
+ struct tevent_context *ev);
+
+There's a function to register a callback function which is called
+when the socket is readable. If the caller don't want to get notified
+anymore the function should be called with NULL as handler.
+The function returns 0 on success and -1 together with an errno
+on failure.
+
+ typedef void (*tsocket_event_handler_t)(struct tsocket_context *, void *);
+ int tsocket_set_readable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+
+There's a function to register a callback function which is called
+when the socket is writeable. If the caller don't want to get notified
+anymore the function should be called with NULL as handler.
+The function returns 0 on success and -1 together with an errno
+on failure.
+
+ typedef void (*tsocket_event_handler_t)(struct tsocket_context *, void *);
+ int tsocket_set_writeable_handler(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+
+Note: if the socket is readable and writeable, only the writeable
+ handler is called, this avoids deadlocks at the application level.
+
+Async helper functions
+======================
+
+To make the life easier for the callers, there're 'tevent_req' based
+helper functions for non-blocking io-operations. For each of this functions
+to work the caller must attach the tevent_context to the tsocket_context
+with tsocket_set_event_context(). Please remember that attching a new
+tevent_context will reset the event state of the socket and should only
+be done, when there's no async request is pending on the socket!
+
+The detailed calling conventions for 'tevent_req' based programming
+will be explained in the 'tevent' documentation.
+
+To receive the next availabe datagram from socket there's a wrapper
+for tsocket_recvfrom(). The caller virtually sends its desire to receive
+the next available datagram by calling the tsocket_recvfrom_send() function
+and attaches a callback function to the returned tevent_req via tevent_req_set_callback().
+The callback function is called when a datagram is available or an error has happened.
+The callback function needs to get the result by calling
+tsocket_recvfrom_recv(). The return value of tsocket_recvfrom_recv()
+matches the return value from tsocket_recvfrom(). A possible errno is delivered
+via the perrno parameter instead of the global errno variable. The datagram
+buffer and optional the source tsocket_address of the datagram are returned as talloc
+childs of the mem_ctx passed to tsocket_recvfrom_recv().
+It's important that the caller garanties that there's only one async
+read request on the socket at a time.
+
+ struct tevent_req *tsocket_recvfrom_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx);
+ ssize_t tsocket_recvfrom_recv(struct tevent_req *req,
+ int *perrno,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **buf,
+ struct tsocket_address **src);
+
+To send a datagram there's a wrapper for tsocket_sendto().
+The caller calls tsocket_sendto_send() instead of tsocket_sendto()
+which returns a tevent_req allocated on the given TALLOC_CTX.
+The caller attaches a callback function to the returned tevent_req via
+tevent_req_set_callback(). The callback function is called when a datagram was
+deliviered into the socket or an error has happened.
+The callback function needs to get the result by calling
+tsocket_sendto_recv(). The return value of tsocket_sendto_recv()
+matches the return value from tsocket_sendto(). A possible errno is delivered
+via the perrno parameter instead of the global errno variable.
+Normal callers should not use this function directly, they should use
+tsocket_sendto_queue_send/recv() instead.
+
+ struct tevent_req *tsocket_sendto_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const uint8_t *buf,
+ size_t len,
+ const struct tsocket_address *dst);
+ ssize_t tsocket_sendto_recv(struct tevent_req *req, int *perrno);
+
+As only one async tsocket_sendto() call should happen at a time,
+there's a 'tevent_queue' is used to serialize the sendto requests.
+
+ struct tevent_req *tsocket_sendto_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const uint8_t *buf,
+ size_t len,
+ struct tsocket_address *dst);
+ ssize_t tsocket_sendto_queue_recv(struct tevent_req *req, int *perrno);
+
+Ther's an async helper for tsocket_connect(), which should be used
+to connect TSOCKET_TYPE_STREAM based sockets.
+The caller virtually sends its desire to connect to the destination
+tsocket_address by calling tsocket_connect_send() and gets back a tevent_req.
+The caller sets a callback function via tevent_req_set_callback().
+The callback function is called if the tsocket is connected or an error has happened.
+The callback function needs to get the result by calling
+tsocket_connect_recv(). The return value of tsocket_connect_recv()
+matches the return value from tsocket_connect()/tsocket_get_status().
+A possible errno is delivered via the perrno parameter instead of the global
+errno variable.
+
+ struct tevent_req *tsocket_connect_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct tsocket_address *dst);
+ int tsocket_connect_recv(struct tevent_req *req, int *perrno);
+
+To send an 'iovec' there's a wrapper for tsocket_writev().
+The caller calls tsocket_writev_send() instead of tsocket_writev()
+which returns a tevent_req allocated on the given TALLOC_CTX.
+The caller attaches a callback function to the returned tevent_req via
+tevent_req_set_callback(). The callback function is called when the whole iovec
+was deliviered into the socket or an error has happened.
+The callback function needs to get the result by calling
+tsocket_writev_recv(). The return value of tsocket_writev_recv()
+matches the return value from tsocket_writev(). A possible errno is delivered
+via the perrno parameter instead of the global errno variable.
+Normal callers should not use this function directly, they should use
+tsocket_writev_queue_send/recv() instead.
+
+ struct tevent_req *tsocket_writev_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct iovec *vector,
+ size_t count);
+ int tsocket_writev_recv(struct tevent_req *req, int *perrno);
+
+As only one async tsocket_writev() call should happen at a time,
+there's a 'tevent_queue' is used to serialize the writev requests.
+
+ struct tevent_req *tsocket_writev_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const struct iovec *vector,
+ size_t count);
+ int tsocket_writev_queue_recv(struct tevent_req *req, int *perrno);
+
+For TSOCKET_TYPE_STREAM sockets, it's typically desired to split the stream
+into PDUs. That's why the helper function for tsocket_readv() is a bit
+different compared to the other helper functions. The general rule
+is still to get a tevent_req, set a callback which gets called when the
+operation is done. The callback function needs to get the result by
+calling tsocket_readv_recv(). The 'next_iovec' callback function
+makes the difference to the other helper function.
+The tsocket_writev_send/recv() logic asks the caller via the
+next_iovec_fn for an iovec array, which will be filled completely
+with bytes from the socket, then the next_iovec_fn is called for
+the next iovec array to fill, untill the next_iovec_fn returns an empty
+iovec array. That next_iovec_fn should allocate the array as child of the
+passed mem_ctx, while the buffers the array referr to belong to the caller.
+The tsocket_writev_send/recv() engine will modify and free the given array!
+The basic idea is that the caller allocates and maintains the real buffers.
+The next_iovec_fn should report error by returning -1 and setting errno to
+the specific error code. The engine will pass the error to the caller
+via tsocket_readv_recv().
+
+typedef int (*tsocket_readv_next_iovec_t)(struct tsocket_context *sock,
+ void *private_data,
+ TALLOC_CTX *mem_ctx,
+ struct iovec **vector,
+ size_t *count);
+struct tevent_req *tsocket_readv_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ tsocket_readv_next_iovec_t next_iovec_fn,
+ void *private_data);
+int tsocket_readv_recv(struct tevent_req *req, int *perrno);
+
+Wrapper for BSD style sockets
+=============================
+
+Support for BSD style sockets of AF_INET, AF_INET6 and AF_UNIX
+are part of the main tsocket library.
+
+To wrap an existing fd into a tsocket_context the function
+tsocket_context_bsd_wrap_existing() can be used.
+The caller needs to make sure the fd is marked as non-blocking!
+Normaly the tsocket_disconnect() function would close the fd,
+but the caller can influence this behavior based on the close_on_disconnect
+parameter. The caller should also make sure that the socket is only
+accessed via the tsocket_context wrapper after the call to
+tsocket_context_bsd_wrap_existing().
+
+ int tsocket_context_bsd_wrap_existing(TALLOC_CTX *mem_ctx,
+ int fd, bool close_on_disconnect,
+ struct tsocket_context **_sock);
+
+To create a tsocket_address for an inet address you need to use
+the tsocket_address_inet_from_strings() function. It takes the family
+as parameter which can be "ipv4", "ipv6" or "ip", where "ip" autodetects
+"ipv4" or "ipv6", based on the given address string. Depending on the
+operating system, "ipv6" may not be supported. Note: NULL as address
+is mapped to "0.0.0.0" or "::" based on the given family.
+The address parameter only accepts valid ipv4 or ipv6 address strings
+and no names! The caller need to resolve names before using this function.
+
+ int tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
+ const char *family,
+ const char *address,
+ uint16_t port,
+ struct tsocket_address **addr);
+
+To get the address of the inet tsocket_address as string the
+tsocket_address_inet_addr_string() function should be used.
+The caller should not try to parse the tsocket_address_string() output!
+
+ char *tsocket_address_inet_addr_string(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+To get the port number of the inet tsocket_address the
+tsocket_address_inet_port() function should be used.
+The caller should not try to parse the tsocket_address_string() output!
+
+ uint16_t tsocket_address_inet_port(const struct tsocket_address *addr);
+
+To alter the port number of an inet tsocket_address the
+tsocket_address_inet_set_port() function can be used.
+This is usefull if the caller gets the address from
+tsocket_address_copy(), tsocket_context_remote_address() or
+tsocket_context_remote_address() instead of tsocket_address_inet_from_strings().
+
+ int tsocket_address_inet_set_port(struct tsocket_address *addr,
+ uint16_t port);
+
+If the caller wants to create a broadcast socket, with the SO_BROADCAST
+socket option, the broadcast option needs to be set with the
+tsocket_address_inet_set_broadcast() function before calling
+tsocket_address_create_socket().
+
+ void tsocket_address_inet_set_broadcast(struct tsocket_address *addr,
+ bool broadcast);
+
+To create a tsocket_address for AF_UNIX style sockets the
+tsocket_address_unix_from_path() should be used.
+NULL as path is handled like "".
+
+ int tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
+ const char *path,
+ struct tsocket_address **addr);
+
+To get the unix path of an existing unix tsocket_address
+the tsocket_address_unix_path() should be used.
+The caller should not try to parse the tsocket_address_string() output!
+
+ char *tsocket_address_unix_path(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
diff --git a/lib/tsocket/tsocket_helpers.c b/lib/tsocket/tsocket_helpers.c
new file mode 100644
index 0000000000..b2edf43d97
--- /dev/null
+++ b/lib/tsocket/tsocket_helpers.c
@@ -0,0 +1,177 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "system/filesys.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+int tsocket_error_from_errno(int ret,
+ int sys_errno,
+ bool *retry)
+{
+ *retry = false;
+
+ if (ret >= 0) {
+ return 0;
+ }
+
+ if (ret != -1) {
+ return EIO;
+ }
+
+ if (sys_errno == 0) {
+ return EIO;
+ }
+
+ if (sys_errno == EINTR) {
+ *retry = true;
+ return sys_errno;
+ }
+
+ if (sys_errno == EINPROGRESS) {
+ *retry = true;
+ return sys_errno;
+ }
+
+ if (sys_errno == EAGAIN) {
+ *retry = true;
+ return sys_errno;
+ }
+
+#ifdef EWOULDBLOCK
+ if (sys_errno == EWOULDBLOCK) {
+ *retry = true;
+ return sys_errno;
+ }
+#endif
+
+ return sys_errno;
+}
+
+int tsocket_simple_int_recv(struct tevent_req *req, int *perrno)
+{
+ enum tevent_req_state state;
+ uint64_t error;
+
+ if (!tevent_req_is_error(req, &state, &error)) {
+ return 0;
+ }
+
+ switch (state) {
+ case TEVENT_REQ_NO_MEMORY:
+ *perrno = ENOMEM;
+ return -1;
+ case TEVENT_REQ_TIMED_OUT:
+ *perrno = ETIMEDOUT;
+ return -1;
+ case TEVENT_REQ_USER_ERROR:
+ *perrno = (int)error;
+ return -1;
+ default:
+ *perrno = EIO;
+ return -1;
+ }
+
+ *perrno = EIO;
+ return -1;
+}
+
+int tsocket_common_prepare_fd(int fd, bool high_fd)
+{
+ int i;
+ int sys_errno = 0;
+ int fds[3];
+ int num_fds = 0;
+
+ int result, flags;
+
+ if (fd == -1) {
+ return -1;
+ }
+
+ /* first make a fd >= 3 */
+ if (high_fd) {
+ while (fd < 3) {
+ fds[num_fds++] = fd;
+ fd = dup(fd);
+ if (fd == -1) {
+ sys_errno = errno;
+ break;
+ }
+ }
+ for (i=0; i<num_fds; i++) {
+ close(fds[i]);
+ }
+ if (fd == -1) {
+ errno = sys_errno;
+ return fd;
+ }
+ }
+
+ /* fd should be nonblocking. */
+
+#ifdef O_NONBLOCK
+#define FLAG_TO_SET O_NONBLOCK
+#else
+#ifdef SYSV
+#define FLAG_TO_SET O_NDELAY
+#else /* BSD */
+#define FLAG_TO_SET FNDELAY
+#endif
+#endif
+
+ if ((flags = fcntl(fd, F_GETFL)) == -1) {
+ goto fail;
+ }
+
+ flags |= FLAG_TO_SET;
+ if (fcntl(fd, F_SETFL, flags) == -1) {
+ goto fail;
+ }
+
+#undef FLAG_TO_SET
+
+ /* fd should be closed on exec() */
+#ifdef FD_CLOEXEC
+ result = flags = fcntl(fd, F_GETFD, 0);
+ if (flags >= 0) {
+ flags |= FD_CLOEXEC;
+ result = fcntl(fd, F_SETFD, flags);
+ }
+ if (result < 0) {
+ goto fail;
+ }
+#endif
+ return fd;
+
+ fail:
+ if (fd != -1) {
+ sys_errno = errno;
+ close(fd);
+ errno = sys_errno;
+ }
+ return -1;
+}
+
diff --git a/lib/tsocket/tsocket_internal.h b/lib/tsocket/tsocket_internal.h
new file mode 100644
index 0000000000..e4a4908f3e
--- /dev/null
+++ b/lib/tsocket/tsocket_internal.h
@@ -0,0 +1,157 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _TSOCKET_INTERNAL_H
+#define _TSOCKET_INTERNAL_H
+
+struct tsocket_context_ops {
+ const char *name;
+
+ /* event handling */
+ int (*set_event_context)(struct tsocket_context *sock,
+ struct tevent_context *ev);
+ int (*set_read_handler)(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+ int (*set_write_handler)(struct tsocket_context *sock,
+ tsocket_event_handler_t handler,
+ void *private_data);
+
+ /* client ops */
+ int (*connect_to)(struct tsocket_context *sock,
+ const struct tsocket_address *remote_addr);
+
+ /* server ops */
+ int (*listen_on)(struct tsocket_context *sock,
+ int queue_size);
+ int (*accept_new)(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **new_sock,
+ const char *location);
+
+ /* general ops */
+ ssize_t (*pending_data)(struct tsocket_context *sock);
+
+ int (*readv_data)(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+ int (*writev_data)(struct tsocket_context *sock,
+ const struct iovec *vector, size_t count);
+
+ ssize_t (*recvfrom_data)(struct tsocket_context *sock,
+ uint8_t *data, size_t len,
+ TALLOC_CTX *addr_ctx,
+ struct tsocket_address **remote_addr);
+ ssize_t (*sendto_data)(struct tsocket_context *sock,
+ const uint8_t *data, size_t len,
+ const struct tsocket_address *remote_addr);
+
+ /* info */
+ int (*get_status)(const struct tsocket_context *sock);
+ int (*get_local_address)(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **local_addr,
+ const char *location);
+ int (*get_remote_address)(const struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_address **remote_addr,
+ const char *location);
+
+ /* options */
+ int (*get_option)(const struct tsocket_context *sock,
+ const char *option,
+ TALLOC_CTX *mem_ctx,
+ char **value);
+ int (*set_option)(const struct tsocket_context *sock,
+ const char *option,
+ bool force,
+ const char *value);
+
+ /* close/disconnect */
+ void (*disconnect)(struct tsocket_context *sock);
+};
+
+struct tsocket_context {
+ const char *location;
+ const struct tsocket_context_ops *ops;
+
+ void *private_data;
+
+ struct {
+ struct tevent_context *ctx;
+ void *read_private;
+ tsocket_event_handler_t read_handler;
+ void *write_private;
+ tsocket_event_handler_t write_handler;
+ } event;
+};
+
+struct tsocket_context *_tsocket_context_create(TALLOC_CTX *mem_ctx,
+ const struct tsocket_context_ops *ops,
+ void *pstate,
+ size_t psize,
+ const char *type,
+ const char *location);
+#define tsocket_context_create(mem_ctx, ops, state, type, location) \
+ _tsocket_context_create(mem_ctx, ops, state, sizeof(type), \
+ #type, location)
+
+struct tsocket_address_ops {
+ const char *name;
+
+ char *(*string)(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx);
+
+ struct tsocket_address *(*copy)(const struct tsocket_address *addr,
+ TALLOC_CTX *mem_ctx,
+ const char *location);
+
+ int (*create_socket)(const struct tsocket_address *addr,
+ enum tsocket_type,
+ TALLOC_CTX *mem_ctx,
+ struct tsocket_context **sock,
+ const char *location);
+};
+
+struct tsocket_address {
+ const char *location;
+ const struct tsocket_address_ops *ops;
+
+ void *private_data;
+};
+
+struct tsocket_address *_tsocket_address_create(TALLOC_CTX *mem_ctx,
+ const struct tsocket_address_ops *ops,
+ void *pstate,
+ size_t psize,
+ const char *type,
+ const char *location);
+#define tsocket_address_create(mem_ctx, ops, state, type, location) \
+ _tsocket_address_create(mem_ctx, ops, state, sizeof(type), \
+ #type, location)
+
+int tsocket_error_from_errno(int ret, int sys_errno, bool *retry);
+int tsocket_simple_int_recv(struct tevent_req *req, int *perrno);
+int tsocket_common_prepare_fd(int fd, bool high_fd);
+
+#endif /* _TSOCKET_H */
+
diff --git a/lib/tsocket/tsocket_readv.c b/lib/tsocket/tsocket_readv.c
new file mode 100644
index 0000000000..2c8483ec7e
--- /dev/null
+++ b/lib/tsocket/tsocket_readv.c
@@ -0,0 +1,222 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+struct tsocket_readv_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ tsocket_readv_next_iovec_t next_iovec_fn;
+ void *private_data;
+ } caller;
+
+ /*
+ * Each call to the callback resets iov and count
+ * the callback allocated the iov as child of our state,
+ * that means we are allowed to modify and free it.
+ *
+ * we should call the callback every time we filled the given
+ * vector and ask for a new vector. We return if the callback
+ * ask for 0 bytes.
+ */
+ struct iovec *iov;
+ size_t count;
+
+ /*
+ * the total number of bytes we read,
+ * the return value of the _recv function
+ */
+ int total_read;
+};
+
+static int tsocket_readv_state_destructor(struct tsocket_readv_state *state)
+{
+ if (state->caller.sock) {
+ tsocket_set_readable_handler(state->caller.sock, NULL, NULL);
+ }
+ ZERO_STRUCT(state->caller);
+
+ return 0;
+}
+
+static bool tsocket_readv_ask_for_next_vector(struct tevent_req *req,
+ struct tsocket_readv_state *state)
+{
+ int ret;
+ int err;
+ bool dummy;
+ size_t to_read = 0;
+ size_t i;
+
+ talloc_free(state->iov);
+ state->iov = NULL;
+ state->count = 0;
+
+ ret = state->caller.next_iovec_fn(state->caller.sock,
+ state->caller.private_data,
+ state, &state->iov, &state->count);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ return false;
+ }
+
+ for (i=0; i < state->count; i++) {
+ size_t tmp = to_read;
+ tmp += state->iov[i].iov_len;
+
+ if (tmp < to_read) {
+ tevent_req_error(req, EMSGSIZE);
+ return false;
+ }
+
+ to_read = tmp;
+ }
+
+ if (to_read == 0) {
+ tevent_req_done(req);
+ return false;
+ }
+
+ if (state->total_read + to_read < state->total_read) {
+ tevent_req_error(req, EMSGSIZE);
+ return false;
+ }
+
+ return true;
+}
+
+static void tsocket_readv_handler(struct tsocket_context *sock,
+ void *private_data);
+
+struct tevent_req *tsocket_readv_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ tsocket_readv_next_iovec_t next_iovec_fn,
+ void *private_data)
+{
+ struct tevent_req *req;
+ struct tsocket_readv_state *state;
+ int ret;
+ int err;
+ bool dummy;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_readv_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.next_iovec_fn = next_iovec_fn;
+ state->caller.private_data = private_data;
+
+ state->iov = NULL;
+ state->count = 0;
+ state->total_read = 0;
+
+ if (!tsocket_readv_ask_for_next_vector(req, state)) {
+ goto post;
+ }
+
+ talloc_set_destructor(state, tsocket_readv_state_destructor);
+
+ ret = tsocket_set_readable_handler(sock,
+ tsocket_readv_handler,
+ req);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_readv_handler(struct tsocket_context *sock,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type(private_data,
+ struct tevent_req);
+ struct tsocket_readv_state *state = tevent_req_data(req,
+ struct tsocket_readv_state);
+ ssize_t ret;
+ int err;
+ bool retry;
+
+ ret = tsocket_readv(state->caller.sock,
+ state->iov,
+ state->count);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ state->total_read += ret;
+
+ while (ret > 0) {
+ if (ret < state->iov[0].iov_len) {
+ uint8_t *base;
+ base = (uint8_t *)state->iov[0].iov_base;
+ base += ret;
+ state->iov[0].iov_base = base;
+ state->iov[0].iov_len -= ret;
+ break;
+ }
+ ret -= state->iov[0].iov_len;
+ state->iov += 1;
+ state->count -= 1;
+ }
+
+ if (state->count) {
+ /* we have more to read */
+ return;
+ }
+
+ /* ask the callback for a new vector we should fill */
+ tsocket_readv_ask_for_next_vector(req, state);
+}
+
+int tsocket_readv_recv(struct tevent_req *req, int *perrno)
+{
+ struct tsocket_readv_state *state = tevent_req_data(req,
+ struct tsocket_readv_state);
+ int ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ ret = state->total_read;
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
diff --git a/lib/tsocket/tsocket_recvfrom.c b/lib/tsocket/tsocket_recvfrom.c
new file mode 100644
index 0000000000..467738cfc2
--- /dev/null
+++ b/lib/tsocket/tsocket_recvfrom.c
@@ -0,0 +1,164 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+struct tsocket_recvfrom_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ } caller;
+
+ uint8_t *buf;
+ size_t len;
+ struct tsocket_address *src;
+};
+
+static int tsocket_recvfrom_state_destructor(struct tsocket_recvfrom_state *state)
+{
+ if (state->caller.sock) {
+ tsocket_set_readable_handler(state->caller.sock, NULL, NULL);
+ }
+ ZERO_STRUCT(state->caller);
+
+ return 0;
+}
+
+static void tsocket_recvfrom_handler(struct tsocket_context *sock,
+ void *private_data);
+
+struct tevent_req *tsocket_recvfrom_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx)
+{
+ struct tevent_req *req;
+ struct tsocket_recvfrom_state *state;
+ int ret;
+ int err;
+ bool dummy;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_recvfrom_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->buf = NULL;
+ state->len = 0;
+ state->src = NULL;
+
+ talloc_set_destructor(state, tsocket_recvfrom_state_destructor);
+
+ ret = tsocket_set_readable_handler(sock,
+ tsocket_recvfrom_handler,
+ req);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_recvfrom_handler(struct tsocket_context *sock,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type(private_data,
+ struct tevent_req);
+ struct tsocket_recvfrom_state *state = tevent_req_data(req,
+ struct tsocket_recvfrom_state);
+ ssize_t ret;
+ int err;
+ bool retry;
+
+ ret = tsocket_pending(state->caller.sock);
+ if (ret == 0) {
+ /* retry later */
+ return;
+ }
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ state->buf = talloc_array(state, uint8_t, ret);
+ if (tevent_req_nomem(state->buf, req)) {
+ return;
+ }
+ state->len = ret;
+
+ ret = tsocket_recvfrom(state->caller.sock,
+ state->buf,
+ state->len,
+ state,
+ &state->src);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ if (ret != state->len) {
+ tevent_req_error(req, EIO);
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+ssize_t tsocket_recvfrom_recv(struct tevent_req *req,
+ int *perrno,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **buf,
+ struct tsocket_address **src)
+{
+ struct tsocket_recvfrom_state *state = tevent_req_data(req,
+ struct tsocket_recvfrom_state);
+ ssize_t ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ *buf = talloc_move(mem_ctx, &state->buf);
+ ret = state->len;
+ if (src) {
+ *src = talloc_move(mem_ctx, &state->src);
+ }
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
diff --git a/lib/tsocket/tsocket_sendto.c b/lib/tsocket/tsocket_sendto.c
new file mode 100644
index 0000000000..9c0a76bf16
--- /dev/null
+++ b/lib/tsocket/tsocket_sendto.c
@@ -0,0 +1,271 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+struct tsocket_sendto_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ const uint8_t *buf;
+ size_t len;
+ const struct tsocket_address *dst;
+ } caller;
+
+ ssize_t ret;
+};
+
+static int tsocket_sendto_state_destructor(struct tsocket_sendto_state *state)
+{
+ if (state->caller.sock) {
+ tsocket_set_writeable_handler(state->caller.sock, NULL, NULL);
+ }
+ ZERO_STRUCT(state->caller);
+
+ return 0;
+}
+
+static void tsocket_sendto_handler(struct tsocket_context *sock,
+ void *private_data);
+
+struct tevent_req *tsocket_sendto_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const uint8_t *buf,
+ size_t len,
+ const struct tsocket_address *dst)
+{
+ struct tevent_req *req;
+ struct tsocket_sendto_state *state;
+ int ret;
+ int err;
+ bool dummy;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_sendto_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.buf = buf;
+ state->caller.len = len;
+ state->caller.dst = dst;
+ state->ret = -1;
+
+ /*
+ * this is a fast path, not waiting for the
+ * socket to become explicit writeable gains
+ * about 10%-20% performance in benchmark tests.
+ */
+ tsocket_sendto_handler(sock, req);
+ if (!tevent_req_is_in_progress(req)) {
+ goto post;
+ }
+
+ talloc_set_destructor(state, tsocket_sendto_state_destructor);
+
+ ret = tsocket_set_writeable_handler(sock,
+ tsocket_sendto_handler,
+ req);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_sendto_handler(struct tsocket_context *sock,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type(private_data,
+ struct tevent_req);
+ struct tsocket_sendto_state *state = tevent_req_data(req,
+ struct tsocket_sendto_state);
+ ssize_t ret;
+ int err;
+ bool retry;
+
+ ret = tsocket_sendto(state->caller.sock,
+ state->caller.buf,
+ state->caller.len,
+ state->caller.dst);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ state->ret = ret;
+
+ tevent_req_done(req);
+}
+
+ssize_t tsocket_sendto_recv(struct tevent_req *req, int *perrno)
+{
+ struct tsocket_sendto_state *state = tevent_req_data(req,
+ struct tsocket_sendto_state);
+ ssize_t ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ ret = state->ret;
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
+struct tsocket_sendto_queue_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ const uint8_t *buf;
+ size_t len;
+ const struct tsocket_address *dst;
+ } caller;
+ ssize_t ret;
+};
+
+static void tsocket_sendto_queue_trigger(struct tevent_req *req,
+ void *private_data);
+static void tsocket_sendto_queue_done(struct tevent_req *subreq);
+
+/**
+ * @brief Queue a dgram blob for sending through the socket
+ * @param[in] mem_ctx The memory context for the result
+ * @param[in] sock The socket to send the message buffer
+ * @param[in] queue The existing dgram queue
+ * @param[in] buf The message buffer
+ * @param[in] len The message length
+ * @param[in] dst The destination socket address
+ * @retval The async request handle
+ *
+ * This function queues a blob for sending to destination through an existing
+ * dgram socket. The async callback is triggered when the whole blob is
+ * delivered to the underlying system socket.
+ *
+ * The caller needs to make sure that all non-scalar input parameters hang
+ * arround for the whole lifetime of the request.
+ */
+struct tevent_req *tsocket_sendto_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const uint8_t *buf,
+ size_t len,
+ struct tsocket_address *dst)
+{
+ struct tevent_req *req;
+ struct tsocket_sendto_queue_state *state;
+ bool ok;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_sendto_queue_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.buf = buf;
+ state->caller.len = len;
+ state->caller.dst = dst;
+ state->ret = -1;
+
+ ok = tevent_queue_add(queue,
+ sock->event.ctx,
+ req,
+ tsocket_sendto_queue_trigger,
+ NULL);
+ if (!ok) {
+ tevent_req_nomem(NULL, req);
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_sendto_queue_trigger(struct tevent_req *req,
+ void *private_data)
+{
+ struct tsocket_sendto_queue_state *state = tevent_req_data(req,
+ struct tsocket_sendto_queue_state);
+ struct tevent_req *subreq;
+
+ subreq = tsocket_sendto_send(state->caller.sock,
+ state,
+ state->caller.buf,
+ state->caller.len,
+ state->caller.dst);
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq, tsocket_sendto_queue_done ,req);
+}
+
+static void tsocket_sendto_queue_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct tsocket_sendto_queue_state *state = tevent_req_data(req,
+ struct tsocket_sendto_queue_state);
+ ssize_t ret;
+ int sys_errno;
+
+ ret = tsocket_sendto_recv(subreq, &sys_errno);
+ talloc_free(subreq);
+ if (ret == -1) {
+ tevent_req_error(req, sys_errno);
+ return;
+ }
+ state->ret = ret;
+
+ tevent_req_done(req);
+}
+
+ssize_t tsocket_sendto_queue_recv(struct tevent_req *req, int *perrno)
+{
+ struct tsocket_sendto_queue_state *state = tevent_req_data(req,
+ struct tsocket_sendto_queue_state);
+ ssize_t ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ ret = state->ret;
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
diff --git a/lib/tsocket/tsocket_writev.c b/lib/tsocket/tsocket_writev.c
new file mode 100644
index 0000000000..8c5cd40385
--- /dev/null
+++ b/lib/tsocket/tsocket_writev.c
@@ -0,0 +1,316 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Copyright (C) Stefan Metzmacher 2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+#include "tsocket.h"
+#include "tsocket_internal.h"
+
+struct tsocket_writev_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ const struct iovec *vector;
+ size_t count;
+ } caller;
+
+ struct iovec *iov;
+ size_t count;
+ int total_written;
+};
+
+static int tsocket_writev_state_destructor(struct tsocket_writev_state *state)
+{
+ if (state->caller.sock) {
+ tsocket_set_writeable_handler(state->caller.sock, NULL, NULL);
+ }
+ ZERO_STRUCT(state->caller);
+
+ return 0;
+}
+
+static void tsocket_writev_handler(struct tsocket_context *sock,
+ void *private_data);
+
+struct tevent_req *tsocket_writev_send(struct tsocket_context *sock,
+ TALLOC_CTX *mem_ctx,
+ const struct iovec *vector,
+ size_t count)
+{
+ struct tevent_req *req;
+ struct tsocket_writev_state *state;
+ int ret;
+ int err;
+ bool dummy;
+ int to_write = 0;
+ size_t i;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_writev_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.vector = vector;
+ state->caller.count = count;
+
+ state->iov = NULL;
+ state->count = count;
+ state->total_written = 0;
+
+ state->iov = talloc_array(state, struct iovec, count);
+ if (tevent_req_nomem(state->iov, req)) {
+ goto post;
+ }
+ memcpy(state->iov, vector, sizeof(struct iovec) * count);
+
+ for (i=0; i < count; i++) {
+ int tmp = to_write;
+
+ tmp += state->iov[i].iov_len;
+
+ if (tmp < to_write) {
+ tevent_req_error(req, EMSGSIZE);
+ goto post;
+ }
+
+ to_write = tmp;
+ }
+
+ if (to_write == 0) {
+ tevent_req_done(req);
+ goto post;
+ }
+
+ /*
+ * this is a fast path, not waiting for the
+ * socket to become explicit writeable gains
+ * about 10%-20% performance in benchmark tests.
+ */
+ tsocket_writev_handler(sock, req);
+ if (!tevent_req_is_in_progress(req)) {
+ goto post;
+ }
+
+ talloc_set_destructor(state, tsocket_writev_state_destructor);
+
+ ret = tsocket_set_writeable_handler(sock,
+ tsocket_writev_handler,
+ req);
+ err = tsocket_error_from_errno(ret, errno, &dummy);
+ if (tevent_req_error(req, err)) {
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_writev_handler(struct tsocket_context *sock,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type(private_data,
+ struct tevent_req);
+ struct tsocket_writev_state *state = tevent_req_data(req,
+ struct tsocket_writev_state);
+ int ret;
+ int err;
+ bool retry;
+
+ ret = tsocket_writev(state->caller.sock,
+ state->iov,
+ state->count);
+ err = tsocket_error_from_errno(ret, errno, &retry);
+ if (retry) {
+ /* retry later */
+ return;
+ }
+ if (tevent_req_error(req, err)) {
+ return;
+ }
+
+ state->total_written += ret;
+
+ /*
+ * we have not written everything yet, so we need to truncate
+ * the already written bytes from our iov copy
+ */
+ while (ret > 0) {
+ if (ret < state->iov[0].iov_len) {
+ uint8_t *base;
+ base = (uint8_t *)state->iov[0].iov_base;
+ base += ret;
+ state->iov[0].iov_base = base;
+ state->iov[0].iov_len -= ret;
+ break;
+ }
+ ret -= state->iov[0].iov_len;
+ state->iov += 1;
+ state->count -= 1;
+ }
+
+ if (state->count > 0) {
+ /* more to write */
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+int tsocket_writev_recv(struct tevent_req *req, int *perrno)
+{
+ struct tsocket_writev_state *state = tevent_req_data(req,
+ struct tsocket_writev_state);
+ int ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ ret = state->total_written;
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
+struct tsocket_writev_queue_state {
+ /* this structs are owned by the caller */
+ struct {
+ struct tsocket_context *sock;
+ const struct iovec *vector;
+ size_t count;
+ } caller;
+ int ret;
+};
+
+static void tsocket_writev_queue_trigger(struct tevent_req *req,
+ void *private_data);
+static void tsocket_writev_queue_done(struct tevent_req *subreq);
+
+/**
+ * @brief Queue a dgram blob for sending through the socket
+ * @param[in] mem_ctx The memory context for the result
+ * @param[in] sock The socket to send data through
+ * @param[in] queue The existing send queue
+ * @param[in] vector The iovec vector so write
+ * @param[in] count The size of the vector
+ * @retval The async request handle
+ *
+ * This function queues a blob for sending to destination through an existing
+ * dgram socket. The async callback is triggered when the whole blob is
+ * delivered to the underlying system socket.
+ *
+ * The caller needs to make sure that all non-scalar input parameters hang
+ * arround for the whole lifetime of the request.
+ */
+struct tevent_req *tsocket_writev_queue_send(TALLOC_CTX *mem_ctx,
+ struct tsocket_context *sock,
+ struct tevent_queue *queue,
+ const struct iovec *vector,
+ size_t count)
+{
+ struct tevent_req *req;
+ struct tsocket_writev_queue_state *state;
+ bool ok;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct tsocket_writev_queue_state);
+ if (!req) {
+ return NULL;
+ }
+
+ state->caller.sock = sock;
+ state->caller.vector = vector;
+ state->caller.count = count;
+ state->ret = -1;
+
+ ok = tevent_queue_add(queue,
+ sock->event.ctx,
+ req,
+ tsocket_writev_queue_trigger,
+ NULL);
+ if (!ok) {
+ tevent_req_nomem(NULL, req);
+ goto post;
+ }
+
+ return req;
+
+ post:
+ return tevent_req_post(req, sock->event.ctx);
+}
+
+static void tsocket_writev_queue_trigger(struct tevent_req *req,
+ void *private_data)
+{
+ struct tsocket_writev_queue_state *state = tevent_req_data(req,
+ struct tsocket_writev_queue_state);
+ struct tevent_req *subreq;
+
+ subreq = tsocket_writev_send(state->caller.sock,
+ state,
+ state->caller.vector,
+ state->caller.count);
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq, tsocket_writev_queue_done ,req);
+}
+
+static void tsocket_writev_queue_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct tsocket_writev_queue_state *state = tevent_req_data(req,
+ struct tsocket_writev_queue_state);
+ int ret;
+ int sys_errno;
+
+ ret = tsocket_writev_recv(subreq, &sys_errno);
+ talloc_free(subreq);
+ if (ret == -1) {
+ tevent_req_error(req, sys_errno);
+ return;
+ }
+ state->ret = ret;
+
+ tevent_req_done(req);
+}
+
+int tsocket_writev_queue_recv(struct tevent_req *req, int *perrno)
+{
+ struct tsocket_writev_queue_state *state = tevent_req_data(req,
+ struct tsocket_writev_queue_state);
+ int ret;
+
+ ret = tsocket_simple_int_recv(req, perrno);
+ if (ret == 0) {
+ ret = state->ret;
+ }
+
+ tevent_req_received(req);
+ return ret;
+}
+
diff --git a/lib/util/config.mk b/lib/util/config.mk
index 14bdb2a277..7835fed911 100644
--- a/lib/util/config.mk
+++ b/lib/util/config.mk
@@ -5,7 +5,7 @@ PUBLIC_DEPENDENCIES = \
CHARSET EXECINFO
LIBSAMBA-UTIL_OBJ_FILES = $(addprefix $(libutilsrcdir)/, \
- xfile.o \
+ xfile.o \
debug.o \
fault.o \
signal.o \
@@ -68,6 +68,13 @@ PUBLIC_DEPENDENCIES = LIBTDB
UTIL_TDB_OBJ_FILES = $(libutilsrcdir)/util_tdb.o
+[SUBSYSTEM::UTIL_TEVENT]
+PUBLIC_DEPENDENCIES = LIBTEVENT
+
+UTIL_TEVENT_OBJ_FILES = $(addprefix $(libutilsrcdir)/, \
+ tevent_unix.o \
+ tevent_ntstatus.o)
+
[SUBSYSTEM::UTIL_LDB]
PUBLIC_DEPENDENCIES = LIBLDB