summaryrefslogtreecommitdiff
path: root/source4/winbind/wb_init_domain.c
diff options
context:
space:
mode:
authorVolker Lendecke <vlendec@samba.org>2005-10-15 19:18:05 +0000
committerGerald (Jerry) Carter <jerry@samba.org>2007-10-10 13:44:48 -0500
commit42ececdfae15a34205638cc6e3ec53d6f3ac2148 (patch)
treeca9b6b82f861b11c3f0362e5bb863d7efa62f953 /source4/winbind/wb_init_domain.c
parent9259f9ecc0ab6fa3faeb582796d59420e71fc069 (diff)
downloadsamba-42ececdfae15a34205638cc6e3ec53d6f3ac2148.tar.gz
samba-42ececdfae15a34205638cc6e3ec53d6f3ac2148.tar.bz2
samba-42ececdfae15a34205638cc6e3ec53d6f3ac2148.zip
r11093: Implement wb_queue_domain_send: If the domain is not yet initialized, do that
first. And if a request is being processed, queue it. This correctly survived 3 endless loops with wbinfo's doing different things while starting up smbd. The number of indirections starts to become a bit scary, but what can you do without a decent programming language that provides closures :-) One thing that we might consider is to auto-generate async rpc requests that return composite_context structs instead of rpc_requests. Otherwise I'd have to write a lot of wrappers like composite_netr_LogonSamLogon_send. The alternative would be to write two versions of wb_queue_domain_send which I would like to avoid. This is cluttered enough already. Volker (This used to be commit 66c1b674f9870de73cce0e611909caf9eff34baa)
Diffstat (limited to 'source4/winbind/wb_init_domain.c')
-rw-r--r--source4/winbind/wb_init_domain.c112
1 files changed, 112 insertions, 0 deletions
diff --git a/source4/winbind/wb_init_domain.c b/source4/winbind/wb_init_domain.c
index c9389ea7dd..fbe44244f4 100644
--- a/source4/winbind/wb_init_domain.c
+++ b/source4/winbind/wb_init_domain.c
@@ -26,6 +26,7 @@
#include "winbind/wb_async_helpers.h"
#include "winbind/wb_server.h"
#include "smbd/service_stream.h"
+#include "dlinklist.h"
#include "librpc/gen_ndr/nbt.h"
#include "librpc/gen_ndr/samr.h"
@@ -350,3 +351,114 @@ NTSTATUS wb_init_domain(struct wbsrv_domain *domain,
wb_init_domain_send(domain, event_ctx, messaging_ctx);
return wb_init_domain_recv(c);
}
+
+/*
+ * Per-request state for wb_queue_domain_send: one entry per request that
+ * is either in flight or parked on domain->request_queue.  prev/next are
+ * the DLIST linkage used by DLIST_ADD_END/DLIST_REMOVE below.
+ */
+struct queue_domain_state {
+ struct queue_domain_state *prev, *next;
+ struct composite_context *ctx; /* the composite request handed back to the caller */
+ struct wbsrv_domain *domain; /* domain this request is serialized against */
+ struct composite_context *(*send_fn)(void *p); /* starts the actual operation */
+ NTSTATUS (*recv_fn)(struct composite_context *c,
+ void *p); /* collects the operation's result */
+ void *private_data; /* opaque argument passed to send_fn/recv_fn */
+};
+
+static void queue_domain_recv_init(struct composite_context *ctx);
+static void queue_domain_recv_sub(struct composite_context *ctx);
+
+/*
+ * Queue a request against a winbind domain, serializing access to it.
+ *
+ * Three dispatch paths:
+ *  - domain busy: park the request on domain->request_queue; it is picked
+ *    up later by queue_domain_recv_sub when the current request finishes.
+ *  - domain idle but not initialized: run wb_init_domain_send first, then
+ *    fire send_fn from queue_domain_recv_init.
+ *  - domain idle and initialized: fire send_fn immediately.
+ *
+ * Returns a composite_context the caller completes via
+ * wb_queue_domain_recv(), or NULL on out-of-memory / failed dispatch.
+ * send_fn/recv_fn/private_data follow the contract documented on
+ * struct queue_domain_state.
+ */
+struct composite_context *wb_queue_domain_send(TALLOC_CTX *mem_ctx,
+ struct wbsrv_domain *domain,
+ struct event_context *event_ctx,
+ struct messaging_context *msg_ctx,
+ struct composite_context *(*send_fn)(void *p),
+ NTSTATUS (*recv_fn)(struct composite_context *c,
+ void *p),
+ void *private_data)
+{
+ struct composite_context *result, *ctx;
+ struct queue_domain_state *state;
+
+ result = talloc(mem_ctx, struct composite_context);
+ if (result == NULL) goto failed;
+ result->state = COMPOSITE_STATE_IN_PROGRESS;
+ result->async.fn = NULL;
+ result->event_ctx = event_ctx;
+
+ /* state is a talloc child of result, so freeing the composite
+ * request also releases the queue entry. */
+ state = talloc(result, struct queue_domain_state);
+ if (state == NULL) goto failed;
+ state->ctx = result;
+ result->private_data = state;
+
+ state->send_fn = send_fn;
+ state->recv_fn = recv_fn;
+ state->private_data = private_data;
+ state->domain = domain;
+
+ if (domain->busy) {
+ /* NOTE(review): DEBUG level 0 for a routine queueing event
+ * looks noisy; presumably left over from development — consider
+ * a higher debug level. */
+ DEBUG(0, ("Domain %s busy\n", domain->name));
+ DLIST_ADD_END(domain->request_queue, state,
+ struct queue_domain_state *);
+ return result;
+ }
+
+ /* We own the domain until queue_domain_recv_sub clears this. */
+ domain->busy = True;
+
+ if (!domain->initialized) {
+ /* Initialize the domain first; queue_domain_recv_init
+ * fires send_fn once that completes. */
+ ctx = wb_init_domain_send(domain, result->event_ctx, msg_ctx);
+ if (ctx == NULL) goto failed;
+ ctx->async.fn = queue_domain_recv_init;
+ ctx->async.private_data = state;
+ return result;
+ }
+
+ ctx = state->send_fn(state->private_data);
+ if (ctx == NULL) goto failed;
+ ctx->async.fn = queue_domain_recv_sub;
+ ctx->async.private_data = state;
+ return result;
+
+ failed:
+ /* NOTE(review): the two "goto failed" paths after domain->busy was
+ * set to True return without clearing it — verify whether a failed
+ * dispatch should reset domain->busy so later requests can run. */
+ talloc_free(result);
+ return NULL;
+}
+
+/*
+ * Continuation after wb_init_domain_send: the domain is now initialized,
+ * so fire the caller's actual operation (send_fn) and hand completion to
+ * queue_domain_recv_sub.
+ */
+static void queue_domain_recv_init(struct composite_context *ctx)
+{
+ struct queue_domain_state *state =
+ talloc_get_type(ctx->async.private_data,
+ struct queue_domain_state);
+
+ state->ctx->status = wb_init_domain_recv(ctx);
+ /* NOTE(review): on init failure this returns with domain->busy still
+ * True (it is only cleared in queue_domain_recv_sub), so any parked
+ * requests on domain->request_queue would never be dispatched —
+ * confirm whether the queue should be drained/failed here. */
+ if (!composite_is_ok(state->ctx)) return;
+
+ ctx = state->send_fn(state->private_data);
+ /* composite_continue errors state->ctx itself if ctx is NULL. */
+ composite_continue(state->ctx, ctx, queue_domain_recv_sub, state);
+}
+
+/*
+ * Completion handler for a queued operation: collect the result via
+ * recv_fn, release the domain, and — if other requests are parked on
+ * domain->request_queue — dequeue the head and dispatch it before
+ * completing our own composite request.
+ */
+static void queue_domain_recv_sub(struct composite_context *ctx)
+{
+ struct queue_domain_state *state =
+ talloc_get_type(ctx->async.private_data,
+ struct queue_domain_state);
+
+ state->ctx->status = state->recv_fn(ctx, state->private_data);
+ state->domain->busy = False;
+
+ /* Kick the next queued request, if any, before signalling our own
+ * completion; it re-marks the domain busy. */
+ if (state->domain->request_queue != NULL) {
+ struct queue_domain_state *s2;
+ s2 = state->domain->request_queue;
+ DLIST_REMOVE(state->domain->request_queue, s2);
+ ctx = s2->send_fn(s2->private_data);
+ composite_continue(s2->ctx, ctx, queue_domain_recv_sub, s2);
+ state->domain->busy = True;
+ }
+
+ /* Now report our own result; composite_is_ok triggers the error
+ * callback if recv_fn failed above. */
+ if (!composite_is_ok(state->ctx)) return;
+ composite_done(state->ctx);
+}
+
+/*
+ * Synchronous receive side of wb_queue_domain_send: wait for the
+ * composite request to complete, free it (which also frees the attached
+ * queue_domain_state), and return its status.
+ */
+NTSTATUS wb_queue_domain_recv(struct composite_context *ctx)
+{
+ NTSTATUS status = composite_wait(ctx);
+ talloc_free(ctx);
+ return status;
+}