author    Volker Lendecke <vlendec@samba.org>  2005-10-16 12:43:09 +0000
committer Gerald (Jerry) Carter <jerry@samba.org>  2007-10-10 13:44:48 -0500
commit    d68319431e62e43c0ecb23328e3162128d823958 (patch)
tree      9a798646b260534b8f293f3facc925bb1058dd6e  /source4/winbind/wb_async_helpers.c
parent    17355fbbd4c4a904bb75c1d8ba98948edaf0fe68 (diff)
r11095: Implement wb_getuserdomgroups.
Tridge, if you have the time, you might want to look at a problem I'm having with unix domain stream sockets. From a comment in this commit:

/* Using composite_trigger_error here causes problems with the client
 * socket. Linux 2.6.8 gives me a ECONNRESET on the next read after
 * writing the reply when I don't wait the 100 milliseconds. */

This is in winbind/wb_cmd_userdomgroups.c:93. The problem I have is that I cannot *immediately* send an error reply to the client, because the next receive fails. Waiting 100 milliseconds helps. It might also be a problem with epoll(); I don't really know. I'd appreciate it if you took a brief look at this; maybe I'm doing something wrong.

Thanks, Volker

(This used to be commit 3e535cce743710a68a4264e4f66e9c0c4d6770c6)
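The workaround described above lives in wb_cmd_userdomgroups.c, not in this file, but the pattern is roughly the following: instead of reporting the error on the composite context right away, schedule a timed event about 100 milliseconds in the future and report it from there. This is only a sketch under assumptions: the event API names and signatures (event_add_timed, timeval_current_ofs, the timed-event handler prototype) and the trigger_error_delayed helper are reconstructed from memory of the source4 tree of that era, not taken from this commit.

struct delayed_error_state {
	struct composite_context *ctx;
	NTSTATUS status;
};

/* Timed-event handler: fires ~100ms later and only then reports the
 * error, so the reply already written to the client socket is not
 * reset underneath the client's next read. */
static void delayed_error_handler(struct event_context *ev,
				  struct timed_event *te,
				  struct timeval t, void *private)
{
	struct delayed_error_state *state =
		talloc_get_type(private, struct delayed_error_state);
	composite_error(state->ctx, state->status);
}

/* Hypothetical stand-in for composite_trigger_error(): defer the error
 * by 100 milliseconds to avoid the ECONNRESET seen on Linux 2.6.8. */
static void trigger_error_delayed(struct composite_context *ctx,
				  NTSTATUS status)
{
	struct delayed_error_state *state =
		talloc(ctx, struct delayed_error_state);
	if (state == NULL) {
		composite_error(ctx, status); /* report immediately */
		return;
	}
	state->ctx = ctx;
	state->status = status;
	event_add_timed(ctx->event_ctx, state,
			timeval_current_ofs(0, 100000),
			delayed_error_handler, state);
}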
Diffstat (limited to 'source4/winbind/wb_async_helpers.c')
-rw-r--r--  source4/winbind/wb_async_helpers.c  138
1 file changed, 138 insertions, 0 deletions
diff --git a/source4/winbind/wb_async_helpers.c b/source4/winbind/wb_async_helpers.c
index eeed108719..c8749896df 100644
--- a/source4/winbind/wb_async_helpers.c
+++ b/source4/winbind/wb_async_helpers.c
@@ -610,3 +610,141 @@ NTSTATUS composite_netr_LogonSamLogon_recv(struct composite_context *ctx)
return status;
}
+struct samr_getuserdomgroups_state {
+ struct composite_context *ctx;
+ struct dcerpc_pipe *samr_pipe;
+
+ int num_rids;
+ uint32_t *rids;
+
+ struct policy_handle *user_handle;
+ struct samr_OpenUser o;
+ struct samr_GetGroupsForUser g;
+ struct samr_Close c;
+};
+
+static void samr_usergroups_recv_open(struct rpc_request *req);
+static void samr_usergroups_recv_groups(struct rpc_request *req);
+static void samr_usergroups_recv_close(struct rpc_request *req);
+
+struct composite_context *wb_samr_userdomgroups_send(struct dcerpc_pipe *samr_pipe,
+ struct policy_handle *domain_handle,
+ uint32_t rid)
+{
+ struct composite_context *result;
+ struct rpc_request *req;
+ struct samr_getuserdomgroups_state *state;
+
+ result = talloc_zero(NULL, struct composite_context);
+ if (result == NULL) goto failed;
+ result->state = COMPOSITE_STATE_IN_PROGRESS;
+ result->async.fn = NULL;
+ result->event_ctx = samr_pipe->conn->event_ctx;
+
+ state = talloc(result, struct samr_getuserdomgroups_state);
+ if (state == NULL) goto failed;
+ result->private_data = state;
+ state->ctx = result;
+
+ state->samr_pipe = samr_pipe;
+
+ state->user_handle = talloc(state, struct policy_handle);
+ if (state->user_handle == NULL) goto failed;
+
+ state->o.in.domain_handle = domain_handle;
+ state->o.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
+ state->o.in.rid = rid;
+ state->o.out.user_handle = state->user_handle;
+
+ req = dcerpc_samr_OpenUser_send(state->samr_pipe, state, &state->o);
+ if (req == NULL) goto failed;
+
+ req->async.callback = samr_usergroups_recv_open;
+ req->async.private = state;
+ return result;
+
+ failed:
+ talloc_free(result);
+ return NULL;
+}
+
+static void samr_usergroups_recv_open(struct rpc_request *req)
+{
+ struct samr_getuserdomgroups_state *state =
+ talloc_get_type(req->async.private,
+ struct samr_getuserdomgroups_state);
+
+ state->ctx->status = dcerpc_ndr_request_recv(req);
+ if (!composite_is_ok(state->ctx)) return;
+ state->ctx->status = state->o.out.result;
+ if (!composite_is_ok(state->ctx)) return;
+
+ state->g.in.user_handle = state->user_handle;
+
+ req = dcerpc_samr_GetGroupsForUser_send(state->samr_pipe, state,
+ &state->g);
+ composite_continue_rpc(state->ctx, req, samr_usergroups_recv_groups,
+ state);
+}
+
+static void samr_usergroups_recv_groups(struct rpc_request *req)
+{
+ struct samr_getuserdomgroups_state *state =
+ talloc_get_type(req->async.private,
+ struct samr_getuserdomgroups_state);
+
+ state->ctx->status = dcerpc_ndr_request_recv(req);
+ if (!composite_is_ok(state->ctx)) return;
+ state->ctx->status = state->g.out.result;
+ if (!composite_is_ok(state->ctx)) return;
+
+ state->c.in.handle = state->user_handle;
+ state->c.out.handle = state->user_handle;
+
+ req = dcerpc_samr_Close_send(state->samr_pipe, state, &state->c);
+ composite_continue_rpc(state->ctx, req, samr_usergroups_recv_close,
+ state);
+}
+
+static void samr_usergroups_recv_close(struct rpc_request *req)
+{
+ struct samr_getuserdomgroups_state *state =
+ talloc_get_type(req->async.private,
+ struct samr_getuserdomgroups_state);
+
+ state->ctx->status = dcerpc_ndr_request_recv(req);
+ if (!composite_is_ok(state->ctx)) return;
+ state->ctx->status = state->c.out.result;
+ if (!composite_is_ok(state->ctx)) return;
+
+ composite_done(state->ctx);
+}
+
+NTSTATUS wb_samr_userdomgroups_recv(struct composite_context *ctx,
+ TALLOC_CTX *mem_ctx,
+ int *num_rids, uint32_t **rids)
+{
+ struct samr_getuserdomgroups_state *state =
+ talloc_get_type(ctx->private_data,
+ struct samr_getuserdomgroups_state);
+
+ int i;
+ NTSTATUS status = composite_wait(ctx);
+ if (!NT_STATUS_IS_OK(status)) goto done;
+
+ *num_rids = state->g.out.rids->count;
+ *rids = talloc_array(mem_ctx, uint32_t, *num_rids);
+ if (*rids == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto done;
+ }
+
+ for (i=0; i<*num_rids; i++) {
+ (*rids)[i] = state->g.out.rids->rids[i].rid;
+ }
+
+ done:
+ talloc_free(ctx);
+ return status;
+}
+
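
For illustration, a caller might drive the new helper like this. The function below is hypothetical and not part of the commit; it only assumes the wb_samr_userdomgroups_send/_recv signatures added above, plus an already established SAMR pipe and an open domain handle.

static NTSTATUS get_userdomgroups_example(TALLOC_CTX *mem_ctx,
					  struct dcerpc_pipe *samr_pipe,
					  struct policy_handle *domain_handle,
					  uint32_t rid,
					  int *num_rids, uint32_t **rids)
{
	/* Kick off the async OpenUser -> GetGroupsForUser -> Close chain. */
	struct composite_context *ctx =
		wb_samr_userdomgroups_send(samr_pipe, domain_handle, rid);
	if (ctx == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	/* _recv calls composite_wait(), copies the RIDs onto mem_ctx and
	 * frees the composite context. */
	return wb_samr_userdomgroups_recv(ctx, mem_ctx, num_rids, rids);
}

Inside the winbind daemon a caller would instead set ctx->async.fn and continue asynchronously; the synchronous form above is just the shortest way to show the contract of the send/recv pair.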