Diffstat (limited to 'source3/rpc_client')
-rw-r--r--  source3/rpc_client/cli_pipe.c            | 217
-rw-r--r--  source3/rpc_client/init_netlogon.c       |  18
-rw-r--r--  source3/rpc_client/rpc_transport_np.c    | 162
-rw-r--r--  source3/rpc_client/rpc_transport_smbd.c  | 694
4 files changed, 951 insertions(+), 140 deletions(-)
diff --git a/source3/rpc_client/cli_pipe.c b/source3/rpc_client/cli_pipe.c
index 5a53c0d940..2841ff08f6 100644
--- a/source3/rpc_client/cli_pipe.c
+++ b/source3/rpc_client/cli_pipe.c
@@ -81,9 +81,10 @@ static const struct pipe_id_info {
Return the pipe name from the interface.
****************************************************************************/
-const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
- const struct ndr_syntax_id *interface)
+const char *get_pipe_name_from_iface(const struct ndr_syntax_id *interface)
{
+ char *guid_str;
+ const char *result;
int i;
for (i = 0; pipe_names[i].client_pipe; i++) {
if (ndr_syntax_id_equal(pipe_names[i].abstr_syntax,
@@ -97,7 +98,18 @@ const char *cli_get_pipe_name_from_iface(TALLOC_CTX *mem_ctx,
* interested in the known pipes mentioned in pipe_names[]
*/
- return NULL;
+ guid_str = GUID_string(talloc_tos(), &interface->uuid);
+ if (guid_str == NULL) {
+ return NULL;
+ }
+ result = talloc_asprintf(talloc_tos(), "Interface %s.%d", guid_str,
+ (int)interface->if_version);
+ TALLOC_FREE(guid_str);
+
+ if (result == NULL) {
+ return "PIPE";
+ }
+ return result;
}
/********************************************************************
@@ -243,7 +255,7 @@ static void rpc_read_done(struct async_req *subreq)
status = state->transport->read_recv(subreq, &received);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -266,7 +278,7 @@ static void rpc_read_done(struct async_req *subreq)
static NTSTATUS rpc_read_recv(struct async_req *req)
{
- return async_req_simple_recv(req);
+ return async_req_simple_recv_ntstatus(req);
}
struct rpc_write_state {
@@ -323,7 +335,7 @@ static void rpc_write_done(struct async_req *subreq)
status = state->transport->write_recv(subreq, &written);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -347,7 +359,7 @@ static void rpc_write_done(struct async_req *subreq)
static NTSTATUS rpc_write_recv(struct async_req *req)
{
- return async_req_simple_recv(req);
+ return async_req_simple_recv_ntstatus(req);
}
@@ -458,7 +470,7 @@ static struct async_req *get_complete_frag_send(TALLOC_CTX *mem_ctx,
status = NT_STATUS_OK;
post_status:
- if (async_post_status(result, ev, status)) {
+ if (async_post_ntstatus(result, ev, status)) {
return result;
}
TALLOC_FREE(result);
@@ -476,18 +488,18 @@ static void get_complete_frag_got_header(struct async_req *subreq)
status = rpc_read_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
status = parse_rpc_header(state->cli, state->prhdr, state->pdu);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
if (!rpc_grow_buffer(state->pdu, state->prhdr->frag_len)) {
- async_req_error(req, NT_STATUS_NO_MEMORY);
+ async_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
@@ -516,7 +528,7 @@ static void get_complete_frag_got_rest(struct async_req *subreq)
status = rpc_read_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -524,7 +536,7 @@ static void get_complete_frag_got_rest(struct async_req *subreq)
static NTSTATUS get_complete_frag_recv(struct async_req *req)
{
- return async_req_simple_recv(req);
+ return async_req_simple_recv_ntstatus(req);
}
/****************************************************************************
@@ -1084,7 +1096,7 @@ static struct async_req *cli_api_pipe_send(TALLOC_CTX *mem_ctx,
status = NT_STATUS_INVALID_PARAMETER;
post_status:
- if (async_post_status(result, ev, status)) {
+ if (async_post_ntstatus(result, ev, status)) {
return result;
}
fail:
@@ -1104,7 +1116,7 @@ static void cli_api_pipe_trans_done(struct async_req *subreq)
&state->rdata_len);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -1121,7 +1133,7 @@ static void cli_api_pipe_write_done(struct async_req *subreq)
status = rpc_write_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -1157,7 +1169,7 @@ static void cli_api_pipe_read_done(struct async_req *subreq)
status = state->transport->read_recv(subreq, &received);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
state->rdata_len = received;
@@ -1171,7 +1183,7 @@ static NTSTATUS cli_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
req->private_data, struct cli_api_pipe_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
return status;
}
@@ -1252,7 +1264,7 @@ static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
prs_init_empty(&state->incoming_pdu, state, UNMARSHALL);
/* Make incoming_pdu dynamic with no memory. */
- prs_give_memory(&state->incoming_pdu, 0, 0, true);
+ prs_give_memory(&state->incoming_pdu, NULL, 0, true);
talloc_set_destructor(state, rpc_api_pipe_state_destructor);
@@ -1284,7 +1296,7 @@ static struct async_req *rpc_api_pipe_send(TALLOC_CTX *mem_ctx,
return result;
post_status:
- if (async_post_status(result, ev, status)) {
+ if (async_post_ntstatus(result, ev, status)) {
return result;
}
TALLOC_FREE(result);
@@ -1306,7 +1318,7 @@ static void rpc_api_pipe_trans_done(struct async_req *subreq)
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("cli_api_pipe failed: %s\n", nt_errstr(status)));
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -1354,7 +1366,7 @@ static void rpc_api_pipe_got_pdu(struct async_req *subreq)
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("get_complete_frag failed: %s\n",
nt_errstr(status)));
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -1369,7 +1381,7 @@ static void rpc_api_pipe_got_pdu(struct async_req *subreq)
nt_errstr(status)));
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -1393,13 +1405,13 @@ static void rpc_api_pipe_got_pdu(struct async_req *subreq)
"%s\n",
state->incoming_pdu.bigendian_data?"big":"little",
state->incoming_frag.bigendian_data?"big":"little"));
- async_req_error(req, NT_STATUS_INVALID_PARAMETER);
+ async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
return;
}
/* Now copy the data portion out of the pdu into rbuf. */
if (!prs_force_grow(&state->incoming_pdu, rdata_len)) {
- async_req_error(req, NT_STATUS_NO_MEMORY);
+ async_req_nterror(req, NT_STATUS_NO_MEMORY);
return;
}
@@ -1410,7 +1422,7 @@ static void rpc_api_pipe_got_pdu(struct async_req *subreq)
status = cli_pipe_reset_current_pdu(state->cli, &state->rhdr,
&state->incoming_frag);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -1438,7 +1450,7 @@ static NTSTATUS rpc_api_pipe_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
req->private_data, struct rpc_api_pipe_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
return status;
}
@@ -2105,7 +2117,7 @@ struct async_req *rpc_api_pipe_req_send(TALLOC_CTX *mem_ctx,
return result;
post_status:
- if (async_post_status(result, ev, status)) {
+ if (async_post_ntstatus(result, ev, status)) {
return result;
}
TALLOC_FREE(result);
@@ -2209,13 +2221,13 @@ static void rpc_api_pipe_req_write_done(struct async_req *subreq)
status = rpc_write_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
status = prepare_next_frag(state, &is_last_frag);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -2253,7 +2265,7 @@ static void rpc_api_pipe_req_done(struct async_req *subreq)
status = rpc_api_pipe_recv(subreq, state, &state->reply_pdu);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -2266,7 +2278,7 @@ NTSTATUS rpc_api_pipe_req_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
req->private_data, struct rpc_api_pipe_req_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
/*
* We always have to initialize to reply pdu, even if there is
* none. The rpccli_* caller routines expect this.
@@ -2573,7 +2585,7 @@ struct async_req *rpc_pipe_bind_send(TALLOC_CTX *mem_ctx,
return result;
post_status:
- if (async_post_status(result, ev, status)) {
+ if (async_post_ntstatus(result, ev, status)) {
return result;
}
TALLOC_FREE(result);
@@ -2597,27 +2609,30 @@ static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
DEBUG(3, ("rpc_pipe_bind: %s bind request returned %s\n",
rpccli_pipe_txt(debug_ctx(), state->cli),
nt_errstr(status)));
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
/* Unmarshall the RPC header */
if (!smb_io_rpc_hdr("hdr", &hdr, &reply_pdu, 0)) {
DEBUG(0, ("rpc_pipe_bind: failed to unmarshall RPC_HDR.\n"));
- async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
+ prs_mem_free(&reply_pdu);
+ async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
return;
}
if (!smb_io_rpc_hdr_ba("", &hdr_ba, &reply_pdu, 0)) {
DEBUG(0, ("rpc_pipe_bind: Failed to unmarshall "
"RPC_HDR_BA.\n"));
- async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
+ prs_mem_free(&reply_pdu);
+ async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
return;
}
if (!check_bind_response(&hdr_ba, &state->cli->transfer_syntax)) {
DEBUG(2, ("rpc_pipe_bind: check_bind_response failed.\n"));
- async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
+ prs_mem_free(&reply_pdu);
+ async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
return;
}
@@ -2633,6 +2648,7 @@ static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
case PIPE_AUTH_TYPE_NONE:
case PIPE_AUTH_TYPE_SCHANNEL:
/* Bind complete. */
+ prs_mem_free(&reply_pdu);
async_req_done(req);
break;
@@ -2640,8 +2656,9 @@ static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
/* Need to send AUTH3 packet - no reply. */
status = rpc_finish_auth3_bind_send(req, state, &hdr,
&reply_pdu);
+ prs_mem_free(&reply_pdu);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
}
break;
@@ -2649,8 +2666,9 @@ static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
/* Need to send alter context request and reply. */
status = rpc_finish_spnego_ntlmssp_bind_send(req, state, &hdr,
&reply_pdu);
+ prs_mem_free(&reply_pdu);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
}
break;
@@ -2660,7 +2678,8 @@ static void rpc_pipe_bind_step_one_done(struct async_req *subreq)
default:
DEBUG(0,("cli_finish_bind_auth: unknown auth type %u\n",
(unsigned int)state->cli->auth->auth_type));
- async_req_error(req, NT_STATUS_INTERNAL_ERROR);
+ prs_mem_free(&reply_pdu);
+ async_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
}
}
@@ -2737,7 +2756,7 @@ static void rpc_bind_auth3_write_done(struct async_req *subreq)
status = rpc_write_recv(subreq);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -2852,7 +2871,7 @@ static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
status = rpc_api_pipe_recv(subreq, talloc_tos(), &reply_pdu);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
@@ -2860,19 +2879,19 @@ static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
if (!smb_io_rpc_hdr("rpc_hdr ", &hdr, &reply_pdu, 0)) {
DEBUG(0, ("rpc_finish_spnego_ntlmssp_bind: Failed to "
"unmarshall RPC_HDR.\n"));
- async_req_error(req, NT_STATUS_BUFFER_TOO_SMALL);
+ async_req_nterror(req, NT_STATUS_BUFFER_TOO_SMALL);
return;
}
if (!prs_set_offset(
&reply_pdu,
hdr.frag_len - hdr.auth_len - RPC_HDR_AUTH_LEN)) {
- async_req_error(req, NT_STATUS_INVALID_PARAMETER);
+ async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
return;
}
if (!smb_io_rpc_hdr_auth("hdr_auth", &hdr_auth, &reply_pdu, 0)) {
- async_req_error(req, NT_STATUS_INVALID_PARAMETER);
+ async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
return;
}
@@ -2885,7 +2904,7 @@ static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
OID_NTLMSSP, &tmp_blob)) {
data_blob_free(&server_spnego_response);
data_blob_free(&tmp_blob);
- async_req_error(req, NT_STATUS_INVALID_PARAMETER);
+ async_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
return;
}
@@ -2899,7 +2918,7 @@ static void rpc_bind_ntlmssp_api_done(struct async_req *subreq)
NTSTATUS rpc_pipe_bind_recv(struct async_req *req)
{
- return async_req_simple_recv(req);
+ return async_req_simple_recv_ntstatus(req);
}
NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli,
@@ -3511,6 +3530,61 @@ static NTSTATUS rpc_pipe_open_np(struct cli_state *cli,
return NT_STATUS_OK;
}
+NTSTATUS rpc_pipe_open_local(TALLOC_CTX *mem_ctx,
+ struct rpc_cli_smbd_conn *conn,
+ const struct ndr_syntax_id *syntax,
+ struct rpc_pipe_client **presult)
+{
+ struct rpc_pipe_client *result;
+ struct cli_pipe_auth_data *auth;
+ NTSTATUS status;
+
+ result = talloc(mem_ctx, struct rpc_pipe_client);
+ if (result == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ result->abstract_syntax = *syntax;
+ result->transfer_syntax = ndr_transfer_syntax;
+ result->dispatch = cli_do_rpc_ndr;
+ result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN;
+ result->max_recv_frag = RPC_MAX_PDU_FRAG_LEN;
+
+ result->desthost = talloc_strdup(result, global_myname());
+ result->srv_name_slash = talloc_asprintf_strupper_m(
+ result, "\\\\%s", global_myname());
+ if ((result->desthost == NULL) || (result->srv_name_slash == NULL)) {
+ TALLOC_FREE(result);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ status = rpc_transport_smbd_init(result, conn, syntax,
+ &result->transport);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(1, ("rpc_transport_smbd_init failed: %s\n",
+ nt_errstr(status)));
+ TALLOC_FREE(result);
+ return status;
+ }
+
+ status = rpccli_anon_bind_data(result, &auth);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(1, ("rpccli_anon_bind_data failed: %s\n",
+ nt_errstr(status)));
+ TALLOC_FREE(result);
+ return status;
+ }
+
+ status = rpc_pipe_bind(result, auth);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(1, ("rpc_pipe_bind failed: %s\n", nt_errstr(status)));
+ TALLOC_FREE(result);
+ return status;
+ }
+
+ *presult = result;
+ return NT_STATUS_OK;
+}
+
/****************************************************************************
Open a pipe to a remote server.
****************************************************************************/
@@ -3587,8 +3661,7 @@ NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
}
DEBUG(lvl, ("cli_rpc_pipe_open_noauth: rpc_pipe_bind for pipe "
"%s failed with error %s\n",
- cli_get_pipe_name_from_iface(debug_ctx(),
- interface),
+ get_pipe_name_from_iface(interface),
nt_errstr(status) ));
TALLOC_FREE(result);
return status;
@@ -3596,8 +3669,7 @@ NTSTATUS cli_rpc_pipe_open_noauth(struct cli_state *cli,
DEBUG(10,("cli_rpc_pipe_open_noauth: opened pipe %s to machine "
"%s and bound anonymously.\n",
- cli_get_pipe_name_from_iface(debug_ctx(), interface),
- cli->desthost ));
+ get_pipe_name_from_iface(interface), cli->desthost));
*presult = result;
return NT_STATUS_OK;
@@ -3643,8 +3715,8 @@ static NTSTATUS cli_rpc_pipe_open_ntlmssp_internal(struct cli_state *cli,
DEBUG(10,("cli_rpc_pipe_open_ntlmssp_internal: opened pipe %s to "
"machine %s and bound NTLMSSP as user %s\\%s.\n",
- cli_get_pipe_name_from_iface(debug_ctx(), interface),
- cli->desthost, domain, username ));
+ get_pipe_name_from_iface(interface), cli->desthost, domain,
+ username ));
*presult = result;
return NT_STATUS_OK;
@@ -3835,7 +3907,7 @@ NTSTATUS cli_rpc_pipe_open_schannel_with_key(struct cli_state *cli,
DEBUG(10,("cli_rpc_pipe_open_schannel_with_key: opened pipe %s to machine %s "
"for domain %s and bound using schannel.\n",
- cli_get_pipe_name_from_iface(debug_ctx(), interface),
+ get_pipe_name_from_iface(interface),
cli->desthost, domain ));
*presult = result;
@@ -4045,40 +4117,3 @@ NTSTATUS cli_get_session_key(TALLOC_CTX *mem_ctx,
return NT_STATUS_OK;
}
-
-/**
- * Create a new RPC client context which uses a local dispatch function.
- */
-NTSTATUS rpc_pipe_open_internal(TALLOC_CTX *mem_ctx, const struct ndr_syntax_id *abstract_syntax,
- NTSTATUS (*dispatch) (struct rpc_pipe_client *cli, TALLOC_CTX *mem_ctx, const struct ndr_interface_table *table, uint32_t opnum, void *r),
- struct auth_serversupplied_info *serversupplied_info,
- struct rpc_pipe_client **presult)
-{
- struct rpc_pipe_client *result;
-
- result = TALLOC_ZERO_P(mem_ctx, struct rpc_pipe_client);
- if (result == NULL) {
- return NT_STATUS_NO_MEMORY;
- }
-
- result->abstract_syntax = *abstract_syntax;
- result->transfer_syntax = ndr_transfer_syntax;
- result->dispatch = dispatch;
-
- result->pipes_struct = TALLOC_ZERO_P(mem_ctx, pipes_struct);
- if (result->pipes_struct == NULL) {
- TALLOC_FREE(result);
- return NT_STATUS_NO_MEMORY;
- }
- result->pipes_struct->mem_ctx = mem_ctx;
- result->pipes_struct->server_info = serversupplied_info;
- result->pipes_struct->pipe_bound = true;
-
- result->max_xmit_frag = -1;
- result->max_recv_frag = -1;
-
- *presult = result;
- return NT_STATUS_OK;
-}
-
-
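The cli_pipe.c changes replace the talloc-based cli_get_pipe_name_from_iface() with get_pipe_name_from_iface(), which now synthesizes a printable fallback name for interfaces missing from pipe_names[] instead of returning NULL, and they swap the purely local rpc_pipe_open_internal() for rpc_pipe_open_local(), which binds over a real transport to a child smbd. A minimal caller-side sketch of the new naming behaviour (the log_pipe_target() helper is hypothetical, not part of this patch):

/* Hypothetical debug helper.  get_pipe_name_from_iface() returns a
 * printable name even for interfaces not listed in pipe_names[]
 * ("Interface <uuid>.<version>"); NULL is only possible on allocation
 * failure.  The string is allocated on talloc_tos(). */
static void log_pipe_target(const struct ndr_syntax_id *iface)
{
	const char *name = get_pipe_name_from_iface(iface);

	DEBUG(10, ("operating on pipe %s\n", name ? name : "<unknown>"));
}
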
diff --git a/source3/rpc_client/init_netlogon.c b/source3/rpc_client/init_netlogon.c
index 6f7a541f72..793b9c7de5 100644
--- a/source3/rpc_client/init_netlogon.c
+++ b/source3/rpc_client/init_netlogon.c
@@ -144,7 +144,8 @@ static NTSTATUS nt_token_to_group_list(TALLOC_CTX *mem_ctx,
const DOM_SID *domain_sid,
size_t num_sids,
const DOM_SID *sids,
- int *numgroups, DOM_GID **pgids)
+ int *numgroups,
+ struct samr_RidWithAttribute **pgids)
{
int i;
@@ -152,13 +153,14 @@ static NTSTATUS nt_token_to_group_list(TALLOC_CTX *mem_ctx,
*pgids = NULL;
for (i=0; i<num_sids; i++) {
- DOM_GID gid;
- if (!sid_peek_check_rid(domain_sid, &sids[i], &gid.g_rid)) {
+ struct samr_RidWithAttribute gid;
+ if (!sid_peek_check_rid(domain_sid, &sids[i], &gid.rid)) {
continue;
}
- gid.attr = (SE_GROUP_MANDATORY|SE_GROUP_ENABLED_BY_DEFAULT|
+ gid.attributes = (SE_GROUP_MANDATORY|SE_GROUP_ENABLED_BY_DEFAULT|
SE_GROUP_ENABLED);
- ADD_TO_ARRAY(mem_ctx, DOM_GID, gid, pgids, numgroups);
+ ADD_TO_ARRAY(mem_ctx, struct samr_RidWithAttribute,
+ gid, pgids, numgroups);
if (*pgids == NULL) {
return NT_STATUS_NO_MEMORY;
}
@@ -177,7 +179,7 @@ NTSTATUS serverinfo_to_SamInfo3(struct auth_serversupplied_info *server_info,
struct netr_SamInfo3 *sam3)
{
struct samu *sampw;
- DOM_GID *gids = NULL;
+ struct samr_RidWithAttribute *gids = NULL;
const DOM_SID *user_sid = NULL;
const DOM_SID *group_sid = NULL;
DOM_SID domain_sid;
@@ -277,8 +279,8 @@ NTSTATUS serverinfo_to_SamInfo3(struct auth_serversupplied_info *server_info,
}
for (i=0; i < groups.count; i++) {
- groups.rids[i].rid = gids[i].g_rid;
- groups.rids[i].attributes = gids[i].attr;
+ groups.rids[i].rid = gids[i].rid;
+ groups.rids[i].attributes = gids[i].attributes;
}
unix_to_nt_time(&last_logon, pdb_get_logon_time(sampw));
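The init_netlogon.c hunks are a mechanical type migration from the hand-marshalled DOM_GID to the IDL-generated struct samr_RidWithAttribute (g_rid becomes rid, attr becomes attributes). For illustration only, a hypothetical conversion helper making that field mapping explicit, assuming the legacy DOM_GID layout this patch stops using here:

/* Hypothetical helper, not part of the patch: map a legacy DOM_GID
 * onto the IDL-generated samr_RidWithAttribute. */
static struct samr_RidWithAttribute gid_to_rid_with_attribute(const DOM_GID *gid)
{
	struct samr_RidWithAttribute result;

	result.rid = gid->g_rid;	/* was DOM_GID.g_rid */
	result.attributes = gid->attr;	/* was DOM_GID.attr */
	return result;
}
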
diff --git a/source3/rpc_client/rpc_transport_np.c b/source3/rpc_client/rpc_transport_np.c
index e8a333e509..80ff384046 100644
--- a/source3/rpc_client/rpc_transport_np.c
+++ b/source3/rpc_client/rpc_transport_np.c
@@ -93,7 +93,7 @@ static void rpc_np_write_done(struct async_req *subreq)
status = cli_write_andx_recv(subreq, &state->written);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -105,7 +105,7 @@ static NTSTATUS rpc_np_write_recv(struct async_req *req, ssize_t *pwritten)
req->private_data, struct rpc_np_write_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
return status;
}
*pwritten = state->written;
@@ -169,13 +169,13 @@ static void rpc_np_read_done(struct async_req *subreq)
}
if (!NT_STATUS_IS_OK(status)) {
TALLOC_FREE(subreq);
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
if (state->received > state->size) {
TALLOC_FREE(subreq);
- async_req_error(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
+ async_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
return;
}
@@ -189,7 +189,7 @@ static NTSTATUS rpc_np_read_recv(struct async_req *req, ssize_t *preceived)
req->private_data, struct rpc_np_read_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
return status;
}
*preceived = state->received;
@@ -251,7 +251,7 @@ static void rpc_np_trans_done(struct async_req *subreq)
&state->rdata, &state->rdata_len);
TALLOC_FREE(subreq);
if (!NT_STATUS_IS_OK(status)) {
- async_req_error(req, status);
+ async_req_nterror(req, status);
return;
}
async_req_done(req);
@@ -264,7 +264,7 @@ static NTSTATUS rpc_np_trans_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
req->private_data, struct rpc_np_trans_state);
NTSTATUS status;
- if (async_req_is_error(req, &status)) {
+ if (async_req_is_nterror(req, &status)) {
return status;
}
*prdata = talloc_move(mem_ctx, &state->rdata);
@@ -272,49 +272,129 @@ static NTSTATUS rpc_np_trans_recv(struct async_req *req, TALLOC_CTX *mem_ctx,
return NT_STATUS_OK;
}
+struct rpc_transport_np_init_state {
+ struct rpc_cli_transport *transport;
+ struct rpc_transport_np_state *transport_np;
+};
+
+static void rpc_transport_np_init_pipe_open(struct async_req *subreq);
+
+struct async_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ struct cli_state *cli,
+ const struct ndr_syntax_id *abstract_syntax)
+{
+ struct async_req *result, *subreq;
+ struct rpc_transport_np_init_state *state;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct rpc_transport_np_init_state)) {
+ return NULL;
+ }
+
+ state->transport = talloc(state, struct rpc_cli_transport);
+ if (state->transport == NULL) {
+ goto fail;
+ }
+ state->transport_np = talloc(state->transport,
+ struct rpc_transport_np_state);
+ if (state->transport_np == NULL) {
+ goto fail;
+ }
+ state->transport->priv = state->transport_np;
+
+ state->transport_np->pipe_name = get_pipe_name_from_iface(
+ abstract_syntax);
+ state->transport_np->cli = cli;
+
+ subreq = cli_ntcreate_send(
+ state, ev, cli, state->transport_np->pipe_name, 0,
+ DESIRED_ACCESS_PIPE, 0, FILE_SHARE_READ|FILE_SHARE_WRITE,
+ FILE_OPEN, 0, 0);
+ if (subreq == NULL) {
+ goto fail;
+ }
+ subreq->async.fn = rpc_transport_np_init_pipe_open;
+ subreq->async.priv = result;
+ return result;
+
+ fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void rpc_transport_np_init_pipe_open(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct rpc_transport_np_init_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_transport_np_init_state);
+ NTSTATUS status;
+
+ status = cli_ntcreate_recv(subreq, &state->transport_np->fnum);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+
+ talloc_set_destructor(state->transport_np,
+ rpc_transport_np_state_destructor);
+ async_req_done(req);
+}
+
+NTSTATUS rpc_transport_np_init_recv(struct async_req *req,
+ TALLOC_CTX *mem_ctx,
+ struct rpc_cli_transport **presult)
+{
+ struct rpc_transport_np_init_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_transport_np_init_state);
+ NTSTATUS status;
+
+ if (async_req_is_nterror(req, &status)) {
+ return status;
+ }
+
+ state->transport->write_send = rpc_np_write_send;
+ state->transport->write_recv = rpc_np_write_recv;
+ state->transport->read_send = rpc_np_read_send;
+ state->transport->read_recv = rpc_np_read_recv;
+ state->transport->trans_send = rpc_np_trans_send;
+ state->transport->trans_recv = rpc_np_trans_recv;
+
+ *presult = talloc_move(mem_ctx, &state->transport);
+ return NT_STATUS_OK;
+}
+
NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli,
const struct ndr_syntax_id *abstract_syntax,
struct rpc_cli_transport **presult)
{
- struct rpc_cli_transport *result;
- struct rpc_transport_np_state *state;
- int fnum;
+ TALLOC_CTX *frame = talloc_stackframe();
+ struct event_context *ev;
+ struct async_req *req;
+ NTSTATUS status;
- result = talloc(mem_ctx, struct rpc_cli_transport);
- if (result == NULL) {
- return NT_STATUS_NO_MEMORY;
- }
- state = talloc(result, struct rpc_transport_np_state);
- if (state == NULL) {
- TALLOC_FREE(result);
- return NT_STATUS_NO_MEMORY;
+ ev = event_context_init(frame);
+ if (ev == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
}
- result->priv = state;
-
- state->cli = cli;
- state->pipe_name = cli_get_pipe_name_from_iface(
- state, abstract_syntax);
-
- fnum = cli_nt_create(cli, state->pipe_name, DESIRED_ACCESS_PIPE);
- if (fnum == -1) {
- DEBUG(3,("rpc_pipe_open_np: cli_nt_create failed on pipe %s "
- "to machine %s. Error was %s\n", state->pipe_name,
- cli->desthost, cli_errstr(cli)));
- TALLOC_FREE(result);
- return cli_get_nt_error(cli);
+
+ req = rpc_transport_np_init_send(frame, ev, cli, abstract_syntax);
+ if (req == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
}
- state->fnum = fnum;
- talloc_set_destructor(state, rpc_transport_np_state_destructor);
- result->write_send = rpc_np_write_send;
- result->write_recv = rpc_np_write_recv;
- result->read_send = rpc_np_read_send;
- result->read_recv = rpc_np_read_recv;
- result->trans_send = rpc_np_trans_send;
- result->trans_recv = rpc_np_trans_recv;
+ while (req->state < ASYNC_REQ_DONE) {
+ event_loop_once(ev);
+ }
- *presult = result;
- return NT_STATUS_OK;
+ status = rpc_transport_np_init_recv(req, mem_ctx, presult);
+ fail:
+ TALLOC_FREE(frame);
+ return status;
}
struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
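rpc_transport_np.c gains an asynchronous open path (rpc_transport_np_init_send/_recv); the old synchronous rpc_transport_np_init() is now a thin wrapper that spins a private event loop. Callers that already own an event context can drive the open themselves instead. A sketch of that pattern, with hypothetical my_* names, following the subreq->async.fn continuation convention used throughout this patch:

static void my_transport_ready(struct async_req *subreq);

/* Hypothetical caller: open the named-pipe transport without blocking
 * in a nested event loop. */
static struct async_req *my_open_transport(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   struct cli_state *cli,
					   const struct ndr_syntax_id *syntax)
{
	struct async_req *req;

	req = rpc_transport_np_init_send(mem_ctx, ev, cli, syntax);
	if (req == NULL) {
		return NULL;
	}
	req->async.fn = my_transport_ready;	/* invoked on completion */
	req->async.priv = NULL;			/* caller-defined state */
	return req;
}

static void my_transport_ready(struct async_req *subreq)
{
	struct rpc_cli_transport *transport;
	NTSTATUS status;

	status = rpc_transport_np_init_recv(subreq, talloc_tos(), &transport);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("rpc_transport_np_init failed: %s\n",
			  nt_errstr(status)));
		return;
	}
	/* hand the transport to the rpc_pipe layer from here */
}
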
diff --git a/source3/rpc_client/rpc_transport_smbd.c b/source3/rpc_client/rpc_transport_smbd.c
new file mode 100644
index 0000000000..bf4aa65dae
--- /dev/null
+++ b/source3/rpc_client/rpc_transport_smbd.c
@@ -0,0 +1,694 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * RPC client transport over named pipes to a child smbd
+ * Copyright (C) Volker Lendecke 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "includes.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_RPC_CLI
+
+/**
+ * struct rpc_cli_smbd_conn represents a forked smbd. This structure should
+ * exist only once per process which does the rpc calls.
+ *
+ * RPC pipe handles can be attached to this smbd connection with
+ * rpc_pipe_open_local().
+ *
+ * For this to work right, we can not use rpc_transport_np directly, because
+ * the child smbd wants to write its DEBUG output somewhere. We redirect the
+ * child's output to rpc_cli_smbd_conn->stdout_fd. While the RPC calls are
+ * active, we have an event context available and attach a fd event to the
+ * stdout_df.
+ */
+
+struct rpc_cli_smbd_conn {
+ /**
+ * The smb connection to handle the named pipe traffic over
+ */
+ struct cli_state *cli;
+
+ /**
+ * Attached to stdout in the forked smbd, this is where smbd will
+ * print its DEBUG.
+ */
+ int stdout_fd;
+
+ /**
+ * Custom callback provided by the owner of the
+ * rpc_cli_smbd_conn. Here we send the smbd DEBUG output. Can be NULL.
+ */
+ struct {
+ void (*fn)(char *buf, size_t len, void *priv);
+ void *priv;
+ } stdout_callback ;
+};
+
+/**
+ * Event handler to be called whenever the forked smbd prints debugging
+ * output.
+ */
+
+static void rpc_cli_smbd_stdout_reader(struct event_context *ev,
+ struct fd_event *fde,
+ uint16_t flags, void *priv)
+{
+ struct rpc_cli_smbd_conn *conn = talloc_get_type_abort(
+ priv, struct rpc_cli_smbd_conn);
+ char buf[1024];
+ ssize_t nread;
+
+ if ((flags & EVENT_FD_READ) == 0) {
+ return;
+ }
+
+ nread = read(conn->stdout_fd, buf, sizeof(buf)-1);
+ if (nread < 0) {
+ DEBUG(0, ("Could not read from smbd stdout: %s\n",
+ strerror(errno)));
+ TALLOC_FREE(fde);
+ return;
+ }
+ if (nread == 0) {
+ DEBUG(0, ("EOF from smbd stdout\n"));
+ TALLOC_FREE(fde);
+ return;
+ }
+
+ if (conn->stdout_callback.fn != NULL) {
+ conn->stdout_callback.fn(buf, nread,
+ conn->stdout_callback.priv);
+ }
+}
+
+/**
+ * struct rpc_transport_smbd_state is the link from a struct rpc_pipe_client
+ * to the rpc_cli_smbd_conn. We use a named pipe transport as a subtransport.
+ */
+
+struct rpc_transport_smbd_state {
+ struct rpc_cli_smbd_conn *conn;
+ struct rpc_cli_transport *sub_transp;
+};
+
+static int rpc_cli_smbd_conn_destructor(struct rpc_cli_smbd_conn *conn)
+{
+ if (conn->cli != NULL) {
+ cli_shutdown(conn->cli);
+ conn->cli = NULL;
+ }
+ if (conn->stdout_fd != -1) {
+ close(conn->stdout_fd);
+ conn->stdout_fd = -1;
+ }
+ return 0;
+}
+
+/*
+ * Do the negprot/sesssetup/tcon to an anonymous ipc$ connection
+ */
+
+struct get_anon_ipc_state {
+ struct event_context *ev;
+ struct cli_state *cli;
+};
+
+static void get_anon_ipc_negprot_done(struct async_req *subreq);
+static void get_anon_ipc_sesssetup_done(struct async_req *subreq);
+static void get_anon_ipc_tcon_done(struct async_req *subreq);
+
+static struct async_req *get_anon_ipc_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ struct cli_state *cli)
+{
+ struct async_req *result, *subreq;
+ struct get_anon_ipc_state *state;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct get_anon_ipc_state)) {
+ return NULL;
+ }
+
+ state->ev = ev;
+ state->cli = cli;
+
+ subreq = cli_negprot_send(state, ev, cli);
+ if (subreq == NULL) {
+ goto fail;
+ }
+ subreq->async.fn = get_anon_ipc_negprot_done;
+ subreq->async.priv = result;
+ return result;
+ fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void get_anon_ipc_negprot_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct get_anon_ipc_state *state = talloc_get_type_abort(
+ req->private_data, struct get_anon_ipc_state);
+ NTSTATUS status;
+
+ status = cli_negprot_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+
+ subreq = cli_session_setup_guest_send(state, state->ev, state->cli);
+ if (async_req_nomem(subreq, req)) {
+ return;
+ }
+ subreq->async.fn = get_anon_ipc_sesssetup_done;
+ subreq->async.priv = req;
+}
+
+static void get_anon_ipc_sesssetup_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct get_anon_ipc_state *state = talloc_get_type_abort(
+ req->private_data, struct get_anon_ipc_state);
+ NTSTATUS status;
+
+ status = cli_session_setup_guest_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+
+ subreq = cli_tcon_andx_send(state, state->ev, state->cli,
+ "IPC$", "IPC", NULL, 0);
+ if (async_req_nomem(subreq, req)) {
+ return;
+ }
+ subreq->async.fn = get_anon_ipc_tcon_done;
+ subreq->async.priv = req;
+}
+
+static void get_anon_ipc_tcon_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ NTSTATUS status;
+
+ status = cli_tcon_andx_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+ async_req_done(req);
+}
+
+static NTSTATUS get_anon_ipc_recv(struct async_req *req)
+{
+ return async_req_simple_recv_ntstatus(req);
+}
+
+struct rpc_cli_smbd_conn_init_state {
+ struct event_context *ev;
+ struct rpc_cli_smbd_conn *conn;
+};
+
+static void rpc_cli_smbd_conn_init_done(struct async_req *subreq);
+
+struct async_req *rpc_cli_smbd_conn_init_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ void (*stdout_callback)(char *buf,
+ size_t len,
+ void *priv),
+ void *priv)
+{
+ struct async_req *result, *subreq;
+ struct rpc_cli_smbd_conn_init_state *state;
+ int smb_sock[2];
+ int stdout_pipe[2];
+ NTSTATUS status;
+ pid_t pid;
+ int ret;
+
+ smb_sock[0] = smb_sock[1] = stdout_pipe[0] = stdout_pipe[1] = -1;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct rpc_cli_smbd_conn_init_state)) {
+ return NULL;
+ }
+ state->ev = ev;
+
+ state->conn = talloc(state, struct rpc_cli_smbd_conn);
+ if (state->conn == NULL) {
+ goto nomem;
+ }
+
+ state->conn->cli = cli_initialise();
+ if (state->conn->cli == NULL) {
+ goto nomem;
+ }
+ state->conn->stdout_fd = -1;
+ state->conn->stdout_callback.fn = stdout_callback;
+ state->conn->stdout_callback.priv = priv;
+ talloc_set_destructor(state->conn, rpc_cli_smbd_conn_destructor);
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM, 0, smb_sock);
+ if (ret == -1) {
+ status = map_nt_error_from_unix(errno);
+ goto post_status;
+ }
+ ret = pipe(stdout_pipe);
+ if (ret == -1) {
+ status = map_nt_error_from_unix(errno);
+ goto post_status;
+ }
+
+ pid = sys_fork();
+ if (pid == -1) {
+ status = map_nt_error_from_unix(errno);
+ goto post_status;
+ }
+ if (pid == 0) {
+ char *smbd_cmd;
+
+ close(smb_sock[0]);
+ close(stdout_pipe[0]);
+ close(0);
+ if (dup(smb_sock[1]) == -1) {
+ exit(1);
+ }
+ close(smb_sock[1]);
+ close(1);
+ if (dup(stdout_pipe[1]) == -1) {
+ exit(1);
+ }
+ close(stdout_pipe[1]);
+
+ smbd_cmd = getenv("SMB_PATH");
+
+ if ((smbd_cmd == NULL)
+ && (asprintf(&smbd_cmd, "%s/smbd", get_dyn_SBINDIR())
+ == -1)) {
+ printf("no memory");
+ exit(1);
+ }
+ if (asprintf(&smbd_cmd, "%s -F -S", smbd_cmd) == -1) {
+ printf("no memory");
+ exit(1);
+ }
+
+ exit(system(smbd_cmd));
+ }
+
+ state->conn->cli->fd = smb_sock[0];
+ smb_sock[0] = -1;
+ close(smb_sock[1]);
+ smb_sock[1] = -1;
+
+ state->conn->stdout_fd = stdout_pipe[0];
+ stdout_pipe[0] = -1;
+ close(stdout_pipe[1]);
+ stdout_pipe[1] = -1;
+
+ subreq = get_anon_ipc_send(state, ev, state->conn->cli);
+ if (subreq == NULL) {
+ goto nomem;
+ }
+
+ if (event_add_fd(ev, subreq, state->conn->stdout_fd, EVENT_FD_READ,
+ rpc_cli_smbd_stdout_reader, state->conn) == NULL) {
+ goto nomem;
+ }
+
+ subreq->async.fn = rpc_cli_smbd_conn_init_done;
+ subreq->async.priv = result;
+ return result;
+
+ nomem:
+ status = NT_STATUS_NO_MEMORY;
+ post_status:
+ if (smb_sock[0] != -1) {
+ close(smb_sock[0]);
+ }
+ if (smb_sock[1] != -1) {
+ close(smb_sock[1]);
+ }
+ if (stdout_pipe[0] != -1) {
+ close(stdout_pipe[0]);
+ }
+ if (stdout_pipe[1] != -1) {
+ close(stdout_pipe[1]);
+ }
+ if (async_post_ntstatus(result, ev, status)) {
+ return result;
+ }
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void rpc_cli_smbd_conn_init_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ NTSTATUS status;
+
+ status = get_anon_ipc_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+ async_req_done(req);
+}
+
+NTSTATUS rpc_cli_smbd_conn_init_recv(struct async_req *req,
+ TALLOC_CTX *mem_ctx,
+ struct rpc_cli_smbd_conn **pconn)
+{
+ struct rpc_cli_smbd_conn_init_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_cli_smbd_conn_init_state);
+ NTSTATUS status;
+
+ if (async_req_is_nterror(req, &status)) {
+ return status;
+ }
+ *pconn = talloc_move(mem_ctx, &state->conn);
+ return NT_STATUS_OK;
+}
+
+NTSTATUS rpc_cli_smbd_conn_init(TALLOC_CTX *mem_ctx,
+ struct rpc_cli_smbd_conn **pconn,
+ void (*stdout_callback)(char *buf,
+ size_t len,
+ void *priv),
+ void *priv)
+{
+ TALLOC_CTX *frame = talloc_stackframe();
+ struct event_context *ev;
+ struct async_req *req;
+ NTSTATUS status;
+
+ ev = event_context_init(frame);
+ if (ev == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+
+ req = rpc_cli_smbd_conn_init_send(frame, ev, stdout_callback, priv);
+ if (req == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+
+ while (req->state < ASYNC_REQ_DONE) {
+ event_loop_once(ev);
+ }
+
+ status = rpc_cli_smbd_conn_init_recv(req, mem_ctx, pconn);
+ fail:
+ TALLOC_FREE(frame);
+ return status;
+}
+
+struct rpc_smbd_write_state {
+ struct rpc_cli_transport *sub_transp;
+ ssize_t written;
+};
+
+static void rpc_smbd_write_done(struct async_req *subreq);
+
+static struct async_req *rpc_smbd_write_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ const uint8_t *data, size_t size,
+ void *priv)
+{
+ struct rpc_transport_smbd_state *transp = talloc_get_type_abort(
+ priv, struct rpc_transport_smbd_state);
+ struct async_req *result, *subreq;
+ struct rpc_smbd_write_state *state;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct rpc_smbd_write_state)) {
+ return NULL;
+ }
+ state->sub_transp = transp->sub_transp;
+
+ subreq = transp->sub_transp->write_send(state, ev, data, size,
+ transp->sub_transp->priv);
+ if (subreq == NULL) {
+ goto fail;
+ }
+
+ if (event_add_fd(ev, subreq, transp->conn->stdout_fd, EVENT_FD_READ,
+ rpc_cli_smbd_stdout_reader, transp->conn) == NULL) {
+ goto fail;
+ }
+
+ subreq->async.fn = rpc_smbd_write_done;
+ subreq->async.priv = result;
+ return result;
+
+ fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void rpc_smbd_write_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct rpc_smbd_write_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_smbd_write_state);
+ NTSTATUS status;
+
+ status = state->sub_transp->write_recv(subreq, &state->written);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+ async_req_done(req);
+}
+
+static NTSTATUS rpc_smbd_write_recv(struct async_req *req, ssize_t *pwritten)
+{
+ struct rpc_smbd_write_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_smbd_write_state);
+ NTSTATUS status;
+
+ if (async_req_is_nterror(req, &status)) {
+ return status;
+ }
+ *pwritten = state->written;
+ return NT_STATUS_OK;
+}
+
+struct rpc_smbd_read_state {
+ struct rpc_cli_transport *sub_transp;
+ ssize_t received;
+};
+
+static void rpc_smbd_read_done(struct async_req *subreq);
+
+static struct async_req *rpc_smbd_read_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ uint8_t *data, size_t size,
+ void *priv)
+{
+ struct rpc_transport_smbd_state *transp = talloc_get_type_abort(
+ priv, struct rpc_transport_smbd_state);
+ struct async_req *result, *subreq;
+ struct rpc_smbd_read_state *state;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct rpc_smbd_read_state)) {
+ return NULL;
+ }
+ state->sub_transp = transp->sub_transp;
+
+ subreq = transp->sub_transp->read_send(state, ev, data, size,
+ transp->sub_transp->priv);
+ if (subreq == NULL) {
+ goto fail;
+ }
+
+ if (event_add_fd(ev, subreq, transp->conn->stdout_fd, EVENT_FD_READ,
+ rpc_cli_smbd_stdout_reader, transp->conn) == NULL) {
+ goto fail;
+ }
+
+ subreq->async.fn = rpc_smbd_read_done;
+ subreq->async.priv = result;
+ return result;
+
+ fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void rpc_smbd_read_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct rpc_smbd_read_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_smbd_read_state);
+ NTSTATUS status;
+
+ status = state->sub_transp->read_recv(subreq, &state->received);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+ async_req_done(req);
+}
+
+static NTSTATUS rpc_smbd_read_recv(struct async_req *req, ssize_t *preceived)
+{
+ struct rpc_smbd_read_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_smbd_read_state);
+ NTSTATUS status;
+
+ if (async_req_is_nterror(req, &status)) {
+ return status;
+ }
+ *preceived = state->received;
+ return NT_STATUS_OK;
+}
+
+struct rpc_transport_smbd_init_state {
+ struct rpc_cli_transport *transport;
+ struct rpc_transport_smbd_state *transport_smbd;
+};
+
+static void rpc_transport_smbd_init_done(struct async_req *subreq);
+
+struct async_req *rpc_transport_smbd_init_send(TALLOC_CTX *mem_ctx,
+ struct event_context *ev,
+ struct rpc_cli_smbd_conn *conn,
+ const struct ndr_syntax_id *abstract_syntax)
+{
+ struct async_req *result, *subreq;
+ struct rpc_transport_smbd_init_state *state;
+
+ if (!async_req_setup(mem_ctx, &result, &state,
+ struct rpc_transport_smbd_init_state)) {
+ return NULL;
+ }
+
+ state->transport = talloc(state, struct rpc_cli_transport);
+ if (state->transport == NULL) {
+ goto fail;
+ }
+ state->transport_smbd = talloc(state->transport,
+ struct rpc_transport_smbd_state);
+ if (state->transport_smbd == NULL) {
+ goto fail;
+ }
+ state->transport_smbd->conn = conn;
+ state->transport->priv = state->transport_smbd;
+
+ subreq = rpc_transport_np_init_send(state, ev, conn->cli,
+ abstract_syntax);
+ if (subreq == NULL) {
+ goto fail;
+ }
+ subreq->async.fn = rpc_transport_smbd_init_done;
+ subreq->async.priv = result;
+ return result;
+
+ fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+static void rpc_transport_smbd_init_done(struct async_req *subreq)
+{
+ struct async_req *req = talloc_get_type_abort(
+ subreq->async.priv, struct async_req);
+ struct rpc_transport_smbd_init_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_transport_smbd_init_state);
+ NTSTATUS status;
+
+ status = rpc_transport_np_init_recv(
+ subreq, state->transport_smbd,
+ &state->transport_smbd->sub_transp);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ async_req_nterror(req, status);
+ return;
+ }
+ async_req_done(req);
+}
+
+NTSTATUS rpc_transport_smbd_init_recv(struct async_req *req,
+ TALLOC_CTX *mem_ctx,
+ struct rpc_cli_transport **presult)
+{
+ struct rpc_transport_smbd_init_state *state = talloc_get_type_abort(
+ req->private_data, struct rpc_transport_smbd_init_state);
+ NTSTATUS status;
+
+ if (async_req_is_nterror(req, &status)) {
+ return status;
+ }
+
+ state->transport->write_send = rpc_smbd_write_send;
+ state->transport->write_recv = rpc_smbd_write_recv;
+ state->transport->read_send = rpc_smbd_read_send;
+ state->transport->read_recv = rpc_smbd_read_recv;
+ state->transport->trans_send = NULL;
+ state->transport->trans_recv = NULL;
+
+ *presult = talloc_move(mem_ctx, &state->transport);
+ return NT_STATUS_OK;
+}
+
+NTSTATUS rpc_transport_smbd_init(TALLOC_CTX *mem_ctx,
+ struct rpc_cli_smbd_conn *conn,
+ const struct ndr_syntax_id *abstract_syntax,
+ struct rpc_cli_transport **presult)
+{
+ TALLOC_CTX *frame = talloc_stackframe();
+ struct event_context *ev;
+ struct async_req *req;
+ NTSTATUS status;
+
+ ev = event_context_init(frame);
+ if (ev == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+
+ req = rpc_transport_smbd_init_send(frame, ev, conn, abstract_syntax);
+ if (req == NULL) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+
+ while (req->state < ASYNC_REQ_DONE) {
+ event_loop_once(ev);
+ }
+
+ status = rpc_transport_smbd_init_recv(req, mem_ctx, presult);
+ fail:
+ TALLOC_FREE(frame);
+ return status;
+}
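
Taken together, rpc_transport_smbd.c lets a client process fork its own smbd, connect to it anonymously over the spliced socketpair, and then bind RPC pipes against it via rpc_pipe_open_local() from cli_pipe.c above. A hypothetical end-to-end sketch (the winreg table and the NULL stdout callback are illustrative assumptions; passing NULL simply discards the child's DEBUG output, which the struct comment explicitly allows):

/* Hypothetical end-to-end usage, not part of the patch. */
static NTSTATUS open_local_winreg(TALLOC_CTX *mem_ctx,
				  struct rpc_pipe_client **presult)
{
	struct rpc_cli_smbd_conn *conn;
	NTSTATUS status;

	/* Fork the child smbd and do negprot/sesssetup/tcon to IPC$. */
	status = rpc_cli_smbd_conn_init(mem_ctx, &conn, NULL, NULL);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* Open the named pipe over that connection and bind anonymously;
	 * assumes the generated ndr_table_winreg is linked in. */
	return rpc_pipe_open_local(mem_ctx, conn,
				   &ndr_table_winreg.syntax_id, presult);
}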