/*
 * Unix SMB/CIFS implementation.
 * RPC client transport over named pipes
 * Copyright (C) Volker Lendecke 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_CLI
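
/*
 * Per-pipe transport state: the SMB connection the pipe was opened on,
 * the pipe name and the fnum of the open pipe handle.
 */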
struct rpc_transport_np_state {
	struct cli_state *cli;
	const char *pipe_name;
	uint16_t fnum;
};
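
/*
 * Liveness check used by all transport calls below. Once the SMB
 * connection is found dead, the cli pointer is cleared so that later
 * calls fail cleanly with NT_STATUS_CONNECTION_INVALID.
 */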
static bool rpc_np_is_connected(void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	bool ok;

	if (np_transport->cli == NULL) {
		return false;
	}

	ok = cli_state_is_connected(np_transport->cli);
	if (!ok) {
		np_transport->cli = NULL;
		return false;
	}

	return true;
}

static unsigned int rpc_np_set_timeout(void *priv, unsigned int timeout)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	bool ok;

	if (np_transport->cli == NULL) {
		return 0;
	}

	ok = rpc_np_is_connected(np_transport);
	if (!ok) {
		return 0;
	}

	return cli_set_timeout(np_transport->cli, timeout);
}
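
/*
 * Destructor for the transport state: close the pipe handle on the
 * server if the connection is still alive.
 */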
static int rpc_transport_np_state_destructor(struct rpc_transport_np_state *s)
{
	if (!rpc_np_is_connected(s)) {
		DEBUG(10, ("socket was closed, no need to send close request.\n"));
		return 0;
	}

	/* TODO: do not use a sync call with a destructor!!! */
	if (!NT_STATUS_IS_OK(cli_close(s->cli, s->fnum))) {
		DEBUG(1, ("rpc_transport_np_state_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n", s->pipe_name,
			  cli_errstr(s->cli)));
	}
	DEBUG(10, ("rpc_pipe_destructor: closed %s\n", s->pipe_name));
	/*
	 * We can't do much on failure
	 */
	return 0;
}
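
/*
 * Write path: a PDU fragment is sent down the pipe with SMBwriteX in
 * message mode.
 */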
struct rpc_np_write_state {
	struct rpc_transport_np_state *np_transport;
	size_t size;
	size_t written;
};

static void rpc_np_write_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_write_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    const uint8_t *data, size_t size,
					    void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_write_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_write_state);
	if (req == NULL) {
		return NULL;
	}

	ok = rpc_np_is_connected(np_transport);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_CONNECTION_INVALID);
		return tevent_req_post(req, ev);
	}

	state->np_transport = np_transport;
	state->size = size;

	subreq = cli_write_andx_send(mem_ctx, ev, np_transport->cli,
				     np_transport->fnum,
				     8, /* 8 means message mode. */
				     data, 0, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_np_write_done, req);
	return req;
}

static void rpc_np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_write_state *state = tevent_req_data(
		req, struct rpc_np_write_state);
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &state->written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, status);
		return;
	}
	tevent_req_done(req);
}

static NTSTATUS rpc_np_write_recv(struct tevent_req *req, ssize_t *pwritten)
{
	struct rpc_np_write_state *state = tevent_req_data(
		req, struct rpc_np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*pwritten = state->written;
	return NT_STATUS_OK;
}
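
/*
 * Read path: a PDU fragment is fetched from the pipe with SMBreadX and
 * copied into the caller-provided buffer.
 */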
struct rpc_np_read_state {
	struct rpc_transport_np_state *np_transport;

	uint8_t *data;
	size_t size;
	ssize_t received;
};

static void rpc_np_read_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_read_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   uint8_t *data, size_t size,
					   void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_read_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_read_state);
	if (req == NULL) {
		return NULL;
	}

	ok = rpc_np_is_connected(np_transport);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_CONNECTION_INVALID);
		return tevent_req_post(req, ev);
	}

	state->np_transport = np_transport;
	state->data = data;
	state->size = size;

	subreq = cli_read_andx_send(mem_ctx, ev, np_transport->cli,
				    np_transport->fnum, 0, size);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_np_read_done, req);
	return req;

 fail:
	TALLOC_FREE(req);
	return NULL;
}

static void rpc_np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_read_state *state = tevent_req_data(
		req, struct rpc_np_read_state);
	NTSTATUS status;
	uint8_t *rcvbuf;

	/* We must free subreq in this function as there is
	   a timer event attached to it. */

	status = cli_read_andx_recv(subreq, &state->received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, status);
		return;
	}

	if (state->received > state->size) {
		TALLOC_FREE(subreq);
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	if (state->received == 0) {
		TALLOC_FREE(subreq);
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, NT_STATUS_PIPE_BROKEN);
		return;
	}

	memcpy(state->data, rcvbuf, state->received);
	TALLOC_FREE(subreq);
	tevent_req_done(req);
}

static NTSTATUS rpc_np_read_recv(struct tevent_req *req, ssize_t *preceived)
{
	struct rpc_np_read_state *state = tevent_req_data(
		req, struct rpc_np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*preceived = state->received;
	return NT_STATUS_OK;
}
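
/*
 * Trans path: write and read are combined into a single SMBtrans round
 * trip on the pipe.
 */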
struct rpc_np_trans_state {
	struct rpc_transport_np_state *np_transport;
	uint16_t setup[2];
	uint32_t max_rdata_len;

	uint8_t *rdata;
	uint32_t rdata_len;
};

static void rpc_np_trans_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_trans_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    uint8_t *data, size_t data_len,
					    uint32_t max_rdata_len,
					    void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_trans_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_trans_state);
	if (req == NULL) {
		return NULL;
	}

	ok = rpc_np_is_connected(np_transport);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_CONNECTION_INVALID);
		return tevent_req_post(req, ev);
	}

	state->np_transport = np_transport;
	state->max_rdata_len = max_rdata_len;

	SSVAL(state->setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(state->setup+1, 0, np_transport->fnum);

	subreq = cli_trans_send(
		state, ev, np_transport->cli, SMBtrans,
		"\\PIPE\\", 0, 0, 0, state->setup, 2, 0,
		NULL, 0, 0, data, data_len, max_rdata_len);
	if (subreq == NULL) {
		goto fail;
	}
	tevent_req_set_callback(subreq, rpc_np_trans_done, req);
	return req;

 fail:
	TALLOC_FREE(req);
	return NULL;
}

static void rpc_np_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_trans_state *state = tevent_req_data(
		req, struct rpc_np_trans_state);
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&state->rdata, 0, &state->rdata_len);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, status);
		return;
	}

	if (state->rdata_len > state->max_rdata_len) {
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	if (state->rdata_len == 0) {
		state->np_transport->cli = NULL;
		tevent_req_nterror(req, NT_STATUS_PIPE_BROKEN);
		return;
	}

	tevent_req_done(req);
}

static NTSTATUS rpc_np_trans_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  uint8_t **prdata, uint32_t *prdata_len)
{
	struct rpc_np_trans_state *state = tevent_req_data(
		req, struct rpc_np_trans_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*prdata = talloc_move(mem_ctx, &state->rdata);
	*prdata_len = state->rdata_len;
	return NT_STATUS_OK;
}
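
/*
 * Transport setup: open the named pipe with an NTCreate&X call and fill
 * in the rpc_cli_transport function pointers.
 */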
struct rpc_transport_np_init_state {
	struct rpc_cli_transport *transport;
	struct rpc_transport_np_state *transport_np;
};

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq);

struct tevent_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx,
					      struct event_context *ev,
					      struct cli_state *cli,
					      const struct ndr_syntax_id *abstract_syntax)
{
	struct tevent_req *req, *subreq;
	struct rpc_transport_np_init_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_transport_np_init_state);
	if (req == NULL) {
		return NULL;
	}

	ok = cli_state_is_connected(cli);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_CONNECTION_INVALID);
		return tevent_req_post(req, ev);
	}

	state->transport = talloc(state, struct rpc_cli_transport);
	if (tevent_req_nomem(state->transport, req)) {
		return tevent_req_post(req, ev);
	}
	state->transport_np = talloc(state->transport,
				     struct rpc_transport_np_state);
	if (tevent_req_nomem(state->transport_np, req)) {
		return tevent_req_post(req, ev);
	}
	state->transport->priv = state->transport_np;

	state->transport_np->pipe_name = get_pipe_name_from_syntax(
		state->transport_np, abstract_syntax);
	state->transport_np->cli = cli;

	subreq = cli_ntcreate_send(
		state, ev, cli, state->transport_np->pipe_name, 0,
		DESIRED_ACCESS_PIPE, 0, FILE_SHARE_READ|FILE_SHARE_WRITE,
		FILE_OPEN, 0, 0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_transport_np_init_pipe_open,
				req);
	return req;
}

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_transport_np_init_state *state = tevent_req_data(
		req, struct rpc_transport_np_init_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->transport_np->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	talloc_set_destructor(state->transport_np,
			      rpc_transport_np_state_destructor);
	tevent_req_done(req);
}

NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req,
				    TALLOC_CTX *mem_ctx,
				    struct rpc_cli_transport **presult)
{
	struct rpc_transport_np_init_state *state = tevent_req_data(
		req, struct rpc_transport_np_init_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	state->transport->write_send = rpc_np_write_send;
	state->transport->write_recv = rpc_np_write_recv;
	state->transport->read_send = rpc_np_read_send;
	state->transport->read_recv = rpc_np_read_recv;
	state->transport->trans_send = rpc_np_trans_send;
	state->transport->trans_recv = rpc_np_trans_recv;
	state->transport->is_connected = rpc_np_is_connected;
	state->transport->set_timeout = rpc_np_set_timeout;

	*presult = talloc_move(mem_ctx, &state->transport);
	return NT_STATUS_OK;
}
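
/*
 * Synchronous wrapper around rpc_transport_np_init_send/recv, driven by
 * a temporary event context on a talloc stackframe.
 */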
NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli,
			       const struct ndr_syntax_id *abstract_syntax,
			       struct rpc_cli_transport **presult)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = rpc_transport_np_init_send(frame, ev, cli, abstract_syntax);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = rpc_transport_np_init_recv(req, mem_ctx, presult);

 fail:
	TALLOC_FREE(frame);
	return status;
}
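
/*
 * Return the cli_state a pipe runs over, or NULL if the pipe does not
 * use the named pipe transport.
 */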
struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
{
	struct rpc_transport_np_state *state = talloc_get_type(
		p->transport->priv, struct rpc_transport_np_state);

	if (state == NULL) {
		return NULL;
	}
	return state->cli;
}