/*
 * Unix SMB/CIFS implementation.
 * RPC client transport over named pipes
 * Copyright (C) Volker Lendecke 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
23 #define DBGC_CLASS DBGC_RPC_CLI
25 struct rpc_transport_np_state
{
26 struct cli_state
*cli
;
27 const char *pipe_name
;
31 static bool rpc_np_is_connected(void *priv
)
33 struct rpc_transport_np_state
*np_transport
= talloc_get_type_abort(
34 priv
, struct rpc_transport_np_state
);
37 if (np_transport
->cli
== NULL
) {
41 ok
= cli_state_is_connected(np_transport
->cli
);
43 np_transport
->cli
= NULL
;
50 static unsigned int rpc_np_set_timeout(void *priv
, unsigned int timeout
)
52 struct rpc_transport_np_state
*np_transport
= talloc_get_type_abort(
53 priv
, struct rpc_transport_np_state
);
56 if (np_transport
->cli
== NULL
) {
60 ok
= rpc_np_is_connected(np_transport
);
65 return cli_set_timeout(np_transport
->cli
, timeout
);
68 static int rpc_transport_np_state_destructor(struct rpc_transport_np_state
*s
)
70 if (!rpc_np_is_connected(s
)) {
71 DEBUG(10, ("socket was closed, no need to send close request.\n"));
75 /* TODO: do not use a sync call with a destructor!!! */
76 if (!NT_STATUS_IS_OK(cli_close(s
->cli
, s
->fnum
))) {
77 DEBUG(1, ("rpc_transport_np_state_destructor: cli_close "
78 "failed on pipe %s. Error was %s\n", s
->pipe_name
,
81 DEBUG(10, ("rpc_pipe_destructor: closed %s\n", s
->pipe_name
));
83 * We can't do much on failure
88 struct rpc_np_write_state
{
89 struct rpc_transport_np_state
*np_transport
;
94 static void rpc_np_write_done(struct tevent_req
*subreq
);
96 static struct tevent_req
*rpc_np_write_send(TALLOC_CTX
*mem_ctx
,
97 struct event_context
*ev
,
98 const uint8_t *data
, size_t size
,
101 struct rpc_transport_np_state
*np_transport
= talloc_get_type_abort(
102 priv
, struct rpc_transport_np_state
);
103 struct tevent_req
*req
, *subreq
;
104 struct rpc_np_write_state
*state
;
107 req
= tevent_req_create(mem_ctx
, &state
, struct rpc_np_write_state
);
112 ok
= rpc_np_is_connected(np_transport
);
114 tevent_req_nterror(req
, NT_STATUS_CONNECTION_INVALID
);
115 return tevent_req_post(req
, ev
);
118 state
->np_transport
= np_transport
;
122 subreq
= cli_write_andx_send(mem_ctx
, ev
, np_transport
->cli
,
124 8, /* 8 means message mode. */
126 if (tevent_req_nomem(subreq
, req
)) {
127 return tevent_req_post(req
, ev
);
129 tevent_req_set_callback(subreq
, rpc_np_write_done
, req
);
133 static void rpc_np_write_done(struct tevent_req
*subreq
)
135 struct tevent_req
*req
= tevent_req_callback_data(
136 subreq
, struct tevent_req
);
137 struct rpc_np_write_state
*state
= tevent_req_data(
138 req
, struct rpc_np_write_state
);
141 status
= cli_write_andx_recv(subreq
, &state
->written
);
143 if (!NT_STATUS_IS_OK(status
)) {
144 state
->np_transport
->cli
= NULL
;
145 tevent_req_nterror(req
, status
);
148 tevent_req_done(req
);
151 static NTSTATUS
rpc_np_write_recv(struct tevent_req
*req
, ssize_t
*pwritten
)
153 struct rpc_np_write_state
*state
= tevent_req_data(
154 req
, struct rpc_np_write_state
);
157 if (tevent_req_is_nterror(req
, &status
)) {
160 *pwritten
= state
->written
;
164 struct rpc_np_read_state
{
165 struct rpc_transport_np_state
*np_transport
;
171 static void rpc_np_read_done(struct tevent_req
*subreq
);
173 static struct tevent_req
*rpc_np_read_send(TALLOC_CTX
*mem_ctx
,
174 struct event_context
*ev
,
175 uint8_t *data
, size_t size
,
178 struct rpc_transport_np_state
*np_transport
= talloc_get_type_abort(
179 priv
, struct rpc_transport_np_state
);
180 struct tevent_req
*req
, *subreq
;
181 struct rpc_np_read_state
*state
;
184 req
= tevent_req_create(mem_ctx
, &state
, struct rpc_np_read_state
);
189 ok
= rpc_np_is_connected(np_transport
);
191 tevent_req_nterror(req
, NT_STATUS_CONNECTION_INVALID
);
192 return tevent_req_post(req
, ev
);
195 state
->np_transport
= np_transport
;
199 subreq
= cli_read_andx_send(mem_ctx
, ev
, np_transport
->cli
,
200 np_transport
->fnum
, 0, size
);
201 if (subreq
== NULL
) {
204 tevent_req_set_callback(subreq
, rpc_np_read_done
, req
);
211 static void rpc_np_read_done(struct tevent_req
*subreq
)
213 struct tevent_req
*req
= tevent_req_callback_data(
214 subreq
, struct tevent_req
);
215 struct rpc_np_read_state
*state
= tevent_req_data(
216 req
, struct rpc_np_read_state
);
220 /* We must free subreq in this function as there is
221 a timer event attached to it. */
223 status
= cli_read_andx_recv(subreq
, &state
->received
, &rcvbuf
);
225 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
228 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
229 status
= NT_STATUS_OK
;
231 if (!NT_STATUS_IS_OK(status
)) {
233 state
->np_transport
->cli
= NULL
;
234 tevent_req_nterror(req
, status
);
238 if (state
->received
> state
->size
) {
240 state
->np_transport
->cli
= NULL
;
241 tevent_req_nterror(req
, NT_STATUS_INVALID_NETWORK_RESPONSE
);
245 if (state
->received
== 0) {
247 state
->np_transport
->cli
= NULL
;
248 tevent_req_nterror(req
, NT_STATUS_PIPE_BROKEN
);
252 memcpy(state
->data
, rcvbuf
, state
->received
);
254 tevent_req_done(req
);
257 static NTSTATUS
rpc_np_read_recv(struct tevent_req
*req
, ssize_t
*preceived
)
259 struct rpc_np_read_state
*state
= tevent_req_data(
260 req
, struct rpc_np_read_state
);
263 if (tevent_req_is_nterror(req
, &status
)) {
266 *preceived
= state
->received
;
270 struct rpc_np_trans_state
{
271 struct rpc_transport_np_state
*np_transport
;
273 uint32_t max_rdata_len
;
278 static void rpc_np_trans_done(struct tevent_req
*subreq
);
280 static struct tevent_req
*rpc_np_trans_send(TALLOC_CTX
*mem_ctx
,
281 struct event_context
*ev
,
282 uint8_t *data
, size_t data_len
,
283 uint32_t max_rdata_len
,
286 struct rpc_transport_np_state
*np_transport
= talloc_get_type_abort(
287 priv
, struct rpc_transport_np_state
);
288 struct tevent_req
*req
, *subreq
;
289 struct rpc_np_trans_state
*state
;
292 req
= tevent_req_create(mem_ctx
, &state
, struct rpc_np_trans_state
);
297 ok
= rpc_np_is_connected(np_transport
);
299 tevent_req_nterror(req
, NT_STATUS_CONNECTION_INVALID
);
300 return tevent_req_post(req
, ev
);
303 state
->np_transport
= np_transport
;
304 state
->max_rdata_len
= max_rdata_len
;
306 SSVAL(state
->setup
+0, 0, TRANSACT_DCERPCCMD
);
307 SSVAL(state
->setup
+1, 0, np_transport
->fnum
);
309 subreq
= cli_trans_send(
310 state
, ev
, np_transport
->cli
, SMBtrans
,
311 "\\PIPE\\", 0, 0, 0, state
->setup
, 2, 0,
312 NULL
, 0, 0, data
, data_len
, max_rdata_len
);
313 if (subreq
== NULL
) {
316 tevent_req_set_callback(subreq
, rpc_np_trans_done
, req
);
324 static void rpc_np_trans_done(struct tevent_req
*subreq
)
326 struct tevent_req
*req
= tevent_req_callback_data(
327 subreq
, struct tevent_req
);
328 struct rpc_np_trans_state
*state
= tevent_req_data(
329 req
, struct rpc_np_trans_state
);
332 status
= cli_trans_recv(subreq
, state
, NULL
, 0, NULL
, NULL
, 0, NULL
,
333 &state
->rdata
, 0, &state
->rdata_len
);
335 if (NT_STATUS_EQUAL(status
, NT_STATUS_BUFFER_TOO_SMALL
)) {
336 status
= NT_STATUS_OK
;
338 if (!NT_STATUS_IS_OK(status
)) {
339 state
->np_transport
->cli
= NULL
;
340 tevent_req_nterror(req
, status
);
344 if (state
->rdata_len
> state
->max_rdata_len
) {
345 state
->np_transport
->cli
= NULL
;
346 tevent_req_nterror(req
, NT_STATUS_INVALID_NETWORK_RESPONSE
);
350 if (state
->rdata_len
== 0) {
351 state
->np_transport
->cli
= NULL
;
352 tevent_req_nterror(req
, NT_STATUS_PIPE_BROKEN
);
356 tevent_req_done(req
);
359 static NTSTATUS
rpc_np_trans_recv(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
,
360 uint8_t **prdata
, uint32_t *prdata_len
)
362 struct rpc_np_trans_state
*state
= tevent_req_data(
363 req
, struct rpc_np_trans_state
);
366 if (tevent_req_is_nterror(req
, &status
)) {
369 *prdata
= talloc_move(mem_ctx
, &state
->rdata
);
370 *prdata_len
= state
->rdata_len
;
374 struct rpc_transport_np_init_state
{
375 struct rpc_cli_transport
*transport
;
376 struct rpc_transport_np_state
*transport_np
;
379 static void rpc_transport_np_init_pipe_open(struct tevent_req
*subreq
);
381 struct tevent_req
*rpc_transport_np_init_send(TALLOC_CTX
*mem_ctx
,
382 struct event_context
*ev
,
383 struct cli_state
*cli
,
384 const struct ndr_syntax_id
*abstract_syntax
)
386 struct tevent_req
*req
, *subreq
;
387 struct rpc_transport_np_init_state
*state
;
390 req
= tevent_req_create(mem_ctx
, &state
,
391 struct rpc_transport_np_init_state
);
396 ok
= cli_state_is_connected(cli
);
398 tevent_req_nterror(req
, NT_STATUS_CONNECTION_INVALID
);
399 return tevent_req_post(req
, ev
);
402 state
->transport
= talloc(state
, struct rpc_cli_transport
);
403 if (tevent_req_nomem(state
->transport
, req
)) {
404 return tevent_req_post(req
, ev
);
406 state
->transport_np
= talloc(state
->transport
,
407 struct rpc_transport_np_state
);
408 if (tevent_req_nomem(state
->transport_np
, req
)) {
409 return tevent_req_post(req
, ev
);
411 state
->transport
->priv
= state
->transport_np
;
413 state
->transport_np
->pipe_name
= get_pipe_name_from_syntax(
414 state
->transport_np
, abstract_syntax
);
415 state
->transport_np
->cli
= cli
;
417 subreq
= cli_ntcreate_send(
418 state
, ev
, cli
, state
->transport_np
->pipe_name
, 0,
419 DESIRED_ACCESS_PIPE
, 0, FILE_SHARE_READ
|FILE_SHARE_WRITE
,
421 if (tevent_req_nomem(subreq
, req
)) {
422 return tevent_req_post(req
, ev
);
424 tevent_req_set_callback(subreq
, rpc_transport_np_init_pipe_open
,
429 static void rpc_transport_np_init_pipe_open(struct tevent_req
*subreq
)
431 struct tevent_req
*req
= tevent_req_callback_data(
432 subreq
, struct tevent_req
);
433 struct rpc_transport_np_init_state
*state
= tevent_req_data(
434 req
, struct rpc_transport_np_init_state
);
437 status
= cli_ntcreate_recv(subreq
, &state
->transport_np
->fnum
);
439 if (!NT_STATUS_IS_OK(status
)) {
440 tevent_req_nterror(req
, status
);
444 talloc_set_destructor(state
->transport_np
,
445 rpc_transport_np_state_destructor
);
446 tevent_req_done(req
);
449 NTSTATUS
rpc_transport_np_init_recv(struct tevent_req
*req
,
451 struct rpc_cli_transport
**presult
)
453 struct rpc_transport_np_init_state
*state
= tevent_req_data(
454 req
, struct rpc_transport_np_init_state
);
457 if (tevent_req_is_nterror(req
, &status
)) {
461 state
->transport
->write_send
= rpc_np_write_send
;
462 state
->transport
->write_recv
= rpc_np_write_recv
;
463 state
->transport
->read_send
= rpc_np_read_send
;
464 state
->transport
->read_recv
= rpc_np_read_recv
;
465 state
->transport
->trans_send
= rpc_np_trans_send
;
466 state
->transport
->trans_recv
= rpc_np_trans_recv
;
467 state
->transport
->is_connected
= rpc_np_is_connected
;
468 state
->transport
->set_timeout
= rpc_np_set_timeout
;
470 *presult
= talloc_move(mem_ctx
, &state
->transport
);
474 NTSTATUS
rpc_transport_np_init(TALLOC_CTX
*mem_ctx
, struct cli_state
*cli
,
475 const struct ndr_syntax_id
*abstract_syntax
,
476 struct rpc_cli_transport
**presult
)
478 TALLOC_CTX
*frame
= talloc_stackframe();
479 struct event_context
*ev
;
480 struct tevent_req
*req
;
481 NTSTATUS status
= NT_STATUS_OK
;
483 ev
= event_context_init(frame
);
485 status
= NT_STATUS_NO_MEMORY
;
489 req
= rpc_transport_np_init_send(frame
, ev
, cli
, abstract_syntax
);
491 status
= NT_STATUS_NO_MEMORY
;
495 if (!tevent_req_poll(req
, ev
)) {
496 status
= map_nt_error_from_unix(errno
);
500 status
= rpc_transport_np_init_recv(req
, mem_ctx
, presult
);
506 struct cli_state
*rpc_pipe_np_smb_conn(struct rpc_pipe_client
*p
)
508 struct rpc_transport_np_state
*state
= talloc_get_type(
509 p
->transport
->priv
, struct rpc_transport_np_state
);