/*
 * Unix SMB/CIFS implementation.
 * RPC client transport over named pipes
 * Copyright (C) Volker Lendecke 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_CLI

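/*
 * Per-connection state for the named pipe transport: the SMB connection the
 * pipe lives on, the name of the pipe and the fnum of the open pipe handle.
 */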
struct rpc_transport_np_state {
	struct cli_state *cli;
	const char *pipe_name;
	uint16_t fnum;
};

static int rpc_transport_np_state_destructor(struct rpc_transport_np_state *s)
{
	if (s->cli->fd == -1) {
		DEBUG(10, ("socket was closed, no need to send close request.\n"));
		return 0;
	}

	if (!NT_STATUS_IS_OK(cli_close(s->cli, s->fnum))) {
		DEBUG(1, ("rpc_transport_np_state_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n", s->pipe_name,
			  cli_errstr(s->cli)));
	}
	DEBUG(10, ("rpc_pipe_destructor: closed %s\n", s->pipe_name));

	/*
	 * We can't do much on failure
	 */
	return 0;
}

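/*
 * Write path: rpc_np_write_send() hands the marshalled RPC data to
 * cli_write_andx_send() in message mode (write mode 8) on the open pipe
 * fnum, and rpc_np_write_recv() reports how many bytes went out. All of the
 * transport callbacks below follow the same tevent send/done/recv pattern.
 */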
struct rpc_np_write_state {
	size_t written;
};

static void rpc_np_write_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_write_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    const uint8_t *data, size_t size,
					    void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_write_state *state;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_write_state);
	if (req == NULL) {
		return NULL;
	}

	subreq = cli_write_andx_send(mem_ctx, ev, np_transport->cli,
				     np_transport->fnum,
				     8, /* 8 means message mode. */
				     data, 0, size);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_np_write_done, req);
	return req;
}

static void rpc_np_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_write_state *state = tevent_req_data(
		req, struct rpc_np_write_state);
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &state->written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}
	tevent_req_done(req);
}

static NTSTATUS rpc_np_write_recv(struct tevent_req *req, ssize_t *pwritten)
{
	struct rpc_np_write_state *state = tevent_req_data(
		req, struct rpc_np_write_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*pwritten = state->written;
	return NT_STATUS_OK;
}

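/*
 * Read path: rpc_np_read_send() issues a cli_read_andx_send() against the
 * pipe fnum. In the done callback NT_STATUS_BUFFER_TOO_SMALL from the read
 * is treated as success, and the received bytes are copied out of the
 * subrequest's receive buffer before that subrequest is freed.
 */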
struct rpc_np_read_state {
	uint8_t *data;
	size_t size;
	ssize_t received;
};

static void rpc_np_read_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_read_send(TALLOC_CTX *mem_ctx,
					   struct event_context *ev,
					   uint8_t *data, size_t size,
					   void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_read_state *state;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->data = data;
	state->size = size;

	subreq = cli_read_andx_send(mem_ctx, ev, np_transport->cli,
				    np_transport->fnum, 0, size);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, rpc_np_read_done, req);
	return req;
}

static void rpc_np_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_read_state *state = tevent_req_data(
		req, struct rpc_np_read_state);
	NTSTATUS status;
	uint8_t *rcvbuf;

	status = cli_read_andx_recv(subreq, &state->received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tevent_req_nterror(req, status);
		return;
	}

	if (state->received > state->size) {
		TALLOC_FREE(subreq);
		tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
		return;
	}

	memcpy(state->data, rcvbuf, state->received);
	TALLOC_FREE(subreq);
	tevent_req_done(req);
}

static NTSTATUS rpc_np_read_recv(struct tevent_req *req, ssize_t *preceived)
{
	struct rpc_np_read_state *state = tevent_req_data(
		req, struct rpc_np_read_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}
	*preceived = state->received;
	return NT_STATUS_OK;
}

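/*
 * Trans path: rpc_np_trans_send() combines the write and the read into a
 * single SMBtrans call on "\PIPE\" with the setup words TRANSACT_DCERPCCMD
 * and the pipe fnum. The response data ends up in state->rdata/rdata_len and
 * is handed to the caller by rpc_np_trans_recv() via talloc_move().
 */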
struct rpc_np_trans_state {
	uint16_t setup[2];
	uint8_t *rdata;
	uint32_t rdata_len;
};

static void rpc_np_trans_done(struct tevent_req *subreq);

static struct tevent_req *rpc_np_trans_send(TALLOC_CTX *mem_ctx,
					    struct event_context *ev,
					    uint8_t *data, size_t data_len,
					    uint32_t max_rdata_len,
					    void *priv)
{
	struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
		priv, struct rpc_transport_np_state);
	struct tevent_req *req, *subreq;
	struct rpc_np_trans_state *state;

	req = tevent_req_create(mem_ctx, &state, struct rpc_np_trans_state);
	if (req == NULL) {
		return NULL;
	}

	SSVAL(state->setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(state->setup+1, 0, np_transport->fnum);

	subreq = cli_trans_send(
		state, ev, np_transport->cli, SMBtrans,
		"\\PIPE\\", 0, 0, 0, state->setup, 2, 0,
		NULL, 0, 0, data, data_len, max_rdata_len);
	if (subreq == NULL) {
		TALLOC_FREE(req);
		return NULL;
	}
	tevent_req_set_callback(subreq, rpc_np_trans_done, req);
	return req;
}

static void rpc_np_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_np_trans_state *state = tevent_req_data(
		req, struct rpc_np_trans_state);
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, 0, NULL, NULL, 0, NULL,
				&state->rdata, 0, &state->rdata_len);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}

static NTSTATUS rpc_np_trans_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
				  uint8_t **prdata, uint32_t *prdata_len)
{
	struct rpc_np_trans_state *state = tevent_req_data(
		req, struct rpc_np_trans_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	*prdata = talloc_move(mem_ctx, &state->rdata);
	*prdata_len = state->rdata_len;
	return NT_STATUS_OK;
}

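/*
 * Transport setup: rpc_transport_np_init_send() opens the pipe with
 * cli_ntcreate_send(). Once the open has succeeded, the destructor is
 * attached to the transport state, and rpc_transport_np_init_recv() fills in
 * the write/read/trans function pointers and hands the finished
 * rpc_cli_transport to the caller.
 */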
struct rpc_transport_np_init_state {
	struct rpc_cli_transport *transport;
	struct rpc_transport_np_state *transport_np;
};

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq);

struct tevent_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx,
					      struct event_context *ev,
					      struct cli_state *cli,
					      const struct ndr_syntax_id *abstract_syntax)
{
	struct tevent_req *req, *subreq;
	struct rpc_transport_np_init_state *state;

	req = tevent_req_create(mem_ctx, &state,
				struct rpc_transport_np_init_state);
	if (req == NULL) {
		return NULL;
	}

	state->transport = talloc(state, struct rpc_cli_transport);
	if (tevent_req_nomem(state->transport, req)) {
		return tevent_req_post(req, ev);
	}
	state->transport_np = talloc(state->transport,
				     struct rpc_transport_np_state);
	if (tevent_req_nomem(state->transport_np, req)) {
		return tevent_req_post(req, ev);
	}
	state->transport->priv = state->transport_np;

	state->transport_np->pipe_name = get_pipe_name_from_syntax(
		state->transport_np, abstract_syntax);
	state->transport_np->cli = cli;

	subreq = cli_ntcreate_send(
		state, ev, cli, state->transport_np->pipe_name, 0,
		DESIRED_ACCESS_PIPE, 0, FILE_SHARE_READ|FILE_SHARE_WRITE,
		FILE_OPEN, 0, 0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_transport_np_init_pipe_open,
				req);
	return req;
}

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_transport_np_init_state *state = tevent_req_data(
		req, struct rpc_transport_np_init_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->transport_np->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	talloc_set_destructor(state->transport_np,
			      rpc_transport_np_state_destructor);
	tevent_req_done(req);
}

NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req,
				    TALLOC_CTX *mem_ctx,
				    struct rpc_cli_transport **presult)
{
	struct rpc_transport_np_init_state *state = tevent_req_data(
		req, struct rpc_transport_np_init_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		return status;
	}

	state->transport->write_send = rpc_np_write_send;
	state->transport->write_recv = rpc_np_write_recv;
	state->transport->read_send = rpc_np_read_send;
	state->transport->read_recv = rpc_np_read_recv;
	state->transport->trans_send = rpc_np_trans_send;
	state->transport->trans_recv = rpc_np_trans_recv;

	*presult = talloc_move(mem_ctx, &state->transport);
	return NT_STATUS_OK;
}

NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli,
			       const struct ndr_syntax_id *abstract_syntax,
			       struct rpc_cli_transport **presult)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev;
	struct tevent_req *req;
	NTSTATUS status = NT_STATUS_OK;

	ev = event_context_init(frame);
	if (ev == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	req = rpc_transport_np_init_send(frame, ev, cli, abstract_syntax);
	if (req == NULL) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	if (!tevent_req_poll(req, ev)) {
		status = map_nt_error_from_unix(errno);
		goto fail;
	}

	status = rpc_transport_np_init_recv(req, mem_ctx, presult);

 fail:
	TALLOC_FREE(frame);
	return status;
}

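/*
 * Illustrative use of the synchronous wrapper (a sketch only; "cli" and
 * "syntax" are assumed to be an already connected cli_state and the
 * ndr_syntax_id of the interface the caller wants to talk to):
 *
 *	struct rpc_cli_transport *transport;
 *	NTSTATUS status;
 *
 *	status = rpc_transport_np_init(talloc_tos(), cli, syntax, &transport);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *
 * "transport" can then be handed on to the rpc_pipe_client setup.
 */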
struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
{
	struct rpc_transport_np_state *state = talloc_get_type(
		p->transport->priv, struct rpc_transport_np_state);

	if (state == NULL) {
		return NULL;
	}
	return state->cli;
}

void rpccli_close_np_fd(struct rpc_pipe_client *p)
{
	struct cli_state *cli = rpc_pipe_np_smb_conn(p);

	/* Drop the underlying SMB socket without sending a close request. */
	if ((cli != NULL) && (cli->fd != -1)) {
		close(cli->fd);
		cli->fd = -1;
	}
}