s3:rpc_transport_np: add comment about bad usage in a destructor
[Samba/bb.git] / source3 / rpc_client / rpc_transport_np.c
blob 014ce92adccacaf037944e42a31d1ab92b82bc91
/*
 * Unix SMB/CIFS implementation.
 * RPC client transport over named pipes
 * Copyright (C) Volker Lendecke 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_RPC_CLI
struct rpc_transport_np_state {
        struct cli_state *cli;
        const char *pipe_name;
        uint16_t fnum;
};

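/*
 * Talloc destructor for the transport state: if the SMB connection is
 * still alive, close the pipe fnum on the server. As the TODO below
 * notes, this issues a synchronous cli_close() from a destructor,
 * which is bad usage.
 */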
static int rpc_transport_np_state_destructor(struct rpc_transport_np_state *s)
{
        if (!cli_state_is_connected(s->cli)) {
                DEBUG(10, ("socket was closed, no need to send close request.\n"));
                return 0;
        }

        /* TODO: do not use a sync call with a destructor!!! */
        if (!NT_STATUS_IS_OK(cli_close(s->cli, s->fnum))) {
                DEBUG(1, ("rpc_transport_np_state_destructor: cli_close "
                          "failed on pipe %s. Error was %s\n", s->pipe_name,
                          cli_errstr(s->cli)));
        }
        DEBUG(10, ("rpc_pipe_destructor: closed %s\n", s->pipe_name));
        /*
         * We can't do much on failure
         */
        return 0;
}

struct rpc_np_write_state {
        size_t size;
        size_t written;
};

static void rpc_np_write_done(struct tevent_req *subreq);

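/*
 * Push a chunk of data down the pipe with an async SMB WriteAndX;
 * write mode 8 is message mode (see the inline comment below).
 */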
static struct tevent_req *rpc_np_write_send(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            const uint8_t *data, size_t size,
                                            void *priv)
{
        struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
                priv, struct rpc_transport_np_state);
        struct tevent_req *req, *subreq;
        struct rpc_np_write_state *state;

        req = tevent_req_create(mem_ctx, &state, struct rpc_np_write_state);
        if (req == NULL) {
                return NULL;
        }
        state->size = size;

        subreq = cli_write_andx_send(mem_ctx, ev, np_transport->cli,
                                     np_transport->fnum,
                                     8, /* 8 means message mode. */
                                     data, 0, size);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, rpc_np_write_done, req);
        return req;
}

static void rpc_np_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct rpc_np_write_state *state = tevent_req_data(
                req, struct rpc_np_write_state);
        NTSTATUS status;

        status = cli_write_andx_recv(subreq, &state->written);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }
        tevent_req_done(req);
}

static NTSTATUS rpc_np_write_recv(struct tevent_req *req, ssize_t *pwritten)
{
        struct rpc_np_write_state *state = tevent_req_data(
                req, struct rpc_np_write_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *pwritten = state->written;
        return NT_STATUS_OK;
}

struct rpc_np_read_state {
        uint8_t *data;
        size_t size;
        ssize_t received;
};

static void rpc_np_read_done(struct tevent_req *subreq);

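/*
 * Pull up to "size" bytes from the pipe with an async SMB ReadAndX
 * into the caller-provided buffer.
 */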
static struct tevent_req *rpc_np_read_send(TALLOC_CTX *mem_ctx,
                                           struct event_context *ev,
                                           uint8_t *data, size_t size,
                                           void *priv)
{
        struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
                priv, struct rpc_transport_np_state);
        struct tevent_req *req, *subreq;
        struct rpc_np_read_state *state;

        req = tevent_req_create(mem_ctx, &state, struct rpc_np_read_state);
        if (req == NULL) {
                return NULL;
        }
        state->data = data;
        state->size = size;

        subreq = cli_read_andx_send(mem_ctx, ev, np_transport->cli,
                                    np_transport->fnum, 0, size);
        if (subreq == NULL) {
                goto fail;
        }
        tevent_req_set_callback(subreq, rpc_np_read_done, req);
        return req;
 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void rpc_np_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct rpc_np_read_state *state = tevent_req_data(
                req, struct rpc_np_read_state);
        NTSTATUS status;
        uint8_t *rcvbuf;

        /* We must free subreq in this function as there is
           a timer event attached to it. */

        status = cli_read_andx_recv(subreq, &state->received, &rcvbuf);
        /*
         * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
         * child of that.
         */
        if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
                status = NT_STATUS_OK;
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(subreq);
                tevent_req_nterror(req, status);
                return;
        }

        if (state->received > state->size) {
                TALLOC_FREE(subreq);
                tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
                return;
        }

        if (state->received == 0) {
                TALLOC_FREE(subreq);
                tevent_req_nterror(req, NT_STATUS_PIPE_BROKEN);
                return;
        }

        memcpy(state->data, rcvbuf, state->received);
        TALLOC_FREE(subreq);
        tevent_req_done(req);
}

static NTSTATUS rpc_np_read_recv(struct tevent_req *req, ssize_t *preceived)
{
        struct rpc_np_read_state *state = tevent_req_data(
                req, struct rpc_np_read_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }
        *preceived = state->received;
        return NT_STATUS_OK;
}

struct rpc_np_trans_state {
        uint16_t setup[2];
        uint32_t max_rdata_len;
        uint8_t *rdata;
        uint32_t rdata_len;
};

static void rpc_np_trans_done(struct tevent_req *subreq);

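/*
 * Do a write+read round trip in a single SMBtrans call on "\PIPE\",
 * with TRANSACT_DCERPCCMD and the pipe fnum as setup words
 * (presumably the named pipe transact operation).
 */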
static struct tevent_req *rpc_np_trans_send(TALLOC_CTX *mem_ctx,
                                            struct event_context *ev,
                                            uint8_t *data, size_t data_len,
                                            uint32_t max_rdata_len,
                                            void *priv)
{
        struct rpc_transport_np_state *np_transport = talloc_get_type_abort(
                priv, struct rpc_transport_np_state);
        struct tevent_req *req, *subreq;
        struct rpc_np_trans_state *state;

        req = tevent_req_create(mem_ctx, &state, struct rpc_np_trans_state);
        if (req == NULL) {
                return NULL;
        }

        state->max_rdata_len = max_rdata_len;

        SSVAL(state->setup+0, 0, TRANSACT_DCERPCCMD);
        SSVAL(state->setup+1, 0, np_transport->fnum);

        subreq = cli_trans_send(
                state, ev, np_transport->cli, SMBtrans,
                "\\PIPE\\", 0, 0, 0, state->setup, 2, 0,
                NULL, 0, 0, data, data_len, max_rdata_len);
        if (subreq == NULL) {
                goto fail;
        }
        tevent_req_set_callback(subreq, rpc_np_trans_done, req);
        return req;

 fail:
        TALLOC_FREE(req);
        return NULL;
}

static void rpc_np_trans_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct rpc_np_trans_state *state = tevent_req_data(
                req, struct rpc_np_trans_state);
        NTSTATUS status;

        status = cli_trans_recv(subreq, state, NULL, NULL, NULL, NULL,
                                &state->rdata, &state->rdata_len);
        TALLOC_FREE(subreq);
        if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
                status = NT_STATUS_OK;
        }
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        if (state->rdata_len > state->max_rdata_len) {
                tevent_req_nterror(req, NT_STATUS_INVALID_NETWORK_RESPONSE);
                return;
        }

        if (state->rdata_len == 0) {
                tevent_req_nterror(req, NT_STATUS_PIPE_BROKEN);
                return;
        }

        tevent_req_done(req);
}

static NTSTATUS rpc_np_trans_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
                                  uint8_t **prdata, uint32_t *prdata_len)
{
        struct rpc_np_trans_state *state = tevent_req_data(
                req, struct rpc_np_trans_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        *prdata = talloc_move(mem_ctx, &state->rdata);
        *prdata_len = state->rdata_len;
        return NT_STATUS_OK;
}

struct rpc_transport_np_init_state {
        struct rpc_cli_transport *transport;
        struct rpc_transport_np_state *transport_np;
};

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq);

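/*
 * Open the named pipe for the given interface on an existing SMB
 * connection: allocate the rpc_cli_transport plus the np-specific
 * state, look up the pipe name from the abstract syntax and send the
 * NTCreate&X open.
 */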
struct tevent_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx,
                                              struct event_context *ev,
                                              struct cli_state *cli,
                                              const struct ndr_syntax_id *abstract_syntax)
{
        struct tevent_req *req, *subreq;
        struct rpc_transport_np_init_state *state;

        req = tevent_req_create(mem_ctx, &state,
                                struct rpc_transport_np_init_state);
        if (req == NULL) {
                return NULL;
        }

        state->transport = talloc(state, struct rpc_cli_transport);
        if (tevent_req_nomem(state->transport, req)) {
                return tevent_req_post(req, ev);
        }
        state->transport_np = talloc(state->transport,
                                     struct rpc_transport_np_state);
        if (tevent_req_nomem(state->transport_np, req)) {
                return tevent_req_post(req, ev);
        }
        state->transport->priv = state->transport_np;

        state->transport_np->pipe_name = get_pipe_name_from_syntax(
                state->transport_np, abstract_syntax);
        state->transport_np->cli = cli;

        subreq = cli_ntcreate_send(
                state, ev, cli, state->transport_np->pipe_name, 0,
                DESIRED_ACCESS_PIPE, 0, FILE_SHARE_READ|FILE_SHARE_WRITE,
                FILE_OPEN, 0, 0);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, rpc_transport_np_init_pipe_open,
                                req);
        return req;
}

static void rpc_transport_np_init_pipe_open(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct rpc_transport_np_init_state *state = tevent_req_data(
                req, struct rpc_transport_np_init_state);
        NTSTATUS status;

        status = cli_ntcreate_recv(subreq, &state->transport_np->fnum);
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        talloc_set_destructor(state->transport_np,
                              rpc_transport_np_state_destructor);
        tevent_req_done(req);
}

NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req,
                                    TALLOC_CTX *mem_ctx,
                                    struct rpc_cli_transport **presult)
{
        struct rpc_transport_np_init_state *state = tevent_req_data(
                req, struct rpc_transport_np_init_state);
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                return status;
        }

        state->transport->write_send = rpc_np_write_send;
        state->transport->write_recv = rpc_np_write_recv;
        state->transport->read_send = rpc_np_read_send;
        state->transport->read_recv = rpc_np_read_recv;
        state->transport->trans_send = rpc_np_trans_send;
        state->transport->trans_recv = rpc_np_trans_recv;

        *presult = talloc_move(mem_ctx, &state->transport);
        return NT_STATUS_OK;
}

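/*
 * Synchronous wrapper around rpc_transport_np_init_send/recv, driven
 * by a temporary event context on a talloc stackframe.
 */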
NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli,
                               const struct ndr_syntax_id *abstract_syntax,
                               struct rpc_cli_transport **presult)
{
        TALLOC_CTX *frame = talloc_stackframe();
        struct event_context *ev;
        struct tevent_req *req;
        NTSTATUS status = NT_STATUS_OK;

        ev = event_context_init(frame);
        if (ev == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        req = rpc_transport_np_init_send(frame, ev, cli, abstract_syntax);
        if (req == NULL) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        if (!tevent_req_poll(req, ev)) {
                status = map_nt_error_from_unix(errno);
                goto fail;
        }

        status = rpc_transport_np_init_recv(req, mem_ctx, presult);
 fail:
        TALLOC_FREE(frame);
        return status;
}

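/*
 * Return the cli_state underneath a named pipe RPC connection, or NULL
 * if the transport's private data is not rpc_transport_np_state.
 */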
struct cli_state *rpc_pipe_np_smb_conn(struct rpc_pipe_client *p)
{
        struct rpc_transport_np_state *state = talloc_get_type(
                p->transport->priv, struct rpc_transport_np_state);

        if (state == NULL) {
                return NULL;
        }
        return state->cli;
}

void rpccli_close_np_fd(struct rpc_pipe_client *p)
{
        struct cli_state *cli = rpc_pipe_np_smb_conn(p);
        if (cli) {
                if (cli->fd != -1) {
                        close(cli->fd);
                        cli->fd = -1;
                }
        }
        return;
}