/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"
static const struct tstream_context_ops tstream_cli_np_ops;

/*
 * Windows uses 1024 hardcoded for read size and trans max data
 */
#define TSTREAM_CLI_NP_BUF_SIZE 1024

struct tstream_cli_np {
	struct cli_state *cli;
	const char *npipe;
	uint16_t fnum;
	unsigned int default_timeout;

	struct {
		bool active;
		struct tevent_req *read_req;
		struct tevent_req *write_req;
		uint16_t setup[2];
	} trans;

	struct {
		off_t ofs;
		size_t left;
		uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
	} read, write;
};
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens if a caller does talloc_free()
	 * while everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	status = cli_close(cli_nps->cli, cli_nps->fnum);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	uint16_t fnum;
	const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);

struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = cli_ntcreate_send(state, ev, cli,
				   npipe,
				   0,
				   DESIRED_ACCESS_PIPE,
				   0,
				   FILE_SHARE_READ|FILE_SHARE_WRITE,
				   FILE_OPEN,
				   0,
				   0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}

NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->fnum = state->fnum;
	cli_nps->default_timeout = state->cli->timeout;

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
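
/*
 * Illustrative usage sketch (not part of this file): a synchronous
 * caller could drive the open_send/_recv pair with tevent_req_poll().
 * The pipe name "srvsvc" and the variables mem_ctx/ev/cli below are
 * placeholders; the underscore _recv function takes a location
 * argument that callers typically supply via __location__.
 *
 *	struct tevent_req *req;
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	req = tstream_cli_np_open_send(mem_ctx, ev, cli, "srvsvc");
 *	if (req == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	if (!tevent_req_poll(req, ev)) {
 *		TALLOC_FREE(req);
 *		return NT_STATUS_INTERNAL_ERROR;
 *	}
 *	status = _tstream_cli_np_open_recv(req, mem_ctx, &stream,
 *					   __location__);
 *	TALLOC_FREE(req);
 */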
static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}

bool tstream_is_cli_np(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps =
		talloc_get_type(_tstream_context_data(stream),
				struct tstream_cli_np);

	if (!cli_nps) {
		return false;
	}

	return true;
}

NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (cli_nps->trans.read_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.write_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.active) {
		return NT_STATUS_PIPE_BUSY;
	}

	cli_nps->trans.active = true;

	return NT_STATUS_OK;
}
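
/*
 * Illustrative call order (sketch, not enforced by this file): marking
 * the stream with tstream_cli_np_use_trans() and then queueing one
 * writev followed by one readv lets the two be coalesced into a single
 * SMBtrans round trip.  The generic tsocket wrappers shown here end up
 * in the writev_send/readv_send hooks defined below.
 *
 *	status = tstream_cli_np_use_trans(stream);
 *	...
 *	subreq = tstream_writev_send(mem_ctx, ev, stream, iov, count);
 *	...
 *	subreq = tstream_readv_send(mem_ctx, ev, stream, iov, count);
 */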
unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}

struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}

struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		int val;
		const char *location;
	} error;
};

static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}
static void tstream_cli_np_writev_write_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	cli_nps->write.ofs = 0;
	cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->write.ofs == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active && state->count == 0) {
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
				     cli_nps->fnum,
				     8, /* 8 means message mode. */
				     cli_nps->write.buf, 0,
				     cli_nps->write.ofs);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location);

static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}

struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		struct tevent_immediate *im;
	} trans;

	struct {
		int val;
		const char *location;
	} error;
};

static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}
static void tstream_cli_np_readv_read_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
				    cli_nps->fnum, 0, TSTREAM_CLI_NP_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	subreq = cli_trans_send(state, state->ev,
				cli_nps->cli,
				SMBtrans,
				"\\PIPE\\",
				0, 0, 0,
				cli_nps->trans.setup, 2,
				0,
				NULL, 0, 0,
				cli_nps->write.buf,
				cli_nps->write.ofs,
				TSTREAM_CLI_NP_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&rcvbuf, 0, &received);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(rcvbuf);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}

static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}

static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);

static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}

struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);

static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}

static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};

NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
				  struct cli_state *cli,
				  uint16_t fnum,
				  struct tstream_context **_stream,
				  const char *location)
{
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = cli;
	cli_nps->fnum = fnum;

	*_stream = stream;
	return NT_STATUS_OK;
}
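
/*
 * Illustrative sketch (not part of this file): wrapping an already
 * opened pipe handle.  The variables mem_ctx, cli and fnum stand for
 * the caller's memory context, connection and open fnum.  Unlike the
 * open_send/_recv path above, no destructor is installed here, so
 * freeing the resulting stream does not close the fnum.
 *
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	status = _tstream_cli_np_existing(mem_ctx, cli, fnum,
 *					  &stream, __location__);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *	... drive I/O with tstream_readv_send()/tstream_writev_send() ...
 */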