s3-proto: move rpc_server/rpc_handles.c protos to ntdomain.h
[Samba.git] / source3 / libsmb / cli_np_tstream.c
blob667f7c255461d42032bc02e7b71cbd990c3fed62
1 /*
2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "system/network.h"
22 #include "../lib/util/tevent_ntstatus.h"
23 #include "../lib/tsocket/tsocket.h"
24 #include "../lib/tsocket/tsocket_internal.h"
25 #include "cli_np_tstream.h"
27 static const struct tstream_context_ops tstream_cli_np_ops;
30 * Windows uses 1024 hardcoded for read size and trans max data
32 #define TSTREAM_CLI_NP_BUF_SIZE 1024
/*
 * Per-stream state for a tstream running over an SMB named pipe.
 */
struct tstream_cli_np {
	struct cli_state *cli;		/* SMB connection carrying the pipe */
	const char *npipe;		/* pipe name, for error messages */
	uint16_t fnum;			/* open file handle of the pipe */
	unsigned int default_timeout;	/* cli timeout captured at open time */

	/*
	 * State for coalescing a pending writev + readv pair into a single
	 * SMBtrans (TransactNmPipe) round trip.
	 */
	struct {
		bool active;			/* trans mode armed via tstream_cli_np_use_trans() */
		struct tevent_req *read_req;	/* parked readv request, if any */
		struct tevent_req *write_req;	/* parked writev request, if any */
		uint16_t setup[2];		/* SMBtrans setup words: [0]=cmd, [1]=fnum */
	} trans;

	/*
	 * Fixed-size staging buffers: `ofs` is the fill/consume position,
	 * `left` the bytes still free (write) or still unconsumed (read).
	 */
	struct {
		off_t ofs;
		size_t left;
		uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
	} read, write;
};
/*
 * talloc destructor: best-effort close of the pipe handle when the
 * stream is freed while still connected. Always returns 0 so the
 * talloc_free() proceeds.
 */
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens, if a caller does talloc_free(),
	 * while the everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	status = cli_close(cli_nps->cli, cli_nps->fnum);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}

	/*
	 * We can't do much on failure
	 */
	return 0;
}
/* State carried between tstream_cli_np_open_send() and _recv(). */
struct tstream_cli_np_open_state {
	struct cli_state *cli;	/* connection the pipe is opened on */
	uint16_t fnum;		/* handle filled in by the ntcreate reply */
	const char *npipe;	/* copy of the pipe name, moved into the stream */
};
93 static void tstream_cli_np_open_done(struct tevent_req *subreq);
/*
 * Async open of the named pipe `npipe` on `cli` via SMB NTCreate.
 * Completes in tstream_cli_np_open_done(); collect the stream with
 * _tstream_cli_np_open_recv().
 */
struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	/* keep our own copy: the stream outlives the caller's string */
	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * NOTE(review): the zero arguments below (create flags, file
	 * attributes, create options, security flags) were elided by the
	 * source extraction and restored from upstream — confirm against
	 * the repository copy.
	 */
	subreq = cli_ntcreate_send(state, ev, cli,
				   npipe,
				   0,
				   DESIRED_ACCESS_PIPE,
				   0,
				   FILE_SHARE_READ|FILE_SHARE_WRITE,
				   FILE_OPEN,
				   0,
				   0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
/* Completion of the NTCreate: record the pipe handle or fail the request. */
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}
/*
 * Receive side of tstream_cli_np_open_send(): builds the tstream_context
 * wrapping the now-open pipe and hands it to the caller (talloc'd off
 * mem_ctx). On success the stream owns the handle and will close it via
 * tstream_cli_np_destructor().
 */
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->fnum = state->fnum;
	cli_nps->default_timeout = state->cli->timeout;

	/* close the pipe handle if the stream is talloc_free()d */
	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	/* SMBtrans setup words: command + pipe handle */
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
196 static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
198 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
199 struct tstream_cli_np);
201 if (!cli_state_is_connected(cli_nps->cli)) {
202 errno = ENOTCONN;
203 return -1;
206 return cli_nps->read.left;
209 bool tstream_is_cli_np(struct tstream_context *stream)
211 struct tstream_cli_np *cli_nps =
212 talloc_get_type(_tstream_context_data(stream),
213 struct tstream_cli_np);
215 if (!cli_nps) {
216 return false;
219 return true;
222 NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
224 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
225 struct tstream_cli_np);
227 if (cli_nps->trans.read_req) {
228 return NT_STATUS_PIPE_BUSY;
231 if (cli_nps->trans.write_req) {
232 return NT_STATUS_PIPE_BUSY;
235 if (cli_nps->trans.active) {
236 return NT_STATUS_PIPE_BUSY;
239 cli_nps->trans.active = true;
241 return NT_STATUS_OK;
244 unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
245 unsigned int timeout)
247 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
248 struct tstream_cli_np);
250 if (!cli_state_is_connected(cli_nps->cli)) {
251 return cli_nps->default_timeout;
254 return cli_set_timeout(cli_nps->cli, timeout);
257 struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
259 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
260 struct tstream_cli_np);
262 return cli_nps->cli;
/* State for one writev request on a cli_np stream. */
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private, mutable copy of the caller's iovec */
	size_t count;

	int ret;		/* total bytes accepted so far */

	/* error captured before an async close; reported afterwards */
	struct {
		int val;
		const char *location;
	} error;
};
/*
 * Destructor: if this writev request dies while parked as the stream's
 * pending trans write, unpark it so no dangling pointer remains.
 */
static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}
291 static void tstream_cli_np_writev_write_next(struct tevent_req *req);
/*
 * tstream writev hook: copies the caller's iovec and starts pushing the
 * data to the pipe in TSTREAM_CLI_NP_BUF_SIZE chunks (or parks the
 * request for an SMBtrans when trans mode is armed).
 */
static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	/* keep trans.write_req consistent if the request is freed early */
	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	/* may already have completed (or failed) synchronously */
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
338 static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
339 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
/*
 * Fill the write staging buffer from the remaining iovec entries and
 * either finish, park the request for a trans round trip, or issue the
 * next SMBwriteX chunk.
 */
static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	cli_nps->write.ofs = 0;
	cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		/* advance the iovec cursor past the copied bytes */
		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->write.ofs == 0) {
		/* nothing left to send */
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active && state->count == 0) {
		/*
		 * trans mode armed and the full payload fits: park this
		 * write, the matching readv will issue the SMBtrans.
		 */
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		/* a readv is already waiting: kick off the combined trans */
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
				     cli_nps->fnum,
				     8, /* 8 means message mode. */
				     cli_nps->write.buf, 0,
				     cli_nps->write.ofs);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
408 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
409 int error,
410 const char *location);
/*
 * Completion of one SMBwriteX chunk: any error or short write tears the
 * stream down, otherwise continue with the next chunk.
 */
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	/* a short write on a message-mode pipe is fatal */
	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}
439 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
/*
 * Fatal-error path for writev: remember the original error, try an
 * async close of the pipe handle, and report the saved error once the
 * close finishes (or immediately if no close is possible).
 */
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}
/*
 * Close finished after a writev failure: mark the stream disconnected
 * and surface the error saved in writev_disconnect_now(). The close
 * status itself is intentionally ignored.
 */
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
492 static int tstream_cli_np_writev_recv(struct tevent_req *req,
493 int *perrno)
495 struct tstream_cli_np_writev_state *state =
496 tevent_req_data(req,
497 struct tstream_cli_np_writev_state);
498 int ret;
500 ret = tsocket_simple_int_recv(req, perrno);
501 if (ret == 0) {
502 ret = state->ret;
505 tevent_req_received(req);
506 return ret;
/* State for one readv request on a cli_np stream. */
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private, mutable copy of the caller's iovec */
	size_t count;

	int ret;		/* total bytes delivered so far */

	struct {
		/* immediate used to continue this readv after the paired
		 * writev of a trans round trip completed */
		struct tevent_immediate *im;
	} trans;

	/* error captured before an async close; reported afterwards */
	struct {
		int val;
		const char *location;
	} error;
};
/*
 * Destructor: if this readv request dies while parked as the stream's
 * pending trans read, unpark it so no dangling pointer remains.
 */
static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}
539 static void tstream_cli_np_readv_read_next(struct tevent_req *req);
/*
 * tstream readv hook: copies the caller's iovec and fills it from the
 * read staging buffer, issuing SMBreadX calls (or a combined SMBtrans)
 * as needed until every entry is satisfied.
 */
static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	/* keep trans.read_req consistent if the request is freed early */
	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	/* may already have completed (or failed) synchronously */
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
586 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);
/*
 * Drain buffered read bytes into the caller's iovec, then either
 * finish, park the request for a trans round trip, or issue the next
 * SMBreadX.
 */
static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		/* advance the iovec cursor past the filled bytes */
		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (state->count == 0) {
		/* every iovec entry is satisfied */
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		/* trans mode armed: wait for the paired writev */
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		/* paired writev already parked: run the combined trans */
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
				    cli_nps->fnum, 0, TSTREAM_CLI_NP_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
649 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);
/*
 * Issue the combined SMBtrans (TransactNmPipe): sends the parked
 * writev's staged buffer and reads the reply in one round trip.
 * The immediate is pre-allocated here so the completion path cannot
 * fail on OOM when it needs to reschedule this readv.
 */
static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	/*
	 * NOTE(review): the extraction elided one argument line here
	 * (max_setup count, 0, restored from upstream) — confirm against
	 * the repository copy.
	 */
	subreq = cli_trans_send(state, state->ev,
				cli_nps->cli,
				SMBtrans,
				"\\PIPE\\",
				0, 0, 0,
				cli_nps->trans.setup, 2,
				0,
				NULL, 0, 0,
				cli_nps->write.buf,
				cli_nps->write.ofs,
				TSTREAM_CLI_NP_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
685 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
686 int error,
687 const char *location);
688 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
689 struct tevent_immediate *im,
690 void *private_data);
/*
 * SMBtrans completed: stash the received payload in the read buffer,
 * complete the parked writev, and continue this readv via an immediate
 * (so the writev callback runs first).
 */
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&rcvbuf, 0, &received);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/* message-mode pipe has more data; not an error here */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
		/* server sent more than we asked for: protocol violation */
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(rcvbuf);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	/* continue this readv after the writev callback has run */
	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
/* Immediate handler: resume the readv parked behind a trans round trip. */
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}
/*
 * Completion of an SMBreadX chunk: validate the reply, copy it into the
 * read staging buffer and continue draining into the caller's iovec.
 */
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
		/* server sent more than requested: protocol violation */
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
811 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);
813 static void tstream_cli_np_readv_error(struct tevent_req *req);
/*
 * Fatal-error path for readv: remember the original error, try an
 * async close of the pipe handle, then report via
 * tstream_cli_np_readv_error() (which also fails a parked writev).
 */
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}
/*
 * Close finished after a readv failure: mark the stream disconnected
 * and surface the saved error. The close status itself is ignored.
 */
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
865 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
866 struct tevent_immediate *im,
867 void *private_data);
/*
 * Report the saved readv error. If a writev is parked behind the same
 * trans round trip, fail the writev immediately and fail this readv
 * via the pre-allocated immediate, so both callers see the error.
 */
static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* no immediate available: can only fail this readv */
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}
/* Immediate handler: deliver the deferred readv error (writev ran first). */
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
913 static int tstream_cli_np_readv_recv(struct tevent_req *req,
914 int *perrno)
916 struct tstream_cli_np_readv_state *state =
917 tevent_req_data(req, struct tstream_cli_np_readv_state);
918 int ret;
920 ret = tsocket_simple_int_recv(req, perrno);
921 if (ret == 0) {
922 ret = state->ret;
925 tevent_req_received(req);
926 return ret;
/* State for the async disconnect of a cli_np stream. */
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};
933 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
/*
 * tstream disconnect hook: async close of the pipe handle.
 * Fails with ENOTCONN if the underlying cli is already gone.
 */
static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
/*
 * Close completed: report EIO on failure, otherwise mark the stream
 * disconnected and finish the request.
 */
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}
/*
 * tstream disconnect receive hook: 0 on success, -1 with *perrno set
 * on failure.
 */
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
/* tstream ops vector wiring this transport into the tsocket framework. */
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};
/*
 * Wrap an already-open pipe handle (cli, fnum) in a tstream.
 *
 * NOTE(review): unlike _tstream_cli_np_open_recv() this sets neither a
 * talloc destructor nor npipe/default_timeout, and leaves trans.setup
 * zeroed — presumably the caller retains ownership of fnum and trans
 * mode is not used on such streams; confirm against the callers.
 */
NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
				  struct cli_state *cli,
				  uint16_t fnum,
				  struct tstream_context **_stream,
				  const char *location)
{
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = cli;
	cli_nps->fnum = fnum;

	*_stream = stream;
	return NT_STATUS_OK;
}