source3/libsmb/cli_np_tstream.c
/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"
static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of a SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via a SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (like Samba 3.5.x and below did).
 *
 * It is important that we use SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
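
/*
 * For reference, the Windows behaviour described above adds up to exactly
 * the buffer size we ask for (an illustration added here, not part of the
 * original comment):
 *
 *	1024 (SMBtrans data) + 3256 (SMBreadX) = 4280 (full DCERPC fragment)
 */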
struct tstream_cli_np {
	struct cli_state *cli;
	const char *npipe;
	uint16_t fnum;
	unsigned int default_timeout;

	struct {
		bool active;
		struct tevent_req *read_req;
		struct tevent_req *write_req;
		uint16_t setup[2];
	} trans;

	struct {
		off_t ofs;
		size_t left;
		uint8_t *buf;
	} read, write;
};
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens if a caller does talloc_free()
	 * while everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	status = cli_close(cli_nps->cli, cli_nps->fnum);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	uint16_t fnum;
	const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);
struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = cli_ntcreate_send(state, ev, cli,
				   npipe,
				   0,
				   DESIRED_ACCESS_PIPE,
				   0,
				   FILE_SHARE_READ|FILE_SHARE_WRITE,
				   FILE_OPEN,
				   0,
				   0);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	status = cli_ntcreate_recv(subreq, &state->fnum);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->fnum = state->fnum;
	cli_nps->default_timeout = state->cli->timeout;

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
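
/*
 * Illustrative usage sketch, not part of the original file: open a named
 * pipe as a tstream with the usual tevent_req pattern. The helper name
 * open_pipe_sync() and the pipe name "lsarpc" are assumptions for the
 * example; tstream_cli_np_open_recv() is assumed to be the _recv macro
 * from cli_np_tstream.h that supplies __location__.
 *
 *	static NTSTATUS open_pipe_sync(TALLOC_CTX *mem_ctx,
 *				       struct tevent_context *ev,
 *				       struct cli_state *cli,
 *				       struct tstream_context **stream)
 *	{
 *		struct tevent_req *req;
 *		NTSTATUS status;
 *
 *		req = tstream_cli_np_open_send(mem_ctx, ev, cli, "lsarpc");
 *		if (req == NULL) {
 *			return NT_STATUS_NO_MEMORY;
 *		}
 *		if (!tevent_req_poll(req, ev)) {
 *			TALLOC_FREE(req);
 *			return NT_STATUS_INTERNAL_ERROR;
 *		}
 *		status = tstream_cli_np_open_recv(req, mem_ctx, stream);
 *		TALLOC_FREE(req);
 *		return status;
 *	}
 */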
static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}
bool tstream_is_cli_np(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps =
		talloc_get_type(_tstream_context_data(stream),
				struct tstream_cli_np);

	if (!cli_nps) {
		return false;
	}

	return true;
}
NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (cli_nps->trans.read_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.write_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.active) {
		return NT_STATUS_PIPE_BUSY;
	}

	cli_nps->trans.active = true;

	return NT_STATUS_OK;
}
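
/*
 * Illustrative sketch, not part of the original file: after
 * tstream_cli_np_use_trans() succeeds, the next writev/readv pair on this
 * stream is coalesced into a single SMBtrans round trip (the writev is
 * parked in trans.write_req and sent as the data portion of the SMBtrans
 * issued from the readv path). A caller might do, roughly:
 *
 *	status = tstream_cli_np_use_trans(stream);
 *	if (NT_STATUS_IS_OK(status)) {
 *		subreq = tstream_writev_send(mem_ctx, ev, stream, iov, count);
 *		... then tstream_readv_send() for the response; both
 *		complete via the single cli_trans_send() issued below.
 *	}
 */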
unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}
struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		int val;
		const char *location;
	} error;
};
static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}
static void tstream_cli_np_writev_write_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * We make a copy of the vector so we can modify it as we consume it.
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
				     cli_nps->fnum,
				     8, /* 8 means message mode. */
				     cli_nps->write.buf, 0,
				     cli_nps->write.ofs);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location);

static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	status = cli_write_andx_recv(subreq, &written);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		struct tevent_immediate *im;
	} trans;

	struct {
		int val;
		const char *location;
	} error;
};
static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}
static void tstream_cli_np_readv_read_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * We make a copy of the vector so we can modify it as we consume it.
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
				    cli_nps->fnum, 0, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	subreq = cli_trans_send(state, state->ev,
				cli_nps->cli,
				SMBtrans,
				"\\PIPE\\",
				0, 0, 0,
				cli_nps->trans.setup, 2,
				0,
				NULL, 0, 0,
				cli_nps->write.buf,
				cli_nps->write.ofs,
				TSTREAM_CLI_NP_MAX_BUF_SIZE);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
				NULL, 0, NULL,
				&rcvbuf, 0, &received);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_move(cli_nps, &rcvbuf);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	cli_close_recv(subreq);
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);

static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);

static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	status = cli_close_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};
NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
				  struct cli_state *cli,
				  uint16_t fnum,
				  struct tstream_context **_stream,
				  const char *location)
{
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = cli;
	cli_nps->fnum = fnum;

	*_stream = stream;
	return NT_STATUS_OK;
}
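
/*
 * Illustrative usage sketch, not part of the original file: wrap an
 * already-open pipe handle into a tstream. tstream_cli_np_existing() is
 * assumed to be the macro from cli_np_tstream.h that supplies
 * __location__ for the last argument:
 *
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	status = tstream_cli_np_existing(mem_ctx, cli, fnum, &stream);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */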