s3:libsmb/cli_np_tstream: use larger buffers to avoid a bug NT4 servers (bug #8195)
[Samba.git] / source3 / libsmb / cli_np_tstream.c
blob15eb7f7129459c030a266a6da2c726d90392ae6f
1 /*
2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "../lib/util/tevent_ntstatus.h"
24 #include "../lib/tsocket/tsocket.h"
25 #include "../lib/tsocket/tsocket_internal.h"
26 #include "cli_np_tstream.h"
28 static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of a SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via a SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (as samba 3.5.x and below did).
 *
 * It is important that we do SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_BUF_SIZE 4280
50 struct tstream_cli_np {
51 struct cli_state *cli;
52 const char *npipe;
53 uint16_t fnum;
54 unsigned int default_timeout;
56 struct {
57 bool active;
58 struct tevent_req *read_req;
59 struct tevent_req *write_req;
60 uint16_t setup[2];
61 } trans;
63 struct {
64 off_t ofs;
65 size_t left;
66 uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
67 } read, write;
70 static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
72 NTSTATUS status;
74 if (!cli_state_is_connected(cli_nps->cli)) {
75 return 0;
79 * TODO: do not use a sync call with a destructor!!!
81 * This only happens, if a caller does talloc_free(),
82 * while the everything was still ok.
84 * If we get an unexpected failure within a normal
85 * operation, we already do an async cli_close_send()/_recv().
87 * Once we've fixed all callers to call
88 * tstream_disconnect_send()/_recv(), this will
89 * never be called.
91 status = cli_close(cli_nps->cli, cli_nps->fnum);
92 if (!NT_STATUS_IS_OK(status)) {
93 DEBUG(1, ("tstream_cli_np_destructor: cli_close "
94 "failed on pipe %s. Error was %s\n",
95 cli_nps->npipe, nt_errstr(status)));
98 * We can't do much on failure
100 return 0;
103 struct tstream_cli_np_open_state {
104 struct cli_state *cli;
105 uint16_t fnum;
106 const char *npipe;
109 static void tstream_cli_np_open_done(struct tevent_req *subreq);
111 struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
112 struct tevent_context *ev,
113 struct cli_state *cli,
114 const char *npipe)
116 struct tevent_req *req;
117 struct tstream_cli_np_open_state *state;
118 struct tevent_req *subreq;
120 req = tevent_req_create(mem_ctx, &state,
121 struct tstream_cli_np_open_state);
122 if (!req) {
123 return NULL;
125 state->cli = cli;
127 state->npipe = talloc_strdup(state, npipe);
128 if (tevent_req_nomem(state->npipe, req)) {
129 return tevent_req_post(req, ev);
132 subreq = cli_ntcreate_send(state, ev, cli,
133 npipe,
135 DESIRED_ACCESS_PIPE,
137 FILE_SHARE_READ|FILE_SHARE_WRITE,
138 FILE_OPEN,
141 if (tevent_req_nomem(subreq, req)) {
142 return tevent_req_post(req, ev);
144 tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);
146 return req;
149 static void tstream_cli_np_open_done(struct tevent_req *subreq)
151 struct tevent_req *req =
152 tevent_req_callback_data(subreq, struct tevent_req);
153 struct tstream_cli_np_open_state *state =
154 tevent_req_data(req, struct tstream_cli_np_open_state);
155 NTSTATUS status;
157 status = cli_ntcreate_recv(subreq, &state->fnum);
158 TALLOC_FREE(subreq);
159 if (!NT_STATUS_IS_OK(status)) {
160 tevent_req_nterror(req, status);
161 return;
164 tevent_req_done(req);
167 NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
168 TALLOC_CTX *mem_ctx,
169 struct tstream_context **_stream,
170 const char *location)
172 struct tstream_cli_np_open_state *state =
173 tevent_req_data(req, struct tstream_cli_np_open_state);
174 struct tstream_context *stream;
175 struct tstream_cli_np *cli_nps;
176 NTSTATUS status;
178 if (tevent_req_is_nterror(req, &status)) {
179 tevent_req_received(req);
180 return status;
183 stream = tstream_context_create(mem_ctx,
184 &tstream_cli_np_ops,
185 &cli_nps,
186 struct tstream_cli_np,
187 location);
188 if (!stream) {
189 tevent_req_received(req);
190 return NT_STATUS_NO_MEMORY;
192 ZERO_STRUCTP(cli_nps);
194 cli_nps->cli = state->cli;
195 cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
196 cli_nps->fnum = state->fnum;
197 cli_nps->default_timeout = state->cli->timeout;
199 talloc_set_destructor(cli_nps, tstream_cli_np_destructor);
201 cli_nps->trans.active = false;
202 cli_nps->trans.read_req = NULL;
203 cli_nps->trans.write_req = NULL;
204 SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
205 SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);
207 *_stream = stream;
208 tevent_req_received(req);
209 return NT_STATUS_OK;
212 static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
214 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
215 struct tstream_cli_np);
217 if (!cli_state_is_connected(cli_nps->cli)) {
218 errno = ENOTCONN;
219 return -1;
222 return cli_nps->read.left;
225 bool tstream_is_cli_np(struct tstream_context *stream)
227 struct tstream_cli_np *cli_nps =
228 talloc_get_type(_tstream_context_data(stream),
229 struct tstream_cli_np);
231 if (!cli_nps) {
232 return false;
235 return true;
238 NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
240 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
241 struct tstream_cli_np);
243 if (cli_nps->trans.read_req) {
244 return NT_STATUS_PIPE_BUSY;
247 if (cli_nps->trans.write_req) {
248 return NT_STATUS_PIPE_BUSY;
251 if (cli_nps->trans.active) {
252 return NT_STATUS_PIPE_BUSY;
255 cli_nps->trans.active = true;
257 return NT_STATUS_OK;
260 unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
261 unsigned int timeout)
263 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
264 struct tstream_cli_np);
266 if (!cli_state_is_connected(cli_nps->cli)) {
267 return cli_nps->default_timeout;
270 return cli_set_timeout(cli_nps->cli, timeout);
273 struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
275 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
276 struct tstream_cli_np);
278 return cli_nps->cli;
281 struct tstream_cli_np_writev_state {
282 struct tstream_context *stream;
283 struct tevent_context *ev;
285 struct iovec *vector;
286 size_t count;
288 int ret;
290 struct {
291 int val;
292 const char *location;
293 } error;
296 static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
298 struct tstream_cli_np *cli_nps =
299 tstream_context_data(state->stream,
300 struct tstream_cli_np);
302 cli_nps->trans.write_req = NULL;
304 return 0;
307 static void tstream_cli_np_writev_write_next(struct tevent_req *req);
309 static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
310 struct tevent_context *ev,
311 struct tstream_context *stream,
312 const struct iovec *vector,
313 size_t count)
315 struct tevent_req *req;
316 struct tstream_cli_np_writev_state *state;
317 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
318 struct tstream_cli_np);
320 req = tevent_req_create(mem_ctx, &state,
321 struct tstream_cli_np_writev_state);
322 if (!req) {
323 return NULL;
325 state->stream = stream;
326 state->ev = ev;
327 state->ret = 0;
329 talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);
331 if (!cli_state_is_connected(cli_nps->cli)) {
332 tevent_req_error(req, ENOTCONN);
333 return tevent_req_post(req, ev);
337 * we make a copy of the vector so we can change the structure
339 state->vector = talloc_array(state, struct iovec, count);
340 if (tevent_req_nomem(state->vector, req)) {
341 return tevent_req_post(req, ev);
343 memcpy(state->vector, vector, sizeof(struct iovec) * count);
344 state->count = count;
346 tstream_cli_np_writev_write_next(req);
347 if (!tevent_req_is_in_progress(req)) {
348 return tevent_req_post(req, ev);
351 return req;
354 static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
355 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
357 static void tstream_cli_np_writev_write_next(struct tevent_req *req)
359 struct tstream_cli_np_writev_state *state =
360 tevent_req_data(req,
361 struct tstream_cli_np_writev_state);
362 struct tstream_cli_np *cli_nps =
363 tstream_context_data(state->stream,
364 struct tstream_cli_np);
365 struct tevent_req *subreq;
367 cli_nps->write.ofs = 0;
368 cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;
371 * copy the pending buffer first
373 while (cli_nps->write.left > 0 && state->count > 0) {
374 uint8_t *base = (uint8_t *)state->vector[0].iov_base;
375 size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);
377 memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);
379 base += len;
380 state->vector[0].iov_base = base;
381 state->vector[0].iov_len -= len;
383 cli_nps->write.ofs += len;
384 cli_nps->write.left -= len;
386 if (state->vector[0].iov_len == 0) {
387 state->vector += 1;
388 state->count -= 1;
391 state->ret += len;
394 if (cli_nps->write.ofs == 0) {
395 tevent_req_done(req);
396 return;
399 if (cli_nps->trans.active && state->count == 0) {
400 cli_nps->trans.active = false;
401 cli_nps->trans.write_req = req;
402 return;
405 if (cli_nps->trans.read_req && state->count == 0) {
406 cli_nps->trans.write_req = req;
407 tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
408 return;
411 subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
412 cli_nps->fnum,
413 8, /* 8 means message mode. */
414 cli_nps->write.buf, 0,
415 cli_nps->write.ofs);
416 if (tevent_req_nomem(subreq, req)) {
417 return;
419 tevent_req_set_callback(subreq,
420 tstream_cli_np_writev_write_done,
421 req);
424 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
425 int error,
426 const char *location);
428 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
430 struct tevent_req *req =
431 tevent_req_callback_data(subreq, struct tevent_req);
432 struct tstream_cli_np_writev_state *state =
433 tevent_req_data(req, struct tstream_cli_np_writev_state);
434 struct tstream_cli_np *cli_nps =
435 tstream_context_data(state->stream,
436 struct tstream_cli_np);
437 size_t written;
438 NTSTATUS status;
440 status = cli_write_andx_recv(subreq, &written);
441 TALLOC_FREE(subreq);
442 if (!NT_STATUS_IS_OK(status)) {
443 tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
444 return;
447 if (written != cli_nps->write.ofs) {
448 tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
449 return;
452 tstream_cli_np_writev_write_next(req);
455 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
457 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
458 int error,
459 const char *location)
461 struct tstream_cli_np_writev_state *state =
462 tevent_req_data(req,
463 struct tstream_cli_np_writev_state);
464 struct tstream_cli_np *cli_nps =
465 tstream_context_data(state->stream,
466 struct tstream_cli_np);
467 struct tevent_req *subreq;
469 state->error.val = error;
470 state->error.location = location;
472 if (!cli_state_is_connected(cli_nps->cli)) {
473 /* return the original error */
474 _tevent_req_error(req, state->error.val, state->error.location);
475 return;
478 subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
479 if (subreq == NULL) {
480 /* return the original error */
481 _tevent_req_error(req, state->error.val, state->error.location);
482 return;
484 tevent_req_set_callback(subreq,
485 tstream_cli_np_writev_disconnect_done,
486 req);
489 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
491 struct tevent_req *req =
492 tevent_req_callback_data(subreq, struct tevent_req);
493 struct tstream_cli_np_writev_state *state =
494 tevent_req_data(req, struct tstream_cli_np_writev_state);
495 struct tstream_cli_np *cli_nps =
496 tstream_context_data(state->stream, struct tstream_cli_np);
497 NTSTATUS status;
499 status = cli_close_recv(subreq);
500 TALLOC_FREE(subreq);
502 cli_nps->cli = NULL;
504 /* return the original error */
505 _tevent_req_error(req, state->error.val, state->error.location);
508 static int tstream_cli_np_writev_recv(struct tevent_req *req,
509 int *perrno)
511 struct tstream_cli_np_writev_state *state =
512 tevent_req_data(req,
513 struct tstream_cli_np_writev_state);
514 int ret;
516 ret = tsocket_simple_int_recv(req, perrno);
517 if (ret == 0) {
518 ret = state->ret;
521 tevent_req_received(req);
522 return ret;
525 struct tstream_cli_np_readv_state {
526 struct tstream_context *stream;
527 struct tevent_context *ev;
529 struct iovec *vector;
530 size_t count;
532 int ret;
534 struct {
535 struct tevent_immediate *im;
536 } trans;
538 struct {
539 int val;
540 const char *location;
541 } error;
544 static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
546 struct tstream_cli_np *cli_nps =
547 tstream_context_data(state->stream,
548 struct tstream_cli_np);
550 cli_nps->trans.read_req = NULL;
552 return 0;
555 static void tstream_cli_np_readv_read_next(struct tevent_req *req);
557 static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
558 struct tevent_context *ev,
559 struct tstream_context *stream,
560 struct iovec *vector,
561 size_t count)
563 struct tevent_req *req;
564 struct tstream_cli_np_readv_state *state;
565 struct tstream_cli_np *cli_nps =
566 tstream_context_data(stream, struct tstream_cli_np);
568 req = tevent_req_create(mem_ctx, &state,
569 struct tstream_cli_np_readv_state);
570 if (!req) {
571 return NULL;
573 state->stream = stream;
574 state->ev = ev;
575 state->ret = 0;
577 talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);
579 if (!cli_state_is_connected(cli_nps->cli)) {
580 tevent_req_error(req, ENOTCONN);
581 return tevent_req_post(req, ev);
585 * we make a copy of the vector so we can change the structure
587 state->vector = talloc_array(state, struct iovec, count);
588 if (tevent_req_nomem(state->vector, req)) {
589 return tevent_req_post(req, ev);
591 memcpy(state->vector, vector, sizeof(struct iovec) * count);
592 state->count = count;
594 tstream_cli_np_readv_read_next(req);
595 if (!tevent_req_is_in_progress(req)) {
596 return tevent_req_post(req, ev);
599 return req;
602 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);
604 static void tstream_cli_np_readv_read_next(struct tevent_req *req)
606 struct tstream_cli_np_readv_state *state =
607 tevent_req_data(req,
608 struct tstream_cli_np_readv_state);
609 struct tstream_cli_np *cli_nps =
610 tstream_context_data(state->stream,
611 struct tstream_cli_np);
612 struct tevent_req *subreq;
615 * copy the pending buffer first
617 while (cli_nps->read.left > 0 && state->count > 0) {
618 uint8_t *base = (uint8_t *)state->vector[0].iov_base;
619 size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);
621 memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);
623 base += len;
624 state->vector[0].iov_base = base;
625 state->vector[0].iov_len -= len;
627 cli_nps->read.ofs += len;
628 cli_nps->read.left -= len;
630 if (state->vector[0].iov_len == 0) {
631 state->vector += 1;
632 state->count -= 1;
635 state->ret += len;
638 if (state->count == 0) {
639 tevent_req_done(req);
640 return;
643 if (cli_nps->trans.active) {
644 cli_nps->trans.active = false;
645 cli_nps->trans.read_req = req;
646 return;
649 if (cli_nps->trans.write_req) {
650 cli_nps->trans.read_req = req;
651 tstream_cli_np_readv_trans_start(req);
652 return;
655 subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
656 cli_nps->fnum, 0, TSTREAM_CLI_NP_BUF_SIZE);
657 if (tevent_req_nomem(subreq, req)) {
658 return;
660 tevent_req_set_callback(subreq,
661 tstream_cli_np_readv_read_done,
662 req);
665 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);
667 static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
669 struct tstream_cli_np_readv_state *state =
670 tevent_req_data(req,
671 struct tstream_cli_np_readv_state);
672 struct tstream_cli_np *cli_nps =
673 tstream_context_data(state->stream,
674 struct tstream_cli_np);
675 struct tevent_req *subreq;
677 state->trans.im = tevent_create_immediate(state);
678 if (tevent_req_nomem(state->trans.im, req)) {
679 return;
682 subreq = cli_trans_send(state, state->ev,
683 cli_nps->cli,
684 SMBtrans,
685 "\\PIPE\\",
686 0, 0, 0,
687 cli_nps->trans.setup, 2,
689 NULL, 0, 0,
690 cli_nps->write.buf,
691 cli_nps->write.ofs,
692 TSTREAM_CLI_NP_BUF_SIZE);
693 if (tevent_req_nomem(subreq, req)) {
694 return;
696 tevent_req_set_callback(subreq,
697 tstream_cli_np_readv_trans_done,
698 req);
701 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
702 int error,
703 const char *location);
704 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
705 struct tevent_immediate *im,
706 void *private_data);
708 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
710 struct tevent_req *req =
711 tevent_req_callback_data(subreq, struct tevent_req);
712 struct tstream_cli_np_readv_state *state =
713 tevent_req_data(req, struct tstream_cli_np_readv_state);
714 struct tstream_cli_np *cli_nps =
715 tstream_context_data(state->stream, struct tstream_cli_np);
716 uint8_t *rcvbuf;
717 uint32_t received;
718 NTSTATUS status;
720 status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
721 NULL, 0, NULL,
722 &rcvbuf, 0, &received);
723 TALLOC_FREE(subreq);
724 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
725 status = NT_STATUS_OK;
727 if (!NT_STATUS_IS_OK(status)) {
728 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
729 return;
732 if (received > TSTREAM_CLI_NP_BUF_SIZE) {
733 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
734 return;
737 if (received == 0) {
738 tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
739 return;
742 cli_nps->read.ofs = 0;
743 cli_nps->read.left = received;
744 memcpy(cli_nps->read.buf, rcvbuf, received);
745 TALLOC_FREE(rcvbuf);
747 if (cli_nps->trans.write_req == NULL) {
748 tstream_cli_np_readv_read_next(req);
749 return;
752 tevent_schedule_immediate(state->trans.im, state->ev,
753 tstream_cli_np_readv_trans_next, req);
755 tevent_req_done(cli_nps->trans.write_req);
758 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
759 struct tevent_immediate *im,
760 void *private_data)
762 struct tevent_req *req =
763 talloc_get_type_abort(private_data,
764 struct tevent_req);
766 tstream_cli_np_readv_read_next(req);
769 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
771 struct tevent_req *req =
772 tevent_req_callback_data(subreq, struct tevent_req);
773 struct tstream_cli_np_readv_state *state =
774 tevent_req_data(req, struct tstream_cli_np_readv_state);
775 struct tstream_cli_np *cli_nps =
776 tstream_context_data(state->stream, struct tstream_cli_np);
777 uint8_t *rcvbuf;
778 ssize_t received;
779 NTSTATUS status;
782 * We must free subreq in this function as there is
783 * a timer event attached to it.
786 status = cli_read_andx_recv(subreq, &received, &rcvbuf);
788 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
789 * child of that.
791 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
793 * NT_STATUS_BUFFER_TOO_SMALL means that there's
794 * more data to read when the named pipe is used
795 * in message mode (which is the case here).
797 * But we hide this from the caller.
799 status = NT_STATUS_OK;
801 if (!NT_STATUS_IS_OK(status)) {
802 TALLOC_FREE(subreq);
803 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
804 return;
807 if (received > TSTREAM_CLI_NP_BUF_SIZE) {
808 TALLOC_FREE(subreq);
809 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
810 return;
813 if (received == 0) {
814 TALLOC_FREE(subreq);
815 tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
816 return;
819 cli_nps->read.ofs = 0;
820 cli_nps->read.left = received;
821 memcpy(cli_nps->read.buf, rcvbuf, received);
822 TALLOC_FREE(subreq);
824 tstream_cli_np_readv_read_next(req);
827 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);
829 static void tstream_cli_np_readv_error(struct tevent_req *req);
831 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
832 int error,
833 const char *location)
835 struct tstream_cli_np_readv_state *state =
836 tevent_req_data(req,
837 struct tstream_cli_np_readv_state);
838 struct tstream_cli_np *cli_nps =
839 tstream_context_data(state->stream,
840 struct tstream_cli_np);
841 struct tevent_req *subreq;
843 state->error.val = error;
844 state->error.location = location;
846 if (!cli_state_is_connected(cli_nps->cli)) {
847 /* return the original error */
848 tstream_cli_np_readv_error(req);
849 return;
852 subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
853 if (subreq == NULL) {
854 /* return the original error */
855 tstream_cli_np_readv_error(req);
856 return;
858 tevent_req_set_callback(subreq,
859 tstream_cli_np_readv_disconnect_done,
860 req);
863 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
865 struct tevent_req *req =
866 tevent_req_callback_data(subreq, struct tevent_req);
867 struct tstream_cli_np_readv_state *state =
868 tevent_req_data(req, struct tstream_cli_np_readv_state);
869 struct tstream_cli_np *cli_nps =
870 tstream_context_data(state->stream, struct tstream_cli_np);
871 NTSTATUS status;
873 status = cli_close_recv(subreq);
874 TALLOC_FREE(subreq);
876 cli_nps->cli = NULL;
878 tstream_cli_np_readv_error(req);
881 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
882 struct tevent_immediate *im,
883 void *private_data);
885 static void tstream_cli_np_readv_error(struct tevent_req *req)
887 struct tstream_cli_np_readv_state *state =
888 tevent_req_data(req,
889 struct tstream_cli_np_readv_state);
890 struct tstream_cli_np *cli_nps =
891 tstream_context_data(state->stream,
892 struct tstream_cli_np);
894 if (cli_nps->trans.write_req == NULL) {
895 /* return the original error */
896 _tevent_req_error(req, state->error.val, state->error.location);
897 return;
900 if (state->trans.im == NULL) {
901 /* return the original error */
902 _tevent_req_error(req, state->error.val, state->error.location);
903 return;
906 tevent_schedule_immediate(state->trans.im, state->ev,
907 tstream_cli_np_readv_error_trigger, req);
909 /* return the original error for writev */
910 _tevent_req_error(cli_nps->trans.write_req,
911 state->error.val, state->error.location);
914 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
915 struct tevent_immediate *im,
916 void *private_data)
918 struct tevent_req *req =
919 talloc_get_type_abort(private_data,
920 struct tevent_req);
921 struct tstream_cli_np_readv_state *state =
922 tevent_req_data(req,
923 struct tstream_cli_np_readv_state);
925 /* return the original error */
926 _tevent_req_error(req, state->error.val, state->error.location);
929 static int tstream_cli_np_readv_recv(struct tevent_req *req,
930 int *perrno)
932 struct tstream_cli_np_readv_state *state =
933 tevent_req_data(req, struct tstream_cli_np_readv_state);
934 int ret;
936 ret = tsocket_simple_int_recv(req, perrno);
937 if (ret == 0) {
938 ret = state->ret;
941 tevent_req_received(req);
942 return ret;
/* State for the async disconnect request. */
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};
949 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
951 static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
952 struct tevent_context *ev,
953 struct tstream_context *stream)
955 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
956 struct tstream_cli_np);
957 struct tevent_req *req;
958 struct tstream_cli_np_disconnect_state *state;
959 struct tevent_req *subreq;
961 req = tevent_req_create(mem_ctx, &state,
962 struct tstream_cli_np_disconnect_state);
963 if (req == NULL) {
964 return NULL;
967 state->stream = stream;
969 if (!cli_state_is_connected(cli_nps->cli)) {
970 tevent_req_error(req, ENOTCONN);
971 return tevent_req_post(req, ev);
974 subreq = cli_close_send(state, ev, cli_nps->cli, cli_nps->fnum);
975 if (tevent_req_nomem(subreq, req)) {
976 return tevent_req_post(req, ev);
978 tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);
980 return req;
983 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
985 struct tevent_req *req = tevent_req_callback_data(subreq,
986 struct tevent_req);
987 struct tstream_cli_np_disconnect_state *state =
988 tevent_req_data(req, struct tstream_cli_np_disconnect_state);
989 struct tstream_cli_np *cli_nps =
990 tstream_context_data(state->stream, struct tstream_cli_np);
991 NTSTATUS status;
993 status = cli_close_recv(subreq);
994 TALLOC_FREE(subreq);
995 if (!NT_STATUS_IS_OK(status)) {
996 tevent_req_error(req, EIO);
997 return;
1000 cli_nps->cli = NULL;
1002 tevent_req_done(req);
/* tstream op: collect disconnect result — 0 on success, -1 with *perrno. */
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
1016 static const struct tstream_context_ops tstream_cli_np_ops = {
1017 .name = "cli_np",
1019 .pending_bytes = tstream_cli_np_pending_bytes,
1021 .readv_send = tstream_cli_np_readv_send,
1022 .readv_recv = tstream_cli_np_readv_recv,
1024 .writev_send = tstream_cli_np_writev_send,
1025 .writev_recv = tstream_cli_np_writev_recv,
1027 .disconnect_send = tstream_cli_np_disconnect_send,
1028 .disconnect_recv = tstream_cli_np_disconnect_recv,
1031 NTSTATUS _tstream_cli_np_existing(TALLOC_CTX *mem_ctx,
1032 struct cli_state *cli,
1033 uint16_t fnum,
1034 struct tstream_context **_stream,
1035 const char *location)
1037 struct tstream_context *stream;
1038 struct tstream_cli_np *cli_nps;
1040 stream = tstream_context_create(mem_ctx,
1041 &tstream_cli_np_ops,
1042 &cli_nps,
1043 struct tstream_cli_np,
1044 location);
1045 if (!stream) {
1046 return NT_STATUS_NO_MEMORY;
1048 ZERO_STRUCTP(cli_nps);
1050 cli_nps->cli = cli;
1051 cli_nps->fnum = fnum;
1053 *_stream = stream;
1054 return NT_STATUS_OK;