s3:cli_np_tstream: include smbXcli_base.h, because we'll use functions from there
source3/libsmb/cli_np_tstream.c

/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "libsmb/smb2cli.h"
#include "../libcli/smb/smbXcli_base.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"

static const struct tstream_context_ops tstream_cli_np_ops;

/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of an SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via an SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (like Samba 3.5.x and below did).
 *
 * It is important that we use SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
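
/*
 * Illustrative note: a maximum-size fragment fetched the Windows way
 * would arrive as 1024 bytes of SMBtrans data plus up to 3256 bytes
 * via SMBreadX, i.e. 1024 + 3256 = 4280 = TSTREAM_CLI_NP_MAX_BUF_SIZE.
 */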

struct tstream_cli_np {
        struct cli_state *cli;
        const char *npipe;
        bool is_smb1;
        uint16_t fnum;
        uint64_t fid_persistent;
        uint64_t fid_volatile;
        unsigned int default_timeout;

        struct {
                bool active;
                struct tevent_req *read_req;
                struct tevent_req *write_req;
                uint16_t setup[2];
        } trans;

        struct {
                off_t ofs;
                size_t left;
                uint8_t *buf;
        } read, write;
};

static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
        NTSTATUS status;

        if (!cli_state_is_connected(cli_nps->cli)) {
                return 0;
        }

        /*
         * TODO: do not use a sync call with a destructor!!!
         *
         * This only happens if a caller does talloc_free()
         * while everything was still ok.
         *
         * If we get an unexpected failure within a normal
         * operation, we already do an async cli_close_send()/_recv().
         *
         * Once we've fixed all callers to call
         * tstream_disconnect_send()/_recv(), this will
         * never be called.
         */
        if (cli_nps->is_smb1) {
                status = cli_close(cli_nps->cli, cli_nps->fnum);
        } else {
                status = smb2cli_close(cli_nps->cli->conn,
                                       cli_nps->cli->timeout,
                                       cli_nps->cli->smb2.session,
                                       cli_nps->cli->smb2.tid, 0,
                                       cli_nps->fid_persistent,
                                       cli_nps->fid_volatile);
        }
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(1, ("tstream_cli_np_destructor: cli_close "
                          "failed on pipe %s. Error was %s\n",
                          cli_nps->npipe, nt_errstr(status)));
        }
        /*
         * We can't do much on failure
         */
        return 0;
}

struct tstream_cli_np_open_state {
        struct cli_state *cli;
        bool is_smb1;
        uint16_t fnum;
        uint64_t fid_persistent;
        uint64_t fid_volatile;
        const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);

struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
                                            struct tevent_context *ev,
                                            struct cli_state *cli,
                                            const char *npipe)
{
        struct tevent_req *req;
        struct tstream_cli_np_open_state *state;
        struct tevent_req *subreq;

        req = tevent_req_create(mem_ctx, &state,
                                struct tstream_cli_np_open_state);
        if (!req) {
                return NULL;
        }
        state->cli = cli;

        state->npipe = talloc_strdup(state, npipe);
        if (tevent_req_nomem(state->npipe, req)) {
                return tevent_req_post(req, ev);
        }

        if (cli_state_protocol(cli) < PROTOCOL_SMB2_02) {
                state->is_smb1 = true;
        }

        if (state->is_smb1) {
                subreq = cli_ntcreate_send(state, ev, cli,
                                           npipe,
                                           0, /* create flags */
                                           DESIRED_ACCESS_PIPE,
                                           0, /* file attributes */
                                           FILE_SHARE_READ|FILE_SHARE_WRITE,
                                           FILE_OPEN,
                                           0, /* create options */
                                           0); /* security flags */
        } else {
                subreq = smb2cli_create_send(state, ev, cli->conn,
                                             cli->timeout, cli->smb2.session,
                                             cli->smb2.tid,
                                             npipe,
                                             SMB2_OPLOCK_LEVEL_NONE,
                                             SMB2_IMPERSONATION_IMPERSONATION,
                                             DESIRED_ACCESS_PIPE,
                                             0, /* file_attributes */
                                             FILE_SHARE_READ|FILE_SHARE_WRITE,
                                             FILE_OPEN,
                                             0, /* create_options */
                                             NULL); /* blobs */
        }
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

        return req;
}

static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_open_state *state =
                tevent_req_data(req, struct tstream_cli_np_open_state);
        NTSTATUS status;

        if (state->is_smb1) {
                status = cli_ntcreate_recv(subreq, &state->fnum);
        } else {
                status = smb2cli_create_recv(subreq,
                                             &state->fid_persistent,
                                             &state->fid_volatile);
        }
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
        }

        tevent_req_done(req);
}

NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
                                   TALLOC_CTX *mem_ctx,
                                   struct tstream_context **_stream,
                                   const char *location)
{
        struct tstream_cli_np_open_state *state =
                tevent_req_data(req, struct tstream_cli_np_open_state);
        struct tstream_context *stream;
        struct tstream_cli_np *cli_nps;
        NTSTATUS status;

        if (tevent_req_is_nterror(req, &status)) {
                tevent_req_received(req);
                return status;
        }

        stream = tstream_context_create(mem_ctx,
                                        &tstream_cli_np_ops,
                                        &cli_nps,
                                        struct tstream_cli_np,
                                        location);
        if (!stream) {
                tevent_req_received(req);
                return NT_STATUS_NO_MEMORY;
        }
        ZERO_STRUCTP(cli_nps);

        cli_nps->cli = state->cli;
        cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
        cli_nps->is_smb1 = state->is_smb1;
        cli_nps->fnum = state->fnum;
        cli_nps->fid_persistent = state->fid_persistent;
        cli_nps->fid_volatile = state->fid_volatile;
        /* cli_set_timeout() returns the previous timeout; remember it */
        cli_nps->default_timeout = cli_set_timeout(state->cli, 0);
        cli_set_timeout(state->cli, cli_nps->default_timeout);

        talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

        cli_nps->trans.active = false;
        cli_nps->trans.read_req = NULL;
        cli_nps->trans.write_req = NULL;
        SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
        SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

        *_stream = stream;
        tevent_req_received(req);
        return NT_STATUS_OK;
}
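
/*
 * Illustrative usage sketch, not part of the original file: a caller
 * with its own temporary event loop could open a pipe stream roughly
 * like this.  It assumes the tstream_cli_np_open_recv() wrapper from
 * cli_np_tstream.h and an already connected cli_state; "lsarpc" is
 * just an example pipe name.
 *
 *	struct tevent_context *ev = tevent_context_init(mem_ctx);
 *	struct tevent_req *req;
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	req = tstream_cli_np_open_send(mem_ctx, ev, cli, "lsarpc");
 *	if (req == NULL || !tevent_req_poll(req, ev)) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	status = tstream_cli_np_open_recv(req, mem_ctx, &stream);
 *	TALLOC_FREE(req);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *
 * From here on the generic tstream_readv_send()/tstream_writev_send()
 * calls are dispatched through tstream_cli_np_ops at the end of this
 * file.
 */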

static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);

        if (!cli_state_is_connected(cli_nps->cli)) {
                errno = ENOTCONN;
                return -1;
        }

        return cli_nps->read.left;
}

bool tstream_is_cli_np(struct tstream_context *stream)
{
        struct tstream_cli_np *cli_nps =
                talloc_get_type(_tstream_context_data(stream),
                                struct tstream_cli_np);

        if (!cli_nps) {
                return false;
        }

        return true;
}

NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);

        if (cli_nps->trans.read_req) {
                return NT_STATUS_PIPE_BUSY;
        }

        if (cli_nps->trans.write_req) {
                return NT_STATUS_PIPE_BUSY;
        }

        if (cli_nps->trans.active) {
                return NT_STATUS_PIPE_BUSY;
        }

        cli_nps->trans.active = true;

        return NT_STATUS_OK;
}
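
/*
 * Illustrative sketch, not part of the original file: the intended
 * calling pattern for the combined round trip is to arm it per
 * request/response pair and then queue the write before the read.
 * The callback names are hypothetical.
 *
 *	status = tstream_cli_np_use_trans(stream);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 *	subreq = tstream_writev_send(state, ev, stream, iov, count);
 *	if (subreq == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	tevent_req_set_callback(subreq, my_writev_done, req);
 *	subreq = tstream_readv_send(state, ev, stream, resp_iov, 1);
 *	if (subreq == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	tevent_req_set_callback(subreq, my_readv_done, req);
 *
 * With trans.active set, the writev side only buffers the data; the
 * readv side then sends it as a single SMBtrans (SMB1) or
 * FSCTL_NAMED_PIPE_READ_WRITE ioctl (SMB2) in
 * tstream_cli_np_readv_trans_start() below, and both requests complete
 * when that one round trip returns.
 */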

unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
                                        unsigned int timeout)
{
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);

        if (!cli_state_is_connected(cli_nps->cli)) {
                return cli_nps->default_timeout;
        }

        return cli_set_timeout(cli_nps->cli, timeout);
}

struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);

        return cli_nps->cli;
}

struct tstream_cli_np_writev_state {
        struct tstream_context *stream;
        struct tevent_context *ev;

        struct iovec *vector;
        size_t count;

        int ret;

        struct {
                int val;
                const char *location;
        } error;
};

static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                                     struct tstream_cli_np);

        cli_nps->trans.write_req = NULL;

        return 0;
}

static void tstream_cli_np_writev_write_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
                                        struct tstream_context *stream,
                                        const struct iovec *vector,
                                        size_t count)
{
        struct tevent_req *req;
        struct tstream_cli_np_writev_state *state;
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);

        req = tevent_req_create(mem_ctx, &state,
                                struct tstream_cli_np_writev_state);
        if (!req) {
                return NULL;
        }
        state->stream = stream;
        state->ev = ev;
        state->ret = 0;

        talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

        if (!cli_state_is_connected(cli_nps->cli)) {
                tevent_req_error(req, ENOTCONN);
                return tevent_req_post(req, ev);
        }

        /*
         * we make a copy of the vector so we can change the structure
         */
        state->vector = talloc_array(state, struct iovec, count);
        if (tevent_req_nomem(state->vector, req)) {
                return tevent_req_post(req, ev);
        }
        memcpy(state->vector, vector, sizeof(struct iovec) * count);
        state->count = count;

        tstream_cli_np_writev_write_next(req);
        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        return req;
}

static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
        struct tstream_cli_np_writev_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_writev_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        struct tevent_req *subreq;
        size_t i;
        size_t left = 0;

        for (i=0; i < state->count; i++) {
                left += state->vector[i].iov_len;
        }

        if (left == 0) {
                TALLOC_FREE(cli_nps->write.buf);
                tevent_req_done(req);
                return;
        }

        cli_nps->write.ofs = 0;
        cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
        cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
                                            uint8_t, cli_nps->write.left);
        if (tevent_req_nomem(cli_nps->write.buf, req)) {
                return;
        }

        /*
         * copy the pending buffer first
         */
        while (cli_nps->write.left > 0 && state->count > 0) {
                uint8_t *base = (uint8_t *)state->vector[0].iov_base;
                size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

                memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

                base += len;
                state->vector[0].iov_base = base;
                state->vector[0].iov_len -= len;

                cli_nps->write.ofs += len;
                cli_nps->write.left -= len;

                if (state->vector[0].iov_len == 0) {
                        state->vector += 1;
                        state->count -= 1;
                }

                state->ret += len;
        }

        if (cli_nps->trans.active && state->count == 0) {
                /*
                 * The caller asked for a combined SMBtrans/ioctl round
                 * trip: just park the request, the following readv will
                 * send the buffered data.
                 */
                cli_nps->trans.active = false;
                cli_nps->trans.write_req = req;
                return;
        }

        if (cli_nps->trans.read_req && state->count == 0) {
                /* a readv is already waiting, let it start the trans call */
                cli_nps->trans.write_req = req;
                tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
                return;
        }

        if (cli_nps->is_smb1) {
                subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
                                             cli_nps->fnum,
                                             8, /* 8 means message mode. */
                                             cli_nps->write.buf,
                                             0, /* offset */
                                             cli_nps->write.ofs); /* size */
        } else {
                subreq = smb2cli_write_send(state, state->ev,
                                            cli_nps->cli->conn,
                                            cli_nps->cli->timeout,
                                            cli_nps->cli->smb2.session,
                                            cli_nps->cli->smb2.tid,
                                            cli_nps->write.ofs, /* length */
                                            0, /* offset */
                                            cli_nps->fid_persistent,
                                            cli_nps->fid_volatile,
                                            0, /* remaining_bytes */
                                            0, /* flags */
                                            cli_nps->write.buf);
        }
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq,
                                tstream_cli_np_writev_write_done,
                                req);
}

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
                                                 int error,
                                                 const char *location);

static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_writev_state *state =
                tevent_req_data(req, struct tstream_cli_np_writev_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        size_t written;
        NTSTATUS status;

        if (cli_nps->is_smb1) {
                status = cli_write_andx_recv(subreq, &written);
        } else {
                status = smb2cli_write_recv(subreq);
                written = cli_nps->write.ofs; /* TODO: get the value from the server */
        }
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
                return;
        }

        if (written != cli_nps->write.ofs) {
                tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
                return;
        }

        tstream_cli_np_writev_write_next(req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
                                                 int error,
                                                 const char *location)
{
        struct tstream_cli_np_writev_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_writev_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        struct tevent_req *subreq;

        state->error.val = error;
        state->error.location = location;

        if (!cli_state_is_connected(cli_nps->cli)) {
                /* return the original error */
                _tevent_req_error(req, state->error.val, state->error.location);
                return;
        }

        if (cli_nps->is_smb1) {
                subreq = cli_close_send(state, state->ev, cli_nps->cli,
                                        cli_nps->fnum);
        } else {
                subreq = smb2cli_close_send(state, state->ev,
                                            cli_nps->cli->conn,
                                            cli_nps->cli->timeout,
                                            cli_nps->cli->smb2.session,
                                            cli_nps->cli->smb2.tid,
                                            0, /* flags */
                                            cli_nps->fid_persistent,
                                            cli_nps->fid_volatile);
        }
        if (subreq == NULL) {
                /* return the original error */
                _tevent_req_error(req, state->error.val, state->error.location);
                return;
        }
        tevent_req_set_callback(subreq,
                                tstream_cli_np_writev_disconnect_done,
                                req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_writev_state *state =
                tevent_req_data(req, struct tstream_cli_np_writev_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream, struct tstream_cli_np);

        if (cli_nps->is_smb1) {
                cli_close_recv(subreq);
        } else {
                smb2cli_close_recv(subreq);
        }
        TALLOC_FREE(subreq);

        cli_nps->cli = NULL;

        /* return the original error */
        _tevent_req_error(req, state->error.val, state->error.location);
}

static int tstream_cli_np_writev_recv(struct tevent_req *req,
                                      int *perrno)
{
        struct tstream_cli_np_writev_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_writev_state);
        int ret;

        ret = tsocket_simple_int_recv(req, perrno);
        if (ret == 0) {
                ret = state->ret;
        }

        tevent_req_received(req);
        return ret;
}

struct tstream_cli_np_readv_state {
        struct tstream_context *stream;
        struct tevent_context *ev;

        struct iovec *vector;
        size_t count;

        int ret;

        struct {
                struct tevent_immediate *im;
        } trans;

        struct {
                int val;
                const char *location;
        } error;
};

static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                                     struct tstream_cli_np);

        cli_nps->trans.read_req = NULL;

        return 0;
}

static void tstream_cli_np_readv_read_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
                                        struct tstream_context *stream,
                                        struct iovec *vector,
                                        size_t count)
{
        struct tevent_req *req;
        struct tstream_cli_np_readv_state *state;
        struct tstream_cli_np *cli_nps =
                tstream_context_data(stream, struct tstream_cli_np);

        req = tevent_req_create(mem_ctx, &state,
                                struct tstream_cli_np_readv_state);
        if (!req) {
                return NULL;
        }
        state->stream = stream;
        state->ev = ev;
        state->ret = 0;

        talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

        if (!cli_state_is_connected(cli_nps->cli)) {
                tevent_req_error(req, ENOTCONN);
                return tevent_req_post(req, ev);
        }

        /*
         * we make a copy of the vector so we can change the structure
         */
        state->vector = talloc_array(state, struct iovec, count);
        if (tevent_req_nomem(state->vector, req)) {
                return tevent_req_post(req, ev);
        }
        memcpy(state->vector, vector, sizeof(struct iovec) * count);
        state->count = count;

        tstream_cli_np_readv_read_next(req);
        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        return req;
}

static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        struct tevent_req *subreq;

        /*
         * copy the pending buffer first
         */
        while (cli_nps->read.left > 0 && state->count > 0) {
                uint8_t *base = (uint8_t *)state->vector[0].iov_base;
                size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

                memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

                base += len;
                state->vector[0].iov_base = base;
                state->vector[0].iov_len -= len;

                cli_nps->read.ofs += len;
                cli_nps->read.left -= len;

                if (state->vector[0].iov_len == 0) {
                        state->vector += 1;
                        state->count -= 1;
                }

                state->ret += len;
        }

        if (cli_nps->read.left == 0) {
                TALLOC_FREE(cli_nps->read.buf);
        }

        if (state->count == 0) {
                tevent_req_done(req);
                return;
        }

        if (cli_nps->trans.active) {
                cli_nps->trans.active = false;
                cli_nps->trans.read_req = req;
                return;
        }

        if (cli_nps->trans.write_req) {
                cli_nps->trans.read_req = req;
                tstream_cli_np_readv_trans_start(req);
                return;
        }

        if (cli_nps->is_smb1) {
                subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
                                            cli_nps->fnum,
                                            0, /* offset */
                                            TSTREAM_CLI_NP_MAX_BUF_SIZE);
        } else {
                subreq = smb2cli_read_send(state, state->ev,
                                           cli_nps->cli->conn,
                                           cli_nps->cli->timeout,
                                           cli_nps->cli->smb2.session,
                                           cli_nps->cli->smb2.tid,
                                           TSTREAM_CLI_NP_MAX_BUF_SIZE, /* length */
                                           0, /* offset */
                                           cli_nps->fid_persistent,
                                           cli_nps->fid_volatile,
                                           0, /* minimum_count */
                                           0); /* remaining_bytes */
        }
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq,
                                tstream_cli_np_readv_read_done,
                                req);
}

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        struct tevent_req *subreq;

        state->trans.im = tevent_create_immediate(state);
        if (tevent_req_nomem(state->trans.im, req)) {
                return;
        }

        if (cli_nps->is_smb1) {
                subreq = cli_trans_send(state, state->ev,
                                        cli_nps->cli,
                                        SMBtrans,
                                        "\\PIPE\\",
                                        0, 0, 0,
                                        cli_nps->trans.setup, 2,
                                        0,
                                        NULL, 0, 0,
                                        cli_nps->write.buf,
                                        cli_nps->write.ofs,
                                        TSTREAM_CLI_NP_MAX_BUF_SIZE);
        } else {
                DATA_BLOB in_input_buffer = data_blob_null;
                DATA_BLOB in_output_buffer = data_blob_null;

                in_input_buffer = data_blob_const(cli_nps->write.buf,
                                                  cli_nps->write.ofs);

                subreq = smb2cli_ioctl_send(state, state->ev,
                                            cli_nps->cli->conn,
                                            cli_nps->cli->timeout,
                                            cli_nps->cli->smb2.session,
                                            cli_nps->cli->smb2.tid,
                                            cli_nps->fid_persistent,
                                            cli_nps->fid_volatile,
                                            FSCTL_NAMED_PIPE_READ_WRITE,
                                            0, /* in_max_input_length */
                                            &in_input_buffer,
                                            /* in_max_output_length */
                                            TSTREAM_CLI_NP_MAX_BUF_SIZE,
                                            &in_output_buffer,
                                            SMB2_IOCTL_FLAG_IS_FSCTL);
        }
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq,
                                tstream_cli_np_readv_trans_done,
                                req);
}

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
                                                int error,
                                                const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
                                            struct tevent_immediate *im,
                                            void *private_data);

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req, struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream, struct tstream_cli_np);
        uint8_t *rcvbuf;
        uint32_t received;
        NTSTATUS status;

        if (cli_nps->is_smb1) {
                status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
                                        NULL, 0, NULL,
                                        &rcvbuf, 0, &received);
        } else {
                DATA_BLOB out_input_buffer = data_blob_null;
                DATA_BLOB out_output_buffer = data_blob_null;

                status = smb2cli_ioctl_recv(subreq, state,
                                            &out_input_buffer,
                                            &out_output_buffer);

                /* Note that rcvbuf is not a talloc pointer here */
                rcvbuf = out_output_buffer.data;
                received = out_output_buffer.length;
        }
        TALLOC_FREE(subreq);
        if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
                status = NT_STATUS_OK;
        }
        if (!NT_STATUS_IS_OK(status)) {
                tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
                return;
        }

        if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
                tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
                return;
        }

        if (received == 0) {
                tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
                return;
        }

        cli_nps->read.ofs = 0;
        cli_nps->read.left = received;
        cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
        if (cli_nps->read.buf == NULL) {
                TALLOC_FREE(subreq);
                tevent_req_nomem(cli_nps->read.buf, req);
                return;
        }
        memcpy(cli_nps->read.buf, rcvbuf, received);

        if (cli_nps->trans.write_req == NULL) {
                tstream_cli_np_readv_read_next(req);
                return;
        }

        /*
         * There is a parked writev request: schedule the readv
         * continuation via an immediate event and complete the writev
         * first.
         */
        tevent_schedule_immediate(state->trans.im, state->ev,
                                  tstream_cli_np_readv_trans_next, req);

        tevent_req_done(cli_nps->trans.write_req);
}

static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
                                            struct tevent_immediate *im,
                                            void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);

        tstream_cli_np_readv_read_next(req);
}

static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req, struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream, struct tstream_cli_np);
        uint8_t *rcvbuf;
        ssize_t received;
        NTSTATUS status;

        /*
         * We must free subreq in this function as there is
         * a timer event attached to it.
         */

        if (cli_nps->is_smb1) {
                status = cli_read_andx_recv(subreq, &received, &rcvbuf);
        } else {
                uint32_t data_length = 0;
                status = smb2cli_read_recv(subreq, state, &rcvbuf, &data_length);
                received = data_length;
        }
        /*
         * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
         * child of that.
         */
        if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
                /*
                 * NT_STATUS_BUFFER_TOO_SMALL means that there's
                 * more data to read when the named pipe is used
                 * in message mode (which is the case here).
                 *
                 * But we hide this from the caller.
                 */
                status = NT_STATUS_OK;
        }
        if (!NT_STATUS_IS_OK(status)) {
                TALLOC_FREE(subreq);
                tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
                return;
        }

        if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
                TALLOC_FREE(subreq);
                tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
                return;
        }

        if (received == 0) {
                TALLOC_FREE(subreq);
                tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
                return;
        }

        cli_nps->read.ofs = 0;
        cli_nps->read.left = received;
        cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
        if (cli_nps->read.buf == NULL) {
                TALLOC_FREE(subreq);
                tevent_req_nomem(cli_nps->read.buf, req);
                return;
        }
        memcpy(cli_nps->read.buf, rcvbuf, received);
        TALLOC_FREE(subreq);

        tstream_cli_np_readv_read_next(req);
}

static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
                                                int error,
                                                const char *location)
{
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);
        struct tevent_req *subreq;

        state->error.val = error;
        state->error.location = location;

        if (!cli_state_is_connected(cli_nps->cli)) {
                /* return the original error */
                tstream_cli_np_readv_error(req);
                return;
        }

        if (cli_nps->is_smb1) {
                subreq = cli_close_send(state, state->ev, cli_nps->cli,
                                        cli_nps->fnum);
        } else {
                subreq = smb2cli_close_send(state, state->ev,
                                            cli_nps->cli->conn,
                                            cli_nps->cli->timeout,
                                            cli_nps->cli->smb2.session,
                                            cli_nps->cli->smb2.tid,
                                            0, /* flags */
                                            cli_nps->fid_persistent,
                                            cli_nps->fid_volatile);
        }
        if (subreq == NULL) {
                /* return the original error */
                tstream_cli_np_readv_error(req);
                return;
        }
        tevent_req_set_callback(subreq,
                                tstream_cli_np_readv_disconnect_done,
                                req);
}

static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
        struct tevent_req *req =
                tevent_req_callback_data(subreq, struct tevent_req);
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req, struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream, struct tstream_cli_np);

        if (cli_nps->is_smb1) {
                cli_close_recv(subreq);
        } else {
                smb2cli_close_recv(subreq);
        }
        TALLOC_FREE(subreq);

        cli_nps->cli = NULL;

        tstream_cli_np_readv_error(req);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
                                               struct tevent_immediate *im,
                                               void *private_data);

static void tstream_cli_np_readv_error(struct tevent_req *req)
{
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_readv_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream,
                struct tstream_cli_np);

        if (cli_nps->trans.write_req == NULL) {
                /* return the original error */
                _tevent_req_error(req, state->error.val, state->error.location);
                return;
        }

        if (state->trans.im == NULL) {
                /* return the original error */
                _tevent_req_error(req, state->error.val, state->error.location);
                return;
        }

        tevent_schedule_immediate(state->trans.im, state->ev,
                                  tstream_cli_np_readv_error_trigger, req);

        /* return the original error for writev */
        _tevent_req_error(cli_nps->trans.write_req,
                          state->error.val, state->error.location);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
                                               struct tevent_immediate *im,
                                               void *private_data)
{
        struct tevent_req *req =
                talloc_get_type_abort(private_data,
                struct tevent_req);
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req,
                struct tstream_cli_np_readv_state);

        /* return the original error */
        _tevent_req_error(req, state->error.val, state->error.location);
}

static int tstream_cli_np_readv_recv(struct tevent_req *req,
                                     int *perrno)
{
        struct tstream_cli_np_readv_state *state =
                tevent_req_data(req, struct tstream_cli_np_readv_state);
        int ret;

        ret = tsocket_simple_int_recv(req, perrno);
        if (ret == 0) {
                ret = state->ret;
        }

        tevent_req_received(req);
        return ret;
}

struct tstream_cli_np_disconnect_state {
        struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);

static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
                                        struct tstream_context *stream)
{
        struct tstream_cli_np *cli_nps = tstream_context_data(stream,
                                         struct tstream_cli_np);
        struct tevent_req *req;
        struct tstream_cli_np_disconnect_state *state;
        struct tevent_req *subreq;

        req = tevent_req_create(mem_ctx, &state,
                                struct tstream_cli_np_disconnect_state);
        if (req == NULL) {
                return NULL;
        }

        state->stream = stream;

        if (!cli_state_is_connected(cli_nps->cli)) {
                tevent_req_error(req, ENOTCONN);
                return tevent_req_post(req, ev);
        }

        if (cli_nps->is_smb1) {
                subreq = cli_close_send(state, ev, cli_nps->cli,
                                        cli_nps->fnum);
        } else {
                subreq = smb2cli_close_send(state, ev, cli_nps->cli->conn,
                                            cli_nps->cli->timeout,
                                            cli_nps->cli->smb2.session,
                                            cli_nps->cli->smb2.tid,
                                            0, /* flags */
                                            cli_nps->fid_persistent,
                                            cli_nps->fid_volatile);
        }
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

        return req;
}

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(subreq,
                                                          struct tevent_req);
        struct tstream_cli_np_disconnect_state *state =
                tevent_req_data(req, struct tstream_cli_np_disconnect_state);
        struct tstream_cli_np *cli_nps =
                tstream_context_data(state->stream, struct tstream_cli_np);
        NTSTATUS status;

        if (cli_nps->is_smb1) {
                status = cli_close_recv(subreq);
        } else {
                status = smb2cli_close_recv(subreq);
        }
        TALLOC_FREE(subreq);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_error(req, EIO);
                return;
        }

        cli_nps->cli = NULL;

        tevent_req_done(req);
}

static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
                                          int *perrno)
{
        int ret;

        ret = tsocket_simple_int_recv(req, perrno);

        tevent_req_received(req);
        return ret;
}

static const struct tstream_context_ops tstream_cli_np_ops = {
        .name                   = "cli_np",

        .pending_bytes          = tstream_cli_np_pending_bytes,

        .readv_send             = tstream_cli_np_readv_send,
        .readv_recv             = tstream_cli_np_readv_recv,

        .writev_send            = tstream_cli_np_writev_send,
        .writev_recv            = tstream_cli_np_writev_recv,

        .disconnect_send        = tstream_cli_np_disconnect_send,
        .disconnect_recv        = tstream_cli_np_disconnect_recv,
};
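
/*
 * Illustrative teardown sketch, not part of the original file: as the
 * destructor TODO above notes, callers should prefer the async
 * disconnect over a plain talloc_free().  With a temporary event loop
 * that could look roughly like this (map_nt_error_from_unix() is the
 * usual source3 errno-to-NTSTATUS helper):
 *
 *	struct tevent_req *req;
 *	int ret, sys_errno;
 *
 *	req = tstream_disconnect_send(mem_ctx, ev, stream);
 *	if (req == NULL || !tevent_req_poll(req, ev)) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	ret = tstream_disconnect_recv(req, &sys_errno);
 *	TALLOC_FREE(req);
 *	if (ret == -1) {
 *		return map_nt_error_from_unix(sys_errno);
 *	}
 *	TALLOC_FREE(stream);
 */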