/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "libsmb/smb2cli.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"

static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of a SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via a SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (like samba 3.5.x and below did).
 *
 * It is important that we use SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
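
/*
 * Note: 1024 bytes of SMBtrans data plus up to 3256 bytes fetched via
 * SMBreadX add up to the 4280-byte fragment size used here.
 */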

struct tstream_cli_np {
	struct cli_state *cli;
	const char *npipe;
	bool is_smb1;
	uint16_t fnum;
	uint64_t fid_persistent;
	uint64_t fid_volatile;
	unsigned int default_timeout;

	struct {
		bool active;
		struct tevent_req *read_req;
		struct tevent_req *write_req;
		uint16_t setup[2];
	} trans;

	struct {
		off_t ofs;
		size_t left;
		uint8_t *buf;
	} read, write;
};

static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens if a caller does talloc_free()
	 * while everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	if (cli_nps->is_smb1) {
		status = cli_close(cli_nps->cli, cli_nps->fnum);
	} else {
		status = smb2cli_close(cli_nps->cli->conn,
				       cli_nps->cli->timeout,
				       cli_nps->cli->smb2.session,
				       cli_nps->cli->smb2.tid, 0,
				       cli_nps->fid_persistent,
				       cli_nps->fid_volatile);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}

struct tstream_cli_np_open_state {
	struct cli_state *cli;
	bool is_smb1;
	uint16_t fnum;
	uint64_t fid_persistent;
	uint64_t fid_volatile;
	const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);

struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	if (cli_state_protocol(cli) < PROTOCOL_SMB2_02) {
		state->is_smb1 = true;
	}

	if (state->is_smb1) {
		subreq = cli_ntcreate_send(state, ev, cli,
					   npipe,
					   0, /* CreatFlags */
					   DESIRED_ACCESS_PIPE,
					   0, /* FileAttributes */
					   FILE_SHARE_READ|FILE_SHARE_WRITE,
					   FILE_OPEN,
					   0, /* CreateOptions */
					   0); /* SecurityFlags */
	} else {
		subreq = smb2cli_create_send(state, ev, cli->conn,
					     cli->timeout, cli->smb2.session,
					     cli->smb2.tid,
					     npipe,
					     SMB2_OPLOCK_LEVEL_NONE,
					     SMB2_IMPERSONATION_IMPERSONATION,
					     DESIRED_ACCESS_PIPE,
					     0, /* file_attributes */
					     FILE_SHARE_READ|FILE_SHARE_WRITE,
					     FILE_OPEN,
					     0, /* create_options */
					     NULL); /* blobs */
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}

static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	if (state->is_smb1) {
		status = cli_ntcreate_recv(subreq, &state->fnum);
	} else {
		status = smb2cli_create_recv(subreq,
					     &state->fid_persistent,
					     &state->fid_volatile);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}

NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->is_smb1 = state->is_smb1;
	cli_nps->fnum = state->fnum;
	cli_nps->fid_persistent = state->fid_persistent;
	cli_nps->fid_volatile = state->fid_volatile;
	cli_nps->default_timeout = cli_set_timeout(state->cli, 0);
	cli_set_timeout(state->cli, cli_nps->default_timeout);

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
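
/*
 * Illustrative usage sketch (not part of this file): a caller with an
 * established cli_state would typically drive the open through tevent,
 * e.g. via the tstream_cli_np_open_recv() wrapper from cli_np_tstream.h.
 * Local variable names and the pipe name below are hypothetical.
 *
 *	struct tevent_req *subreq;
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	subreq = tstream_cli_np_open_send(mem_ctx, ev, cli, "lsarpc");
 *	if (subreq == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	if (!tevent_req_poll_ntstatus(subreq, ev, &status)) {
 *		return status;
 *	}
 *	status = tstream_cli_np_open_recv(subreq, mem_ctx, &stream);
 *	TALLOC_FREE(subreq);
 *	if (!NT_STATUS_IS_OK(status)) {
 *		return status;
 *	}
 */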

static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}

bool tstream_is_cli_np(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps =
		talloc_get_type(_tstream_context_data(stream),
				struct tstream_cli_np);

	if (!cli_nps) {
		return false;
	}

	return true;
}
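
/*
 * Arm the combined write+read optimisation: once trans.active is set,
 * the next writev that drains its full vector is held back, and as soon
 * as the matching readv arrives (or vice versa) both are serviced by a
 * single SMBtrans/FSCTL_NAMED_PIPE_READ_WRITE round trip in
 * tstream_cli_np_readv_trans_start().  Returns NT_STATUS_PIPE_BUSY if a
 * coupled read/write pair is already in flight.
 */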
NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (cli_nps->trans.read_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.write_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.active) {
		return NT_STATUS_PIPE_BUSY;
	}

	cli_nps->trans.active = true;

	return NT_STATUS_OK;
}

unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}

struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}

struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		int val;
		const char *location;
	} error;
};

static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}

static void tstream_cli_np_writev_write_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * We make a copy of the vector so we can modify it
	 * while we consume it.
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
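
/*
 * The writev path gathers the caller's iovec into write.buf in chunks
 * of at most TSTREAM_CLI_NP_MAX_BUF_SIZE (one DCERPC fragment) and
 * pushes each chunk with SMBwriteX in message mode or SMB2 WRITE,
 * refilling the buffer from the remaining vector entries until it is
 * drained.
 */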
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
					     cli_nps->fnum,
					     8, /* 8 means message mode. */
					     cli_nps->write.buf,
					     0, /* offset */
					     cli_nps->write.ofs); /* size */
	} else {
		subreq = smb2cli_write_send(state, state->ev, cli_nps->cli,
					    cli_nps->write.ofs, /* length */
					    0, /* offset */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    0, /* remaining_bytes */
					    0, /* flags */
					    cli_nps->write.buf);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location);

static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_write_andx_recv(subreq, &written);
	} else {
		status = smb2cli_write_recv(subreq);
		/* TODO: get the value from the server */
		written = cli_nps->write.ofs;
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}

static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}

static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}

struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		struct tevent_immediate *im;
	} trans;

	struct {
		int val;
		const char *location;
	} error;
};

static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}

static void tstream_cli_np_readv_read_next(struct tevent_req *req);

static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * We make a copy of the vector so we can modify it
	 * while we consume it.
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}

static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
					    cli_nps->fnum,
					    0, /* offset */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		subreq = smb2cli_read_send(state, state->ev,
					   cli_nps->cli->conn,
					   cli_nps->cli->timeout,
					   cli_nps->cli->smb2.session,
					   cli_nps->cli->smb2.tid,
					   TSTREAM_CLI_NP_MAX_BUF_SIZE, /* length */
					   0, /* offset */
					   cli_nps->fid_persistent,
					   cli_nps->fid_volatile,
					   0, /* minimum_count */
					   0); /* remaining_bytes */
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
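
/*
 * Combined write+read: when a writev and a readv have been coupled via
 * tstream_cli_np_use_trans(), the buffered fragment is sent and the
 * reply fetched in one round trip - SMBtrans against "\PIPE\" with
 * TRANSACT_DCERPCCMD on SMB1, or an FSCTL_NAMED_PIPE_READ_WRITE ioctl
 * on SMB2.
 */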
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_trans_send(state, state->ev,
					cli_nps->cli,
					SMBtrans,
					"\\PIPE\\",
					0, 0, 0,
					cli_nps->trans.setup, 2,
					0, /* max_setup */
					NULL, 0, 0,
					cli_nps->write.buf,
					cli_nps->write.ofs,
					TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		DATA_BLOB in_input_buffer = data_blob_null;
		DATA_BLOB in_output_buffer = data_blob_null;

		in_input_buffer = data_blob_const(cli_nps->write.buf,
						  cli_nps->write.ofs);

		subreq = smb2cli_ioctl_send(state, state->ev,
					    cli_nps->cli,
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    FSCTL_NAMED_PIPE_READ_WRITE,
					    0, /* in_max_input_length */
					    &in_input_buffer,
					    /* in_max_output_length */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE,
					    &in_output_buffer,
					    SMB2_IOCTL_FLAG_IS_FSCTL);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
					NULL, 0, NULL,
					&rcvbuf, 0, &received);
	} else {
		DATA_BLOB out_input_buffer = data_blob_null;
		DATA_BLOB out_output_buffer = data_blob_null;

		status = smb2cli_ioctl_recv(subreq, state,
					    &out_input_buffer,
					    &out_output_buffer);

		/* Note that rcvbuf is not a talloc pointer here */
		rcvbuf = out_output_buffer.data;
		received = out_output_buffer.length;
	}
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
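
/*
 * The write side of a coupled trans call is completed directly above,
 * while the read side continues from this immediate handler.  Deferring
 * the read continuation to the next event loop iteration keeps its
 * completion out of the callback that already completes the write
 * request.
 */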
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}

static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	if (cli_nps->is_smb1) {
		status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	} else {
		uint32_t data_length = 0;
		status = smb2cli_read_recv(subreq, state, &rcvbuf, &data_length);
		received = data_length;
	}
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}

static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}

static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);

static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}

static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}

struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);

static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, ev, cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_close_recv(subreq);
	} else {
		status = smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}

static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}

static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};
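
/*
 * Callers never invoke the static functions above directly; they use the
 * generic tstream API, which dispatches through this ops table.  A minimal
 * sketch (local variable names hypothetical):
 *
 *	struct tevent_req *subreq;
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	int sys_errno;
 *	int ret;
 *
 *	subreq = tstream_writev_send(mem_ctx, ev, stream, &iov, 1);
 *	...
 *	ret = tstream_writev_recv(subreq, &sys_errno);
 *	TALLOC_FREE(subreq);
 */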