/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "system/network.h"
#include "libsmb/libsmb.h"
#include "libsmb/smb2cli.h"
#include "../libcli/smb/smbXcli_base.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/tsocket/tsocket.h"
#include "../lib/tsocket/tsocket_internal.h"
#include "cli_np_tstream.h"

static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of an SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via an SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (like samba 3.5.x and below did).
 *
 * It is important that we do SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
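
/*
 * Note: both the read and the write path below clamp each SMB round trip
 * to TSTREAM_CLI_NP_MAX_BUF_SIZE, so a larger iovec is simply split into
 * multiple SMBwriteX/SMBreadX (or SMB2 write/read) calls.
 */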
struct tstream_cli_np {
	struct cli_state *cli;
	const char *npipe;
	bool is_smb1;
	uint16_t fnum;
	uint64_t fid_persistent;
	uint64_t fid_volatile;
	unsigned int default_timeout;

	struct {
		bool active;
		struct tevent_req *read_req;
		struct tevent_req *write_req;
		uint16_t setup[2];
	} trans;

	struct {
		off_t ofs;
		size_t left;
		uint8_t *buf;
	} read, write;
};
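
/*
 * The trans sub-struct couples one pending writev with one pending readv:
 * tstream_cli_np_use_trans() sets trans.active, and the next write/read
 * pair is then parked in trans.write_req/trans.read_req until both are
 * present and can be sent as a single SMBtrans (or SMB2 ioctl) round trip.
 */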
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens if a caller does talloc_free()
	 * while everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	if (cli_nps->is_smb1) {
		status = cli_close(cli_nps->cli, cli_nps->fnum);
	} else {
		status = smb2cli_close(cli_nps->cli->conn,
				       cli_nps->cli->timeout,
				       cli_nps->cli->smb2.session,
				       cli_nps->cli->smb2.tcon,
				       0, /* flags */
				       cli_nps->fid_persistent,
				       cli_nps->fid_volatile);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	bool is_smb1;
	uint16_t fnum;
	uint64_t fid_persistent;
	uint64_t fid_volatile;
	const char *npipe;
};

static void tstream_cli_np_open_done(struct tevent_req *subreq);
struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	if (smbXcli_conn_protocol(cli->conn) < PROTOCOL_SMB2_02) {
		state->is_smb1 = true;
	}

	if (state->is_smb1) {
		const char *smb1_npipe;

		/*
		 * Windows and newer Samba versions allow
		 * the pipe name without leading backslash,
		 * but we had better behave like Windows clients.
		 */
		smb1_npipe = talloc_asprintf(state, "\\%s", state->npipe);
		if (tevent_req_nomem(smb1_npipe, req)) {
			return tevent_req_post(req, ev);
		}
		subreq = cli_ntcreate_send(state, ev, cli,
					   smb1_npipe,
					   0, /* CreatFlags */
					   DESIRED_ACCESS_PIPE,
					   0, /* FileAttributes */
					   FILE_SHARE_READ|FILE_SHARE_WRITE,
					   FILE_OPEN,
					   0, /* CreateOptions */
					   0); /* SecurityFlags */
	} else {
		subreq = smb2cli_create_send(state, ev, cli->conn,
					     cli->timeout, cli->smb2.session,
					     cli->smb2.tcon,
					     npipe,
					     SMB2_OPLOCK_LEVEL_NONE,
					     SMB2_IMPERSONATION_IMPERSONATION,
					     DESIRED_ACCESS_PIPE,
					     0, /* file_attributes */
					     FILE_SHARE_READ|FILE_SHARE_WRITE,
					     FILE_OPEN,
					     0, /* create_options */
					     NULL); /* blobs */
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	if (state->is_smb1) {
		status = cli_ntcreate_recv(subreq, &state->fnum);
	} else {
		status = smb2cli_create_recv(subreq,
					     &state->fid_persistent,
					     &state->fid_volatile);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->is_smb1 = state->is_smb1;
	cli_nps->fnum = state->fnum;
	cli_nps->fid_persistent = state->fid_persistent;
	cli_nps->fid_volatile = state->fid_volatile;
	cli_nps->default_timeout = cli_set_timeout(state->cli, 0);
	cli_set_timeout(state->cli, cli_nps->default_timeout);

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
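
/*
 * A minimal caller sketch (not part of this file; it assumes an already
 * connected struct cli_state and the tstream_cli_np_open_recv() wrapper
 * macro from cli_np_tstream.h):
 *
 *	struct tevent_req *req;
 *	struct tstream_context *stream;
 *	NTSTATUS status;
 *
 *	req = tstream_cli_np_open_send(mem_ctx, ev, cli, "lsarpc");
 *	if (req == NULL || !tevent_req_poll(req, ev)) {
 *		return NT_STATUS_INTERNAL_ERROR;
 *	}
 *	status = tstream_cli_np_open_recv(req, mem_ctx, &stream);
 *	TALLOC_FREE(req);
 */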
static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}

bool tstream_is_cli_np(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps =
		talloc_get_type(_tstream_context_data(stream),
				struct tstream_cli_np);

	if (!cli_nps) {
		return false;
	}

	return true;
}
NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (cli_nps->trans.read_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.write_req) {
		return NT_STATUS_PIPE_BUSY;
	}

	if (cli_nps->trans.active) {
		return NT_STATUS_PIPE_BUSY;
	}

	cli_nps->trans.active = true;

	return NT_STATUS_OK;
}
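
/*
 * Callers (typically the DCERPC client code) call this right before a
 * writev/readv pair that forms one request/response exchange, so that
 * the two operations can be folded into a single SMBtrans or
 * FSCTL_NAMED_PIPE_READ_WRITE ioctl instead of separate write and read
 * calls.
 */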
unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}

struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		int val;
		const char *location;
	} error;
};

static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}

static void tstream_cli_np_writev_write_next(struct tevent_req *req);
static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
					     cli_nps->fnum,
					     8, /* 8 means message mode. */
					     cli_nps->write.buf,
					     0, /* offset */
					     cli_nps->write.ofs); /* size */
	} else {
		subreq = smb2cli_write_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tcon,
					    cli_nps->write.ofs, /* length */
					    0, /* offset */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    0, /* remaining_bytes */
					    0, /* flags */
					    cli_nps->write.buf);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location);

static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_write_andx_recv(subreq, &written);
	} else {
		status = smb2cli_write_recv(subreq);
		written = cli_nps->write.ofs; /* TODO: get the value from the server */
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tcon,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}

static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;
	size_t count;

	int ret;

	struct {
		struct tevent_immediate *im;
	} trans;

	struct {
		int val;
		const char *location;
	} error;
};
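
/*
 * trans.im is an immediate event used when a combined trans/ioctl call
 * finishes: tstream_cli_np_readv_trans_done() (and the error path) can
 * complete the parked write request right away and resume this read
 * request from the event loop instead of recursing into it directly.
 */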
static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}

static void tstream_cli_np_readv_read_next(struct tevent_req *req);
static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
					    cli_nps->fnum,
					    0, /* offset */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		subreq = smb2cli_read_send(state, state->ev,
					   cli_nps->cli->conn,
					   cli_nps->cli->timeout,
					   cli_nps->cli->smb2.session,
					   cli_nps->cli->smb2.tcon,
					   TSTREAM_CLI_NP_MAX_BUF_SIZE, /* length */
					   0, /* offset */
					   cli_nps->fid_persistent,
					   cli_nps->fid_volatile,
					   0, /* minimum_count */
					   0); /* remaining_bytes */
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_trans_send(state, state->ev,
					cli_nps->cli,
					SMBtrans,
					"\\PIPE\\",
					0, 0, 0,
					cli_nps->trans.setup, 2,
					0, /* max_setup */
					NULL, 0, 0,
					cli_nps->write.buf,
					cli_nps->write.ofs,
					TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		DATA_BLOB in_input_buffer = data_blob_null;
		DATA_BLOB in_output_buffer = data_blob_null;

		in_input_buffer = data_blob_const(cli_nps->write.buf,
						  cli_nps->write.ofs);

		subreq = smb2cli_ioctl_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tcon,
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    FSCTL_NAMED_PIPE_READ_WRITE,
					    0, /* in_max_input_length */
					    &in_input_buffer,
					    /* in_max_output_length */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE,
					    &in_output_buffer,
					    SMB2_IOCTL_FLAG_IS_FSCTL);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
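
/*
 * In the SMB2/SMB3 case there is no SMBtrans; the combined write+read on
 * the pipe is expressed as an ioctl instead. The FSCTL_NAMED_PIPE_READ_WRITE
 * constant (defined elsewhere in the tree) corresponds to the pipe
 * transceive operation described in MS-SMB2/MS-FSCC.
 */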
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location);
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data);

static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	uint32_t received;
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
					NULL, 0, NULL,
					&rcvbuf, 0, &received);
	} else {
		DATA_BLOB out_input_buffer = data_blob_null;
		DATA_BLOB out_output_buffer = data_blob_null;

		status = smb2cli_ioctl_recv(subreq, state,
					    &out_input_buffer,
					    &out_output_buffer);

		/* Note that rcvbuf is not a talloc pointer here */
		rcvbuf = out_output_buffer.data;
		received = out_output_buffer.length;
	}
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);

	if (cli_nps->trans.write_req == NULL) {
		tstream_cli_np_readv_read_next(req);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_trans_next, req);

	tevent_req_done(cli_nps->trans.write_req);
}
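
/*
 * The read is resumed via the trans.im immediate event rather than by
 * calling tstream_cli_np_readv_read_next() directly, so the parked write
 * request can be completed from within this callback without the two
 * request state machines recursing into each other.
 */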
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}

static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	if (cli_nps->is_smb1) {
		status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	} else {
		uint32_t data_length = 0;
		status = smb2cli_read_recv(subreq, state, &rcvbuf, &data_length);
		received = data_length;
	}
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);

static void tstream_cli_np_readv_error(struct tevent_req *req);

static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tcon,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data);

static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
				     struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}

static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
				      struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
				struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}

struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};

static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, ev, cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tcon,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_close_recv(subreq);
	} else {
		status = smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}

static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
static const struct tstream_context_ops tstream_cli_np_ops = {
	.name			= "cli_np",

	.pending_bytes		= tstream_cli_np_pending_bytes,

	.readv_send		= tstream_cli_np_readv_send,
	.readv_recv		= tstream_cli_np_readv_recv,

	.writev_send		= tstream_cli_np_writev_send,
	.writev_recv		= tstream_cli_np_writev_recv,

	.disconnect_send	= tstream_cli_np_disconnect_send,
	.disconnect_recv	= tstream_cli_np_disconnect_recv,
};
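
/*
 * This ops table is attached to the stream in _tstream_cli_np_open_recv()
 * via tstream_context_create(). Callers then drive I/O through the generic
 * tstream_readv_send()/tstream_writev_send() wrappers from lib/tsocket,
 * while the exported tstream_cli_np_* helpers above (use_trans,
 * set_timeout, get_cli_state) remain available for pipe-specific tweaks.
 */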