libcli/smb: pass smbXcli_tcon to smb2cli_create*()
[Samba/gebeck_regimport.git] / source3 / libsmb / cli_np_tstream.c
blob7482f9da6236f377a2f7881d6a6017b1e443511e
1 /*
2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2010
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "system/network.h"
22 #include "libsmb/libsmb.h"
23 #include "libsmb/smb2cli.h"
24 #include "../libcli/smb/smbXcli_base.h"
25 #include "../lib/util/tevent_ntstatus.h"
26 #include "../lib/tsocket/tsocket.h"
27 #include "../lib/tsocket/tsocket_internal.h"
28 #include "cli_np_tstream.h"
30 static const struct tstream_context_ops tstream_cli_np_ops;
/*
 * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
 * This fits into the max_xmit negotiated at the SMB layer.
 *
 * On the sending side they may use SMBtranss if the request does not
 * fit into a single SMBtrans call.
 *
 * Windows uses 1024 as max data size of a SMBtrans request and then
 * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
 * via a SMBreadX.
 *
 * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
 * request to get the whole fragment at once (like samba 3.5.x and below did).
 *
 * It is important that we do SMBwriteX with the size of a full fragment,
 * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
 * from NT4 servers. (See bug #8195)
 */
#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
/*
 * Per-stream state for a named-pipe tstream layered over a SMB1/SMB2
 * client connection.  The is_smb1 flag selects between the fnum (SMB1)
 * and fid_persistent/fid_volatile (SMB2) handle representations.
 */
struct tstream_cli_np {
	struct cli_state *cli;		/* underlying SMB connection */
	const char *npipe;		/* pipe name, without leading backslash */
	bool is_smb1;			/* chooses SMB1 vs SMB2 code paths */
	uint16_t fnum;			/* SMB1 open file handle */
	uint64_t fid_persistent;	/* SMB2 file id (persistent part) */
	uint64_t fid_volatile;		/* SMB2 file id (volatile part) */
	unsigned int default_timeout;	/* cli timeout captured at open time */

	/* State for combining a write+read pair into one trans/ioctl call. */
	struct {
		bool active;			/* set by tstream_cli_np_use_trans() */
		struct tevent_req *read_req;	/* deferred readv waiting for trans */
		struct tevent_req *write_req;	/* deferred writev waiting for trans */
		uint16_t setup[2];		/* SMBtrans setup: cmd + fnum */
	} trans;

	/* Staging buffers for partially consumed reads and pending writes. */
	struct {
		off_t ofs;	/* current offset into buf */
		size_t left;	/* bytes remaining in buf */
		uint8_t *buf;	/* talloc'ed staging buffer */
	} read, write;
};
/*
 * Talloc destructor: synchronously close the pipe handle if the
 * connection is still alive.  Always returns 0 (never veto the free).
 */
static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
{
	NTSTATUS status;

	if (!cli_state_is_connected(cli_nps->cli)) {
		return 0;
	}

	/*
	 * TODO: do not use a sync call with a destructor!!!
	 *
	 * This only happens, if a caller does talloc_free(),
	 * while the everything was still ok.
	 *
	 * If we get an unexpected failure within a normal
	 * operation, we already do an async cli_close_send()/_recv().
	 *
	 * Once we've fixed all callers to call
	 * tstream_disconnect_send()/_recv(), this will
	 * never be called.
	 */
	if (cli_nps->is_smb1) {
		status = cli_close(cli_nps->cli, cli_nps->fnum);
	} else {
		status = smb2cli_close(cli_nps->cli->conn,
				       cli_nps->cli->timeout,
				       cli_nps->cli->smb2.session,
				       cli_nps->cli->smb2.tid, 0,
				       cli_nps->fid_persistent,
				       cli_nps->fid_volatile);
	}
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
			  "failed on pipe %s. Error was %s\n",
			  cli_nps->npipe, nt_errstr(status)));
	}
	/*
	 * We can't do much on failure
	 */
	return 0;
}
/* In-flight state for tstream_cli_np_open_send()/_recv(). */
struct tstream_cli_np_open_state {
	struct cli_state *cli;
	bool is_smb1;
	uint16_t fnum;		/* result of the SMB1 ntcreate */
	uint64_t fid_persistent;	/* result of the SMB2 create */
	uint64_t fid_volatile;
	const char *npipe;	/* owned copy of the pipe name */
};
126 static void tstream_cli_np_open_done(struct tevent_req *subreq);
/*
 * Async open of the named pipe @npipe on connection @cli.
 * Uses SMB1 NTCreateX for pre-SMB2 protocols, SMB2 CREATE otherwise.
 */
struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct cli_state *cli,
					    const char *npipe)
{
	struct tevent_req *req;
	struct tstream_cli_np_open_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_open_state);
	if (!req) {
		return NULL;
	}
	state->cli = cli;

	state->npipe = talloc_strdup(state, npipe);
	if (tevent_req_nomem(state->npipe, req)) {
		return tevent_req_post(req, ev);
	}

	if (smbXcli_conn_protocol(cli->conn) < PROTOCOL_SMB2_02) {
		state->is_smb1 = true;
	}

	if (state->is_smb1) {
		const char *smb1_npipe;

		/*
		 * Windows and newer Samba versions allow
		 * the pipe name without leading backslash,
		 * but we should better behave like windows clients
		 */
		smb1_npipe = talloc_asprintf(state, "\\%s", state->npipe);
		if (tevent_req_nomem(smb1_npipe, req)) {
			return tevent_req_post(req, ev);
		}
		/*
		 * NOTE(review): the zero arguments below (CreatFlags,
		 * FileAttributes, CreateOptions, SecurityFlags) were
		 * reconstructed from a damaged source copy — confirm
		 * against the cli_ntcreate_send() prototype.
		 */
		subreq = cli_ntcreate_send(state, ev, cli,
					   smb1_npipe,
					   0,
					   DESIRED_ACCESS_PIPE,
					   0,
					   FILE_SHARE_READ|FILE_SHARE_WRITE,
					   FILE_OPEN,
					   0,
					   0);
	} else {
		subreq = smb2cli_create_send(state, ev, cli->conn,
					     cli->timeout, cli->smb2.session,
					     cli->smb2.tcon,
					     npipe,
					     SMB2_OPLOCK_LEVEL_NONE,
					     SMB2_IMPERSONATION_IMPERSONATION,
					     DESIRED_ACCESS_PIPE,
					     0, /* file_attributes */
					     FILE_SHARE_READ|FILE_SHARE_WRITE,
					     FILE_OPEN,
					     0, /* create_options */
					     NULL); /* blobs */
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);

	return req;
}
/* Completion of the SMB1/SMB2 create: store the resulting handle. */
static void tstream_cli_np_open_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	NTSTATUS status;

	if (state->is_smb1) {
		status = cli_ntcreate_recv(subreq, &state->fnum);
	} else {
		status = smb2cli_create_recv(subreq,
					     &state->fid_persistent,
					     &state->fid_volatile);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return;
	}

	tevent_req_done(req);
}
/*
 * Finish the async open and build the tstream context around the
 * now-open pipe handle.  On success *_stream is owned by mem_ctx.
 */
NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
				   TALLOC_CTX *mem_ctx,
				   struct tstream_context **_stream,
				   const char *location)
{
	struct tstream_cli_np_open_state *state =
		tevent_req_data(req, struct tstream_cli_np_open_state);
	struct tstream_context *stream;
	struct tstream_cli_np *cli_nps;
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	stream = tstream_context_create(mem_ctx,
					&tstream_cli_np_ops,
					&cli_nps,
					struct tstream_cli_np,
					location);
	if (!stream) {
		tevent_req_received(req);
		return NT_STATUS_NO_MEMORY;
	}
	ZERO_STRUCTP(cli_nps);

	cli_nps->cli = state->cli;
	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
	cli_nps->is_smb1 = state->is_smb1;
	cli_nps->fnum = state->fnum;
	cli_nps->fid_persistent = state->fid_persistent;
	cli_nps->fid_volatile = state->fid_volatile;
	/*
	 * Probe the current timeout by setting 0, then immediately
	 * restore it; cli_set_timeout() returns the old value.
	 */
	cli_nps->default_timeout = cli_set_timeout(state->cli, 0);
	cli_set_timeout(state->cli, cli_nps->default_timeout);

	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);

	cli_nps->trans.active = false;
	cli_nps->trans.read_req = NULL;
	cli_nps->trans.write_req = NULL;
	/* Prebuild the SMBtrans setup words: command + our SMB1 fnum. */
	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);

	*_stream = stream;
	tevent_req_received(req);
	return NT_STATUS_OK;
}
/* tstream ops: number of already-buffered bytes available to read. */
static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		errno = ENOTCONN;
		return -1;
	}

	return cli_nps->read.left;
}
283 bool tstream_is_cli_np(struct tstream_context *stream)
285 struct tstream_cli_np *cli_nps =
286 talloc_get_type(_tstream_context_data(stream),
287 struct tstream_cli_np);
289 if (!cli_nps) {
290 return false;
293 return true;
296 NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
298 struct tstream_cli_np *cli_nps = tstream_context_data(stream,
299 struct tstream_cli_np);
301 if (cli_nps->trans.read_req) {
302 return NT_STATUS_PIPE_BUSY;
305 if (cli_nps->trans.write_req) {
306 return NT_STATUS_PIPE_BUSY;
309 if (cli_nps->trans.active) {
310 return NT_STATUS_PIPE_BUSY;
313 cli_nps->trans.active = true;
315 return NT_STATUS_OK;
/*
 * Set the underlying cli timeout; returns the previous value, or the
 * saved default when the connection is already gone.
 */
unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
					unsigned int timeout)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	if (!cli_state_is_connected(cli_nps->cli)) {
		return cli_nps->default_timeout;
	}

	return cli_set_timeout(cli_nps->cli, timeout);
}
/* Accessor: the cli_state this stream runs on (no ownership transfer). */
struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	return cli_nps->cli;
}
/* In-flight state for tstream_cli_np_writev_send()/_recv(). */
struct tstream_cli_np_writev_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private copy, consumed as we go */
	size_t count;

	int ret;		/* total bytes accepted so far */

	/* original error saved across the async close-on-error path */
	struct {
		int val;
		const char *location;
	} error;
};
/* Drop the back-pointer in the stream when a pending writev dies. */
static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	cli_nps->trans.write_req = NULL;

	return 0;
}
365 static void tstream_cli_np_writev_write_next(struct tevent_req *req);
/* tstream ops: start an async scatter/gather write to the pipe. */
static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					const struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_writev_state *state;
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_writev_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_writev_write_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
412 static void tstream_cli_np_readv_trans_start(struct tevent_req *req);
413 static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
/*
 * Fill the staging buffer from the remaining iovec entries and send the
 * next (up to TSTREAM_CLI_NP_MAX_BUF_SIZE bytes) fragment.  When trans
 * mode is armed, the final fragment is parked for the trans round trip
 * instead of being written directly.
 */
static void tstream_cli_np_writev_write_next(struct tevent_req *req)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;
	size_t i;
	size_t left = 0;

	for (i=0; i < state->count; i++) {
		left += state->vector[i].iov_len;
	}

	if (left == 0) {
		/* everything written: release the staging buffer */
		TALLOC_FREE(cli_nps->write.buf);
		tevent_req_done(req);
		return;
	}

	cli_nps->write.ofs = 0;
	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
					    uint8_t, cli_nps->write.left);
	if (tevent_req_nomem(cli_nps->write.buf, req)) {
		return;
	}

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->write.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);

		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->write.ofs += len;
		cli_nps->write.left -= len;

		/* advance to the next iovec entry once this one is drained */
		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->trans.active && state->count == 0) {
		/* trans armed and this was the last fragment: park it */
		cli_nps->trans.active = false;
		cli_nps->trans.write_req = req;
		return;
	}

	if (cli_nps->trans.read_req && state->count == 0) {
		/* a readv is already waiting: kick off the trans call */
		cli_nps->trans.write_req = req;
		tstream_cli_np_readv_trans_start(cli_nps->trans.read_req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
					     cli_nps->fnum,
					     8, /* 8 means message mode. */
					     cli_nps->write.buf,
					     0, /* offset */
					     cli_nps->write.ofs); /* size */
	} else {
		subreq = smb2cli_write_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    cli_nps->write.ofs, /* length */
					    0, /* offset */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    0, /* remaining_bytes */
					    0, /* flags */
					    cli_nps->write.buf);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_write_done,
				req);
}
510 static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
511 int error,
512 const char *location);
/*
 * One fragment written: verify the full fragment went out (a short
 * write on a message-mode pipe is fatal), then continue with the next.
 */
static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	size_t written;
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_write_andx_recv(subreq, &written);
	} else {
		status = smb2cli_write_recv(subreq);
		/*
		 * NOTE(review): the SMB2 recv path assumes the server
		 * accepted the full fragment instead of reading the
		 * actual count from the response.
		 */
		written = cli_nps->write.ofs; // TODO: get the value from the server
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	if (written != cli_nps->write.ofs) {
		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
		return;
	}

	tstream_cli_np_writev_write_next(req);
}
546 static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
/*
 * Fatal write error: remember the original error, async-close the pipe,
 * and report the saved error once the close completes.
 */
static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
						 int error,
						 const char *location)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_writev_disconnect_done,
				req);
}
/* Close-on-error finished: drop the connection, report the saved error. */
static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req, struct tstream_cli_np_writev_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	/* the close result is ignored; we are already failing */
	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
/* tstream ops: collect the writev result (bytes written, or -1/errno). */
static int tstream_cli_np_writev_recv(struct tevent_req *req,
				      int *perrno)
{
	struct tstream_cli_np_writev_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_writev_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
/* In-flight state for tstream_cli_np_readv_send()/_recv(). */
struct tstream_cli_np_readv_state {
	struct tstream_context *stream;
	struct tevent_context *ev;

	struct iovec *vector;	/* private copy, consumed as we go */
	size_t count;

	int ret;		/* total bytes delivered so far */

	struct {
		/* immediate used to resume the read after a trans call */
		struct tevent_immediate *im;
	} trans;

	/* original error saved across the async close-on-error path */
	struct {
		int val;
		const char *location;
	} error;
};
/* Drop the back-pointer in the stream when a pending readv dies. */
static int tstream_cli_np_readv_state_destructor(struct tstream_cli_np_readv_state *state)
{
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	cli_nps->trans.read_req = NULL;

	return 0;
}
661 static void tstream_cli_np_readv_read_next(struct tevent_req *req);
/* tstream ops: start an async scatter/gather read from the pipe. */
static struct tevent_req *tstream_cli_np_readv_send(TALLOC_CTX *mem_ctx,
					struct tevent_context *ev,
					struct tstream_context *stream,
					struct iovec *vector,
					size_t count)
{
	struct tevent_req *req;
	struct tstream_cli_np_readv_state *state;
	struct tstream_cli_np *cli_nps =
		tstream_context_data(stream, struct tstream_cli_np);

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_readv_state);
	if (!req) {
		return NULL;
	}
	state->stream = stream;
	state->ev = ev;
	state->ret = 0;

	talloc_set_destructor(state, tstream_cli_np_readv_state_destructor);

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	/*
	 * we make a copy of the vector so we can change the structure
	 */
	state->vector = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->vector, req)) {
		return tevent_req_post(req, ev);
	}
	memcpy(state->vector, vector, sizeof(struct iovec) * count);
	state->count = count;

	tstream_cli_np_readv_read_next(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}
708 static void tstream_cli_np_readv_read_done(struct tevent_req *subreq);
/*
 * Serve the caller's iovecs from the buffered read data; when the
 * buffer runs dry, either defer to a pending trans call or issue the
 * next SMB1 ReadX / SMB2 READ.
 */
static void tstream_cli_np_readv_read_next(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	/*
	 * copy the pending buffer first
	 */
	while (cli_nps->read.left > 0 && state->count > 0) {
		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
		size_t len = MIN(cli_nps->read.left, state->vector[0].iov_len);

		memcpy(base, cli_nps->read.buf + cli_nps->read.ofs, len);

		base += len;
		state->vector[0].iov_base = base;
		state->vector[0].iov_len -= len;

		cli_nps->read.ofs += len;
		cli_nps->read.left -= len;

		/* advance to the next iovec entry once this one is full */
		if (state->vector[0].iov_len == 0) {
			state->vector += 1;
			state->count -= 1;
		}

		state->ret += len;
	}

	if (cli_nps->read.left == 0) {
		TALLOC_FREE(cli_nps->read.buf);
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	if (cli_nps->trans.active) {
		/* trans armed: wait for the matching writev to start it */
		cli_nps->trans.active = false;
		cli_nps->trans.read_req = req;
		return;
	}

	if (cli_nps->trans.write_req) {
		/* the writev side is parked: fire the trans call now */
		cli_nps->trans.read_req = req;
		tstream_cli_np_readv_trans_start(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
					    cli_nps->fnum,
					    0, /* offset */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		subreq = smb2cli_read_send(state, state->ev,
					   cli_nps->cli->conn,
					   cli_nps->cli->timeout,
					   cli_nps->cli->smb2.session,
					   cli_nps->cli->smb2.tid,
					   TSTREAM_CLI_NP_MAX_BUF_SIZE, /* length */
					   0, /* offset */
					   cli_nps->fid_persistent,
					   cli_nps->fid_volatile,
					   0, /* minimum_count */
					   0); /* remaining_bytes */
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_read_done,
				req);
}
791 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq);
/*
 * Issue the combined write+read round trip: SMBtrans on SMB1, or an
 * FSCTL_NAMED_PIPE_READ_WRITE ioctl on SMB2.  The parked write buffer
 * is the request payload; the response becomes the read buffer.
 */
static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	/* allocated up front so completion can always reschedule */
	state->trans.im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->trans.im, req)) {
		return;
	}

	if (cli_nps->is_smb1) {
		/*
		 * NOTE(review): the lone "0" (max_setup) below was
		 * reconstructed from a damaged source copy — confirm
		 * against the cli_trans_send() prototype.
		 */
		subreq = cli_trans_send(state, state->ev,
					cli_nps->cli,
					SMBtrans,
					"\\PIPE\\",
					0, 0, 0,
					cli_nps->trans.setup, 2,
					0,
					NULL, 0, 0,
					cli_nps->write.buf,
					cli_nps->write.ofs,
					TSTREAM_CLI_NP_MAX_BUF_SIZE);
	} else {
		DATA_BLOB in_input_buffer = data_blob_null;
		DATA_BLOB in_output_buffer = data_blob_null;

		in_input_buffer = data_blob_const(cli_nps->write.buf,
						  cli_nps->write.ofs);

		subreq = smb2cli_ioctl_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile,
					    FSCTL_NAMED_PIPE_READ_WRITE,
					    0, /* in_max_input_length */
					    &in_input_buffer,
					    /* in_max_output_length */
					    TSTREAM_CLI_NP_MAX_BUF_SIZE,
					    &in_output_buffer,
					    SMB2_IOCTL_FLAG_IS_FSCTL);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_trans_done,
				req);
}
850 static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
851 int error,
852 const char *location);
853 static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
854 struct tevent_immediate *im,
855 void *private_data);
857 static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
859 struct tevent_req *req =
860 tevent_req_callback_data(subreq, struct tevent_req);
861 struct tstream_cli_np_readv_state *state =
862 tevent_req_data(req, struct tstream_cli_np_readv_state);
863 struct tstream_cli_np *cli_nps =
864 tstream_context_data(state->stream, struct tstream_cli_np);
865 uint8_t *rcvbuf;
866 uint32_t received;
867 NTSTATUS status;
869 if (cli_nps->is_smb1) {
870 status = cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
871 NULL, 0, NULL,
872 &rcvbuf, 0, &received);
873 } else {
874 DATA_BLOB out_input_buffer = data_blob_null;
875 DATA_BLOB out_output_buffer = data_blob_null;
877 status = smb2cli_ioctl_recv(subreq, state,
878 &out_input_buffer,
879 &out_output_buffer);
881 /* Note that rcvbuf is not a talloc pointer here */
882 rcvbuf = out_output_buffer.data;
883 received = out_output_buffer.length;
885 TALLOC_FREE(subreq);
886 if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
887 status = NT_STATUS_OK;
889 if (!NT_STATUS_IS_OK(status)) {
890 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
891 return;
894 if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
895 tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
896 return;
899 if (received == 0) {
900 tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
901 return;
904 cli_nps->read.ofs = 0;
905 cli_nps->read.left = received;
906 cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
907 if (cli_nps->read.buf == NULL) {
908 TALLOC_FREE(subreq);
909 tevent_req_nomem(cli_nps->read.buf, req);
910 return;
912 memcpy(cli_nps->read.buf, rcvbuf, received);
914 if (cli_nps->trans.write_req == NULL) {
915 tstream_cli_np_readv_read_next(req);
916 return;
919 tevent_schedule_immediate(state->trans.im, state->ev,
920 tstream_cli_np_readv_trans_next, req);
922 tevent_req_done(cli_nps->trans.write_req);
/* Immediate handler: continue the readv after the trans completed. */
static void tstream_cli_np_readv_trans_next(struct tevent_context *ctx,
					    struct tevent_immediate *im,
					    void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);

	tstream_cli_np_readv_read_next(req);
}
/*
 * One SMB read completed: copy the payload into the stream's read
 * buffer and continue serving the caller's iovecs.
 */
static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	uint8_t *rcvbuf;
	ssize_t received;
	NTSTATUS status;

	/*
	 * We must free subreq in this function as there is
	 * a timer event attached to it.
	 */

	if (cli_nps->is_smb1) {
		status = cli_read_andx_recv(subreq, &received, &rcvbuf);
	} else {
		uint32_t data_length = 0;
		status = smb2cli_read_recv(subreq, state, &rcvbuf, &data_length);
		received = data_length;
	}
	/*
	 * We can't TALLOC_FREE(subreq) as usual here, as rcvbuf still is a
	 * child of that.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_BUFFER_TOO_SMALL)) {
		/*
		 * NT_STATUS_BUFFER_TOO_SMALL means that there's
		 * more data to read when the named pipe is used
		 * in message mode (which is the case here).
		 *
		 * But we hide this from the caller.
		 */
		status = NT_STATUS_OK;
	}
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
		return;
	}

	if (received == 0) {
		TALLOC_FREE(subreq);
		tstream_cli_np_readv_disconnect_now(req, EPIPE, __location__);
		return;
	}

	cli_nps->read.ofs = 0;
	cli_nps->read.left = received;
	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
	if (cli_nps->read.buf == NULL) {
		TALLOC_FREE(subreq);
		tevent_req_nomem(cli_nps->read.buf, req);
		return;
	}
	/* copy before freeing subreq: rcvbuf may be a child of it */
	memcpy(cli_nps->read.buf, rcvbuf, received);
	TALLOC_FREE(subreq);

	tstream_cli_np_readv_read_next(req);
}
1006 static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq);
1008 static void tstream_cli_np_readv_error(struct tevent_req *req);
/*
 * Fatal read error: remember the original error, async-close the pipe,
 * and report the saved error once the close completes.
 */
static void tstream_cli_np_readv_disconnect_now(struct tevent_req *req,
						int error,
						const char *location)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);
	struct tevent_req *subreq;

	state->error.val = error;
	state->error.location = location;

	if (!cli_state_is_connected(cli_nps->cli)) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, state->ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, state->ev,
					    cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (subreq == NULL) {
		/* return the original error */
		tstream_cli_np_readv_error(req);
		return;
	}
	tevent_req_set_callback(subreq,
				tstream_cli_np_readv_disconnect_done,
				req);
}
/* Close-on-error finished: drop the connection, report the saved error. */
static void tstream_cli_np_readv_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);

	/* the close result is ignored; we are already failing */
	if (cli_nps->is_smb1) {
		cli_close_recv(subreq);
	} else {
		smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);

	cli_nps->cli = NULL;

	tstream_cli_np_readv_error(req);
}
1075 static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
1076 struct tevent_immediate *im,
1077 void *private_data);
/*
 * Propagate the saved error.  If a writev is coupled to this readv via
 * a trans call, fail the writev now and fail the readv from an
 * immediate, so the two completions are not nested in one stack frame.
 */
static void tstream_cli_np_readv_error(struct tevent_req *req)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream,
		struct tstream_cli_np);

	if (cli_nps->trans.write_req == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	if (state->trans.im == NULL) {
		/* return the original error */
		_tevent_req_error(req, state->error.val, state->error.location);
		return;
	}

	tevent_schedule_immediate(state->trans.im, state->ev,
				  tstream_cli_np_readv_error_trigger, req);

	/* return the original error for writev */
	_tevent_req_error(cli_nps->trans.write_req,
			  state->error.val, state->error.location);
}
/* Immediate handler: deliver the deferred readv error. */
static void tstream_cli_np_readv_error_trigger(struct tevent_context *ctx,
					       struct tevent_immediate *im,
					       void *private_data)
{
	struct tevent_req *req =
		talloc_get_type_abort(private_data,
		struct tevent_req);
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req,
		struct tstream_cli_np_readv_state);

	/* return the original error */
	_tevent_req_error(req, state->error.val, state->error.location);
}
/* tstream ops: collect the readv result (bytes read, or -1/errno). */
static int tstream_cli_np_readv_recv(struct tevent_req *req,
				     int *perrno)
{
	struct tstream_cli_np_readv_state *state =
		tevent_req_data(req, struct tstream_cli_np_readv_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
/* In-flight state for tstream_cli_np_disconnect_send()/_recv(). */
struct tstream_cli_np_disconnect_state {
	struct tstream_context *stream;
};
1143 static void tstream_cli_np_disconnect_done(struct tevent_req *subreq);
/* tstream ops: async-close the pipe handle (orderly disconnect). */
static struct tevent_req *tstream_cli_np_disconnect_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct tstream_context *stream)
{
	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
					 struct tstream_cli_np);
	struct tevent_req *req;
	struct tstream_cli_np_disconnect_state *state;
	struct tevent_req *subreq;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_cli_np_disconnect_state);
	if (req == NULL) {
		return NULL;
	}

	state->stream = stream;

	if (!cli_state_is_connected(cli_nps->cli)) {
		tevent_req_error(req, ENOTCONN);
		return tevent_req_post(req, ev);
	}

	if (cli_nps->is_smb1) {
		subreq = cli_close_send(state, ev, cli_nps->cli,
					cli_nps->fnum);
	} else {
		subreq = smb2cli_close_send(state, ev, cli_nps->cli->conn,
					    cli_nps->cli->timeout,
					    cli_nps->cli->smb2.session,
					    cli_nps->cli->smb2.tid,
					    0, /* flags */
					    cli_nps->fid_persistent,
					    cli_nps->fid_volatile);
	}
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, tstream_cli_np_disconnect_done, req);

	return req;
}
/* Close finished: report EIO on failure, otherwise mark disconnected. */
static void tstream_cli_np_disconnect_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
							  struct tevent_req);
	struct tstream_cli_np_disconnect_state *state =
		tevent_req_data(req, struct tstream_cli_np_disconnect_state);
	struct tstream_cli_np *cli_nps =
		tstream_context_data(state->stream, struct tstream_cli_np);
	NTSTATUS status;

	if (cli_nps->is_smb1) {
		status = cli_close_recv(subreq);
	} else {
		status = smb2cli_close_recv(subreq);
	}
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_error(req, EIO);
		return;
	}

	cli_nps->cli = NULL;

	tevent_req_done(req);
}
/* tstream ops: collect the disconnect result (0, or -1/errno). */
static int tstream_cli_np_disconnect_recv(struct tevent_req *req,
					  int *perrno)
{
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);

	tevent_req_received(req);
	return ret;
}
1225 static const struct tstream_context_ops tstream_cli_np_ops = {
1226 .name = "cli_np",
1228 .pending_bytes = tstream_cli_np_pending_bytes,
1230 .readv_send = tstream_cli_np_readv_send,
1231 .readv_recv = tstream_cli_np_readv_recv,
1233 .writev_send = tstream_cli_np_writev_send,
1234 .writev_recv = tstream_cli_np_writev_recv,
1236 .disconnect_send = tstream_cli_np_disconnect_send,
1237 .disconnect_recv = tstream_cli_np_disconnect_recv,