2 Unix SMB/CIFS implementation.
4 Copyright (C) Volker Lendecke 2008
6 ** NOTE! The following LGPL license applies to the async_sock
7 ** library. This does NOT imply that all of Samba is released
10 This library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Lesser General Public
12 License as published by the Free Software Foundation; either
13 version 3 of the License, or (at your option) any later version.
15 This library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Library General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>.
25 #include "system/network.h"
26 #include "system/filesys.h"
29 #include "lib/async_req/async_sock.h"
30 #include "lib/util/iov_buf.h"
31 #include "lib/util/util_net.h"
33 /* Note: lib/util/ is currently GPL */
34 #include "lib/util/tevent_unix.h"
35 #include "lib/util/samba_util.h"
/*
 * Per-request state for async_connect_send/recv.
 *
 * Restored fields: "fd", "result", "old_sockflags" and "private_data"
 * are all referenced by the visible implementation (cleanup restores
 * old_sockflags on state->fd; the connect result and the callback
 * cookie are stored here) but were missing from the declaration.
 */
struct async_connect_state {
	int fd;				/* socket being connected; -1 once cleaned up */
	struct tevent_fd *fde;		/* fd event waiting for connect completion */
	int result;			/* return value of connect(2) */
	long old_sockflags;		/* F_GETFL flags to restore on cleanup */
	socklen_t address_len;
	struct sockaddr_storage address; /* copy of the destination address */

	void (*before_connect)(void *private_data);
	void (*after_connect)(void *private_data);
	void *private_data;		/* cookie for the before/after hooks */
};
/* Forward declarations for the async connect helpers below. */
static void async_connect_cleanup(struct tevent_req *req,
				  enum tevent_req_state req_state);
static void async_connect_connected(struct tevent_context *ev,
				    struct tevent_fd *fde, uint16_t flags,
				    void *priv);
57 * @brief async version of connect(2)
58 * @param[in] mem_ctx The memory context to hang the result off
59 * @param[in] ev The event context to work from
60 * @param[in] fd The socket to connect with
61 * @param[in] address Where to connect?
62 * @param[in] address_len Length of *address
63 * @retval The async request
65 * This function sets the socket into non-blocking state to be able to call
66 * connect in an async state. This will be reset when the request is finished.
69 struct tevent_req
*async_connect_send(
70 TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
, int fd
,
71 const struct sockaddr
*address
, socklen_t address_len
,
72 void (*before_connect
)(void *private_data
),
73 void (*after_connect
)(void *private_data
),
76 struct tevent_req
*req
;
77 struct async_connect_state
*state
;
80 req
= tevent_req_create(mem_ctx
, &state
, struct async_connect_state
);
86 * We have to set the socket to nonblocking for async connect(2). Keep
87 * the old sockflags around.
91 state
->before_connect
= before_connect
;
92 state
->after_connect
= after_connect
;
93 state
->private_data
= private_data
;
95 state
->old_sockflags
= fcntl(fd
, F_GETFL
, 0);
96 if (state
->old_sockflags
== -1) {
97 tevent_req_error(req
, errno
);
98 return tevent_req_post(req
, ev
);
101 tevent_req_set_cleanup_fn(req
, async_connect_cleanup
);
103 state
->address_len
= address_len
;
104 if (address_len
> sizeof(state
->address
)) {
105 tevent_req_error(req
, EINVAL
);
106 return tevent_req_post(req
, ev
);
108 memcpy(&state
->address
, address
, address_len
);
110 ret
= set_blocking(fd
, false);
112 tevent_req_error(req
, errno
);
113 return tevent_req_post(req
, ev
);
116 if (state
->before_connect
!= NULL
) {
117 state
->before_connect(state
->private_data
);
120 state
->result
= connect(fd
, address
, address_len
);
122 if (state
->after_connect
!= NULL
) {
123 state
->after_connect(state
->private_data
);
126 if (state
->result
== 0) {
127 tevent_req_done(req
);
128 return tevent_req_post(req
, ev
);
132 * The only errno indicating that an initial connect is still
133 * in flight is EINPROGRESS.
135 * This allows callers like open_socket_out_send() to reuse
136 * fds and call us with an fd for which the connect is still
137 * in flight. The proper thing to do for callers would be
138 * closing the fd and starting from scratch with a fresh
142 if (errno
!= EINPROGRESS
) {
143 tevent_req_error(req
, errno
);
144 return tevent_req_post(req
, ev
);
148 * Note for historic reasons TEVENT_FD_WRITE is not enough
149 * to get notified for POLLERR or EPOLLHUP even if they
150 * come together with POLLOUT. That means we need to
151 * use TEVENT_FD_READ in addition until we have
154 state
->fde
= tevent_add_fd(ev
, state
, fd
, TEVENT_FD_READ
|TEVENT_FD_WRITE
,
155 async_connect_connected
, req
);
156 if (state
->fde
== NULL
) {
157 tevent_req_error(req
, ENOMEM
);
158 return tevent_req_post(req
, ev
);
163 static void async_connect_cleanup(struct tevent_req
*req
,
164 enum tevent_req_state req_state
)
166 struct async_connect_state
*state
=
167 tevent_req_data(req
, struct async_connect_state
);
169 TALLOC_FREE(state
->fde
);
170 if (state
->fd
!= -1) {
173 ret
= fcntl(state
->fd
, F_SETFL
, state
->old_sockflags
);
183 * fde event handler for connect(2)
184 * @param[in] ev The event context that sent us here
185 * @param[in] fde The file descriptor event associated with the connect
186 * @param[in] flags Indicate read/writeability of the socket
187 * @param[in] priv private data, "struct tevent_req *" in this case
190 static void async_connect_connected(struct tevent_context
*ev
,
191 struct tevent_fd
*fde
, uint16_t flags
,
194 struct tevent_req
*req
= talloc_get_type_abort(
195 priv
, struct tevent_req
);
196 struct async_connect_state
*state
=
197 tevent_req_data(req
, struct async_connect_state
);
199 int socket_error
= 0;
200 socklen_t slen
= sizeof(socket_error
);
202 ret
= getsockopt(state
->fd
, SOL_SOCKET
, SO_ERROR
,
203 &socket_error
, &slen
);
207 * According to Stevens this is the Solaris behaviour
208 * in case the connection encountered an error:
209 * getsockopt() fails, error is in errno
211 tevent_req_error(req
, errno
);
215 if (socket_error
!= 0) {
217 * Berkeley derived implementations (including) Linux
218 * return the pending error via socket_error.
220 tevent_req_error(req
, socket_error
);
224 tevent_req_done(req
);
/*
 * Receive the result of an async connect.
 * Returns 0 on success; on failure returns -1 and stores the unix
 * error in *perrno.
 */
int async_connect_recv(struct tevent_req *req, int *perrno)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perrno = err;
		return -1;
	}

	return 0;
}
/*
 * Per-request state for writev_send/recv.
 *
 * Restored fields: "fd", "iov", "count", "total_size" and "flags" were
 * missing from the declaration but are all used by writev_do() and
 * writev_handler() below.
 */
struct writev_state {
	struct tevent_context *ev;
	struct tevent_queue_entry *queue_entry;	/* non-NULL while queued */
	int fd;
	struct tevent_fd *fde;
	struct iovec *iov;		/* talloc'ed copy, advanced as we write */
	int count;			/* remaining iovec entries */
	size_t total_size;		/* bytes written so far */
	uint16_t flags;			/* TEVENT_FD_* flags we wait for */
	bool err_on_readability;	/* fail with EPIPE if fd turns readable */
};
/* Forward declarations for the writev helpers below. */
static void writev_cleanup(struct tevent_req *req,
			   enum tevent_req_state req_state);
static bool writev_cancel(struct tevent_req *req);
static void writev_trigger(struct tevent_req *req, void *private_data);
static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
			   uint16_t flags, void *private_data);
259 struct tevent_req
*writev_send(TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
,
260 struct tevent_queue
*queue
, int fd
,
261 bool err_on_readability
,
262 struct iovec
*iov
, int count
)
264 struct tevent_req
*req
;
265 struct writev_state
*state
;
267 req
= tevent_req_create(mem_ctx
, &state
, struct writev_state
);
273 state
->total_size
= 0;
274 state
->count
= count
;
275 state
->iov
= (struct iovec
*)talloc_memdup(
276 state
, iov
, sizeof(struct iovec
) * count
);
277 if (tevent_req_nomem(state
->iov
, req
)) {
278 return tevent_req_post(req
, ev
);
280 state
->flags
= TEVENT_FD_WRITE
|TEVENT_FD_READ
;
281 state
->err_on_readability
= err_on_readability
;
283 tevent_req_set_cleanup_fn(req
, writev_cleanup
);
284 tevent_req_set_cancel_fn(req
, writev_cancel
);
287 state
->fde
= tevent_add_fd(state
->ev
, state
, state
->fd
,
288 state
->flags
, writev_handler
, req
);
289 if (tevent_req_nomem(state
->fde
, req
)) {
290 return tevent_req_post(req
, ev
);
296 * writev_trigger tries a nonblocking write. If that succeeds,
297 * we can't directly notify the callback to call
298 * writev_recv. The callback would TALLOC_FREE(req) after
299 * calling writev_recv even before writev_trigger can inspect
302 tevent_req_defer_callback(req
, ev
);
304 state
->queue_entry
= tevent_queue_add_optimize_empty(
305 queue
, ev
, req
, writev_trigger
, NULL
);
306 if (tevent_req_nomem(state
->queue_entry
, req
)) {
307 return tevent_req_post(req
, ev
);
309 if (!tevent_req_is_in_progress(req
)) {
310 return tevent_req_post(req
, ev
);
315 static void writev_cleanup(struct tevent_req
*req
,
316 enum tevent_req_state req_state
)
318 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
320 TALLOC_FREE(state
->queue_entry
);
321 TALLOC_FREE(state
->fde
);
324 static bool writev_cancel(struct tevent_req
*req
)
326 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
328 if (state
->total_size
> 0) {
330 * We've already started to write :-(
335 TALLOC_FREE(state
->queue_entry
);
336 TALLOC_FREE(state
->fde
);
338 tevent_req_defer_callback(req
, state
->ev
);
339 tevent_req_error(req
, ECANCELED
);
343 static void writev_do(struct tevent_req
*req
, struct writev_state
*state
)
348 written
= writev(state
->fd
, state
->iov
, state
->count
);
349 if ((written
== -1) &&
352 (errno
== EWOULDBLOCK
))) {
353 /* retry after going through the tevent loop */
357 tevent_req_error(req
, errno
);
361 tevent_req_error(req
, EPIPE
);
364 state
->total_size
+= written
;
366 ok
= iov_advance(&state
->iov
, &state
->count
, written
);
368 tevent_req_error(req
, EIO
);
372 if (state
->count
== 0) {
373 tevent_req_done(req
);
378 static void writev_trigger(struct tevent_req
*req
, void *private_data
)
380 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
382 state
->queue_entry
= NULL
;
384 writev_do(req
, state
);
385 if (!tevent_req_is_in_progress(req
)) {
389 state
->fde
= tevent_add_fd(state
->ev
, state
, state
->fd
, state
->flags
,
390 writev_handler
, req
);
391 if (tevent_req_nomem(state
->fde
, req
)) {
396 static void writev_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
397 uint16_t flags
, void *private_data
)
399 struct tevent_req
*req
= talloc_get_type_abort(
400 private_data
, struct tevent_req
);
401 struct writev_state
*state
=
402 tevent_req_data(req
, struct writev_state
);
404 if ((state
->flags
& TEVENT_FD_READ
) && (flags
& TEVENT_FD_READ
)) {
407 if (state
->err_on_readability
) {
408 /* Readable and the caller wants an error on read. */
409 tevent_req_error(req
, EPIPE
);
413 /* Might be an error. Check if there are bytes to read */
414 ret
= ioctl(state
->fd
, FIONREAD
, &value
);
415 /* FIXME - should we also check
416 for ret == 0 and value == 0 here ? */
418 /* There's an error. */
419 tevent_req_error(req
, EPIPE
);
422 /* A request for TEVENT_FD_READ will succeed from now and
423 forevermore until the bytes are read so if there was
424 an error we'll wait until we do read, then get it in
425 the read callback function. Until then, remove TEVENT_FD_READ
426 from the flags we're waiting for. */
427 state
->flags
&= ~TEVENT_FD_READ
;
428 TEVENT_FD_NOT_READABLE(fde
);
430 /* If not writable, we're done. */
431 if (!(flags
& TEVENT_FD_WRITE
)) {
436 writev_do(req
, state
);
439 ssize_t
writev_recv(struct tevent_req
*req
, int *perrno
)
441 struct writev_state
*state
=
442 tevent_req_data(req
, struct writev_state
);
445 if (tevent_req_is_unix_error(req
, perrno
)) {
446 tevent_req_received(req
);
449 ret
= state
->total_size
;
450 tevent_req_received(req
);
/*
 * Per-request state for read_packet_send/recv.
 *
 * Restored fields: "fd", "buf" and "nread" were missing from the
 * declaration but are used throughout read_packet_handler().
 */
struct read_packet_state {
	int fd;
	struct tevent_fd *fde;
	uint8_t *buf;		/* talloc_array; its size is the target length */
	size_t nread;		/* bytes received so far */
	ssize_t (*more)(uint8_t *buf, size_t buflen, void *private_data);
	void *private_data;	/* cookie for the "more" callback */
};
/* Forward declarations for the read_packet helpers below. */
static void read_packet_cleanup(struct tevent_req *req,
				enum tevent_req_state req_state);
static void read_packet_handler(struct tevent_context *ev,
				struct tevent_fd *fde,
				uint16_t flags, void *private_data);
469 struct tevent_req
*read_packet_send(TALLOC_CTX
*mem_ctx
,
470 struct tevent_context
*ev
,
471 int fd
, size_t initial
,
472 ssize_t (*more
)(uint8_t *buf
,
477 struct tevent_req
*req
;
478 struct read_packet_state
*state
;
480 req
= tevent_req_create(mem_ctx
, &state
, struct read_packet_state
);
487 state
->private_data
= private_data
;
489 tevent_req_set_cleanup_fn(req
, read_packet_cleanup
);
491 state
->buf
= talloc_array(state
, uint8_t, initial
);
492 if (tevent_req_nomem(state
->buf
, req
)) {
493 return tevent_req_post(req
, ev
);
496 state
->fde
= tevent_add_fd(ev
, state
, fd
,
497 TEVENT_FD_READ
, read_packet_handler
,
499 if (tevent_req_nomem(state
->fde
, req
)) {
500 return tevent_req_post(req
, ev
);
505 static void read_packet_cleanup(struct tevent_req
*req
,
506 enum tevent_req_state req_state
)
508 struct read_packet_state
*state
=
509 tevent_req_data(req
, struct read_packet_state
);
511 TALLOC_FREE(state
->fde
);
514 static void read_packet_handler(struct tevent_context
*ev
,
515 struct tevent_fd
*fde
,
516 uint16_t flags
, void *private_data
)
518 struct tevent_req
*req
= talloc_get_type_abort(
519 private_data
, struct tevent_req
);
520 struct read_packet_state
*state
=
521 tevent_req_data(req
, struct read_packet_state
);
522 size_t total
= talloc_get_size(state
->buf
);
526 nread
= recv(state
->fd
, state
->buf
+state
->nread
, total
-state
->nread
,
528 if ((nread
== -1) && (errno
== ENOTSOCK
)) {
529 nread
= read(state
->fd
, state
->buf
+state
->nread
,
532 if ((nread
== -1) && (errno
== EINTR
)) {
537 tevent_req_error(req
, errno
);
541 tevent_req_error(req
, EPIPE
);
545 state
->nread
+= nread
;
546 if (state
->nread
< total
) {
547 /* Come back later */
552 * We got what was initially requested. See if "more" asks for -- more.
554 if (state
->more
== NULL
) {
555 /* Nobody to ask, this is a async read_data */
556 tevent_req_done(req
);
560 more
= state
->more(state
->buf
, total
, state
->private_data
);
562 /* We got an invalid packet, tell the caller */
563 tevent_req_error(req
, EIO
);
567 /* We're done, full packet received */
568 tevent_req_done(req
);
572 if (total
+ more
< total
) {
573 tevent_req_error(req
, EMSGSIZE
);
577 tmp
= talloc_realloc(state
, state
->buf
, uint8_t, total
+more
);
578 if (tevent_req_nomem(tmp
, req
)) {
584 ssize_t
read_packet_recv(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
,
585 uint8_t **pbuf
, int *perrno
)
587 struct read_packet_state
*state
=
588 tevent_req_data(req
, struct read_packet_state
);
590 if (tevent_req_is_unix_error(req
, perrno
)) {
591 tevent_req_received(req
);
594 *pbuf
= talloc_move(mem_ctx
, &state
->buf
);
595 tevent_req_received(req
);
596 return talloc_get_size(*pbuf
);
/*
 * Per-request state for wait_for_read_send/recv.
 *
 * Restored fields: "fd" and "check_errors" were missing from the
 * declaration but are assigned in wait_for_read_send() and read in
 * wait_for_read_done().
 */
struct wait_for_read_state {
	struct tevent_fd *fde;
	int fd;
	bool check_errors;	/* probe FIONREAD to detect EOF/errors */
};
/* Forward declarations for the wait_for_read helpers below. */
static void wait_for_read_cleanup(struct tevent_req *req,
				  enum tevent_req_state req_state);
static void wait_for_read_done(struct tevent_context *ev,
			       struct tevent_fd *fde,
			       uint16_t flags,
			       void *private_data);
612 struct tevent_req
*wait_for_read_send(TALLOC_CTX
*mem_ctx
,
613 struct tevent_context
*ev
, int fd
,
616 struct tevent_req
*req
;
617 struct wait_for_read_state
*state
;
619 req
= tevent_req_create(mem_ctx
, &state
, struct wait_for_read_state
);
624 tevent_req_set_cleanup_fn(req
, wait_for_read_cleanup
);
626 state
->fde
= tevent_add_fd(ev
, state
, fd
, TEVENT_FD_READ
,
627 wait_for_read_done
, req
);
628 if (tevent_req_nomem(state
->fde
, req
)) {
629 return tevent_req_post(req
, ev
);
633 state
->check_errors
= check_errors
;
637 static void wait_for_read_cleanup(struct tevent_req
*req
,
638 enum tevent_req_state req_state
)
640 struct wait_for_read_state
*state
=
641 tevent_req_data(req
, struct wait_for_read_state
);
643 TALLOC_FREE(state
->fde
);
646 static void wait_for_read_done(struct tevent_context
*ev
,
647 struct tevent_fd
*fde
,
651 struct tevent_req
*req
= talloc_get_type_abort(
652 private_data
, struct tevent_req
);
653 struct wait_for_read_state
*state
=
654 tevent_req_data(req
, struct wait_for_read_state
);
657 if ((flags
& TEVENT_FD_READ
) == 0) {
661 if (!state
->check_errors
) {
662 tevent_req_done(req
);
666 ret
= ioctl(state
->fd
, FIONREAD
, &available
);
668 if ((ret
== -1) && (errno
== EINTR
)) {
669 /* come back later */
674 tevent_req_error(req
, errno
);
678 if (available
== 0) {
679 tevent_req_error(req
, EPIPE
);
683 tevent_req_done(req
);
/*
 * Receive the result of a wait_for_read request.
 * Returns true when the fd is readable; false with the unix error in
 * *perr otherwise.
 */
bool wait_for_read_recv(struct tevent_req *req, int *perr)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perr = err;
		return false;
	}

	return true;
}
698 struct accept_state
{
699 struct tevent_fd
*fde
;
701 struct samba_sockaddr addr
;
/* Forward declaration for the accept handler below. */
static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde,
			   uint16_t flags, void *private_data);
708 struct tevent_req
*accept_send(TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
,
711 struct tevent_req
*req
;
712 struct accept_state
*state
;
714 req
= tevent_req_create(mem_ctx
, &state
, struct accept_state
);
719 state
->listen_sock
= listen_sock
;
721 state
->fde
= tevent_add_fd(ev
, state
, listen_sock
, TEVENT_FD_READ
,
722 accept_handler
, req
);
723 if (tevent_req_nomem(state
->fde
, req
)) {
724 return tevent_req_post(req
, ev
);
729 static void accept_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
730 uint16_t flags
, void *private_data
)
732 struct tevent_req
*req
= talloc_get_type_abort(
733 private_data
, struct tevent_req
);
734 struct accept_state
*state
= tevent_req_data(req
, struct accept_state
);
737 TALLOC_FREE(state
->fde
);
739 if ((flags
& TEVENT_FD_READ
) == 0) {
740 tevent_req_error(req
, EIO
);
744 state
->addr
.sa_socklen
= sizeof(state
->addr
.u
);
746 ret
= accept(state
->listen_sock
,
748 &state
->addr
.sa_socklen
);
749 if ((ret
== -1) && (errno
== EINTR
)) {
754 tevent_req_error(req
, errno
);
757 smb_set_close_on_exec(ret
);
759 tevent_req_done(req
);
762 int accept_recv(struct tevent_req
*req
,
764 struct samba_sockaddr
*paddr
,
767 struct accept_state
*state
= tevent_req_data(req
, struct accept_state
);
768 int sock
= state
->sock
;
771 if (tevent_req_is_unix_error(req
, &err
)) {
775 tevent_req_received(req
);
778 if (listen_sock
!= NULL
) {
779 *listen_sock
= state
->listen_sock
;
782 *paddr
= state
->addr
;
784 tevent_req_received(req
);