/*
   Unix SMB/CIFS implementation.
   Copyright (C) Volker Lendecke 2008

   ** NOTE! The following LGPL license applies to the async_sock
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
25 #include "system/network.h"
26 #include "system/filesys.h"
29 #include "lib/async_req/async_sock.h"
30 #include "lib/util/iov_buf.h"
31 #include "lib/util/util_net.h"
33 /* Note: lib/util/ is currently GPL */
34 #include "lib/util/tevent_unix.h"
35 #include "lib/util/samba_util.h"
/*
 * Per-request state for async_connect_send/recv.
 *
 * NOTE(review): the fields fd, result, old_sockflags and private_data are
 * not visible in this chunk but are read/written by the functions below —
 * confirm names and order against the original file.
 */
struct async_connect_state {
	int fd;				/* socket being connected */
	struct tevent_fd *fde;		/* waits for writability/errors */
	int result;			/* return value of connect(2) */
	long old_sockflags;		/* saved F_GETFL flags, restored on cleanup */
	socklen_t address_len;
	struct sockaddr_storage address;

	/* optional hooks run immediately before/after the connect(2) call */
	void (*before_connect)(void *private_data);
	void (*after_connect)(void *private_data);
	void *private_data;
};
50 static void async_connect_cleanup(struct tevent_req
*req
,
51 enum tevent_req_state req_state
);
52 static void async_connect_connected(struct tevent_context
*ev
,
53 struct tevent_fd
*fde
, uint16_t flags
,
57 * @brief async version of connect(2)
58 * @param[in] mem_ctx The memory context to hang the result off
59 * @param[in] ev The event context to work from
60 * @param[in] fd The socket to recv from
61 * @param[in] address Where to connect?
62 * @param[in] address_len Length of *address
63 * @retval The async request
65 * This function sets the socket into non-blocking state to be able to call
66 * connect in an async state. This will be reset when the request is finished.
69 struct tevent_req
*async_connect_send(
70 TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
, int fd
,
71 const struct sockaddr
*address
, socklen_t address_len
,
72 void (*before_connect
)(void *private_data
),
73 void (*after_connect
)(void *private_data
),
76 struct tevent_req
*req
;
77 struct async_connect_state
*state
;
80 req
= tevent_req_create(mem_ctx
, &state
, struct async_connect_state
);
86 * We have to set the socket to nonblocking for async connect(2). Keep
87 * the old sockflags around.
91 state
->before_connect
= before_connect
;
92 state
->after_connect
= after_connect
;
93 state
->private_data
= private_data
;
95 state
->old_sockflags
= fcntl(fd
, F_GETFL
, 0);
96 if (state
->old_sockflags
== -1) {
97 tevent_req_error(req
, errno
);
98 return tevent_req_post(req
, ev
);
101 tevent_req_set_cleanup_fn(req
, async_connect_cleanup
);
103 state
->address_len
= address_len
;
104 if (address_len
> sizeof(state
->address
)) {
105 tevent_req_error(req
, EINVAL
);
106 return tevent_req_post(req
, ev
);
108 memcpy(&state
->address
, address
, address_len
);
110 ret
= set_blocking(fd
, false);
112 tevent_req_error(req
, errno
);
113 return tevent_req_post(req
, ev
);
116 if (state
->before_connect
!= NULL
) {
117 state
->before_connect(state
->private_data
);
120 state
->result
= connect(fd
, address
, address_len
);
122 if (state
->after_connect
!= NULL
) {
123 state
->after_connect(state
->private_data
);
126 if (state
->result
== 0) {
127 tevent_req_done(req
);
128 return tevent_req_post(req
, ev
);
132 * The only errno indicating that an initial connect is still
133 * in flight is EINPROGRESS.
135 * This allows callers like open_socket_out_send() to reuse
136 * fds and call us with an fd for which the connect is still
137 * in flight. The proper thing to do for callers would be
138 * closing the fd and starting from scratch with a fresh
142 if (errno
!= EINPROGRESS
) {
143 tevent_req_error(req
, errno
);
144 return tevent_req_post(req
, ev
);
148 * Note for historic reasons TEVENT_FD_WRITE is not enough
149 * to get notified for POLLERR or EPOLLHUP even if they
150 * come together with POLLOUT. That means we need to
151 * use TEVENT_FD_READ in addition until we have
154 state
->fde
= tevent_add_fd(ev
, state
, fd
,
155 TEVENT_FD_ERROR
|TEVENT_FD_WRITE
,
156 async_connect_connected
, req
);
157 if (state
->fde
== NULL
) {
158 tevent_req_error(req
, ENOMEM
);
159 return tevent_req_post(req
, ev
);
164 static void async_connect_cleanup(struct tevent_req
*req
,
165 enum tevent_req_state req_state
)
167 struct async_connect_state
*state
=
168 tevent_req_data(req
, struct async_connect_state
);
170 TALLOC_FREE(state
->fde
);
171 if (state
->fd
!= -1) {
174 ret
= fcntl(state
->fd
, F_SETFL
, state
->old_sockflags
);
184 * fde event handler for connect(2)
185 * @param[in] ev The event context that sent us here
186 * @param[in] fde The file descriptor event associated with the connect
187 * @param[in] flags Indicate read/writeability of the socket
188 * @param[in] priv private data, "struct async_req *" in this case
191 static void async_connect_connected(struct tevent_context
*ev
,
192 struct tevent_fd
*fde
, uint16_t flags
,
195 struct tevent_req
*req
= talloc_get_type_abort(
196 priv
, struct tevent_req
);
197 struct async_connect_state
*state
=
198 tevent_req_data(req
, struct async_connect_state
);
200 int socket_error
= 0;
201 socklen_t slen
= sizeof(socket_error
);
203 ret
= getsockopt(state
->fd
, SOL_SOCKET
, SO_ERROR
,
204 &socket_error
, &slen
);
208 * According to Stevens this is the Solaris behaviour
209 * in case the connection encountered an error:
210 * getsockopt() fails, error is in errno
212 tevent_req_error(req
, errno
);
216 if (socket_error
!= 0) {
218 * Berkeley derived implementations (including) Linux
219 * return the pending error via socket_error.
221 tevent_req_error(req
, socket_error
);
225 tevent_req_done(req
);
/*
 * Collect the result of async_connect_send().
 *
 * Returns 0 on success; on failure returns -1 and stores the unix
 * error in *perrno.  (NOTE(review): body partly missing in this chunk;
 * reconstructed from the standard tevent_req_simple_recv_unix pattern.)
 */
int async_connect_recv(struct tevent_req *req, int *perrno)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perrno = err;
		return -1;
	}

	return 0;
}
/*
 * Per-request state for writev_send/recv.
 *
 * NOTE(review): fd, iov, count, total_size and flags are not visible in
 * this chunk but are used by the writev_* functions below — confirm
 * field names/order against the original file.
 */
struct writev_state {
	struct tevent_context *ev;
	struct tevent_queue_entry *queue_entry;	/* non-NULL while queued */
	int fd;
	struct tevent_fd *fde;
	struct iovec *iov;	/* talloc'ed copy, advanced as data is sent */
	int count;		/* remaining iovec entries */
	size_t total_size;	/* bytes written so far */
	uint16_t flags;		/* tevent fd flags to monitor */
	bool err_on_readability;
};
253 static void writev_cleanup(struct tevent_req
*req
,
254 enum tevent_req_state req_state
);
255 static bool writev_cancel(struct tevent_req
*req
);
256 static void writev_trigger(struct tevent_req
*req
, void *private_data
);
257 static void writev_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
258 uint16_t flags
, void *private_data
);
260 struct tevent_req
*writev_send(TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
,
261 struct tevent_queue
*queue
, int fd
,
262 bool err_on_readability
,
263 struct iovec
*iov
, int count
)
265 struct tevent_req
*req
;
266 struct writev_state
*state
;
268 req
= tevent_req_create(mem_ctx
, &state
, struct writev_state
);
274 state
->total_size
= 0;
275 state
->count
= count
;
276 state
->iov
= (struct iovec
*)talloc_memdup(
277 state
, iov
, sizeof(struct iovec
) * count
);
278 if (tevent_req_nomem(state
->iov
, req
)) {
279 return tevent_req_post(req
, ev
);
281 state
->flags
= TEVENT_FD_WRITE
| TEVENT_FD_ERROR
;
282 if (err_on_readability
) {
283 state
->flags
|= TEVENT_FD_READ
;
286 tevent_req_set_cleanup_fn(req
, writev_cleanup
);
287 tevent_req_set_cancel_fn(req
, writev_cancel
);
290 state
->fde
= tevent_add_fd(state
->ev
, state
, state
->fd
,
291 state
->flags
, writev_handler
, req
);
292 if (tevent_req_nomem(state
->fde
, req
)) {
293 return tevent_req_post(req
, ev
);
299 * writev_trigger tries a nonblocking write. If that succeeds,
300 * we can't directly notify the callback to call
301 * writev_recv. The callback would TALLOC_FREE(req) after
302 * calling writev_recv even before writev_trigger can inspect
305 tevent_req_defer_callback(req
, ev
);
307 state
->queue_entry
= tevent_queue_add_optimize_empty(
308 queue
, ev
, req
, writev_trigger
, NULL
);
309 if (tevent_req_nomem(state
->queue_entry
, req
)) {
310 return tevent_req_post(req
, ev
);
312 if (!tevent_req_is_in_progress(req
)) {
313 return tevent_req_post(req
, ev
);
318 static void writev_cleanup(struct tevent_req
*req
,
319 enum tevent_req_state req_state
)
321 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
323 TALLOC_FREE(state
->queue_entry
);
324 TALLOC_FREE(state
->fde
);
327 static bool writev_cancel(struct tevent_req
*req
)
329 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
331 if (state
->total_size
> 0) {
333 * We've already started to write :-(
338 TALLOC_FREE(state
->queue_entry
);
339 TALLOC_FREE(state
->fde
);
341 tevent_req_defer_callback(req
, state
->ev
);
342 tevent_req_error(req
, ECANCELED
);
346 static void writev_do(struct tevent_req
*req
, struct writev_state
*state
)
351 written
= writev(state
->fd
, state
->iov
, state
->count
);
352 if ((written
== -1) &&
355 (errno
== EWOULDBLOCK
))) {
356 /* retry after going through the tevent loop */
360 tevent_req_error(req
, errno
);
364 tevent_req_error(req
, EPIPE
);
367 state
->total_size
+= written
;
369 ok
= iov_advance(&state
->iov
, &state
->count
, written
);
371 tevent_req_error(req
, EIO
);
375 if (state
->count
== 0) {
376 tevent_req_done(req
);
381 static void writev_trigger(struct tevent_req
*req
, void *private_data
)
383 struct writev_state
*state
= tevent_req_data(req
, struct writev_state
);
385 state
->queue_entry
= NULL
;
387 writev_do(req
, state
);
388 if (!tevent_req_is_in_progress(req
)) {
392 state
->fde
= tevent_add_fd(state
->ev
, state
, state
->fd
, state
->flags
,
393 writev_handler
, req
);
394 if (tevent_req_nomem(state
->fde
, req
)) {
399 static void writev_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
400 uint16_t flags
, void *private_data
)
402 struct tevent_req
*req
= talloc_get_type_abort(
403 private_data
, struct tevent_req
);
404 struct writev_state
*state
=
405 tevent_req_data(req
, struct writev_state
);
407 if (flags
& TEVENT_FD_ERROR
) {
409 * There's an error, for legacy reasons
410 * we just use EPIPE instead of a more
411 * detailed error using
412 * samba_socket_poll_or_sock_error().
414 tevent_req_error(req
, EPIPE
);
418 if (flags
& TEVENT_FD_READ
) {
419 /* Readable and the caller wants an error on read. */
420 tevent_req_error(req
, EPIPE
);
424 writev_do(req
, state
);
427 ssize_t
writev_recv(struct tevent_req
*req
, int *perrno
)
429 struct writev_state
*state
=
430 tevent_req_data(req
, struct writev_state
);
433 if (tevent_req_is_unix_error(req
, perrno
)) {
434 tevent_req_received(req
);
437 ret
= state
->total_size
;
438 tevent_req_received(req
);
/*
 * Per-request state for read_packet_send/recv.
 *
 * NOTE(review): fd, buf, nread and private_data are not visible in this
 * chunk but are used by the read_packet_* functions below — confirm
 * names/order against the original file.
 */
struct read_packet_state {
	int fd;
	struct tevent_fd *fde;
	uint8_t *buf;		/* talloc'ed; its talloc size is the target */
	size_t nread;		/* bytes received so far */

	/*
	 * Callback deciding how many additional bytes are needed once
	 * the current target is filled; 0 = packet complete, -1 = invalid.
	 */
	ssize_t (*more)(uint8_t *buf, size_t buflen, void *private_data);
	void *private_data;
};
451 static void read_packet_cleanup(struct tevent_req
*req
,
452 enum tevent_req_state req_state
);
453 static void read_packet_handler(struct tevent_context
*ev
,
454 struct tevent_fd
*fde
,
455 uint16_t flags
, void *private_data
);
457 struct tevent_req
*read_packet_send(TALLOC_CTX
*mem_ctx
,
458 struct tevent_context
*ev
,
459 int fd
, size_t initial
,
460 ssize_t (*more
)(uint8_t *buf
,
465 struct tevent_req
*req
;
466 struct read_packet_state
*state
;
468 req
= tevent_req_create(mem_ctx
, &state
, struct read_packet_state
);
475 state
->private_data
= private_data
;
477 tevent_req_set_cleanup_fn(req
, read_packet_cleanup
);
479 state
->buf
= talloc_array(state
, uint8_t, initial
);
480 if (tevent_req_nomem(state
->buf
, req
)) {
481 return tevent_req_post(req
, ev
);
484 state
->fde
= tevent_add_fd(ev
, state
, fd
,
485 TEVENT_FD_READ
, read_packet_handler
,
487 if (tevent_req_nomem(state
->fde
, req
)) {
488 return tevent_req_post(req
, ev
);
493 static void read_packet_cleanup(struct tevent_req
*req
,
494 enum tevent_req_state req_state
)
496 struct read_packet_state
*state
=
497 tevent_req_data(req
, struct read_packet_state
);
499 TALLOC_FREE(state
->fde
);
502 static void read_packet_handler(struct tevent_context
*ev
,
503 struct tevent_fd
*fde
,
504 uint16_t flags
, void *private_data
)
506 struct tevent_req
*req
= talloc_get_type_abort(
507 private_data
, struct tevent_req
);
508 struct read_packet_state
*state
=
509 tevent_req_data(req
, struct read_packet_state
);
510 size_t total
= talloc_get_size(state
->buf
);
514 nread
= recv(state
->fd
, state
->buf
+state
->nread
, total
-state
->nread
,
516 if ((nread
== -1) && (errno
== ENOTSOCK
)) {
517 nread
= read(state
->fd
, state
->buf
+state
->nread
,
520 if ((nread
== -1) && (errno
== EINTR
)) {
525 tevent_req_error(req
, errno
);
529 tevent_req_error(req
, EPIPE
);
533 state
->nread
+= nread
;
534 if (state
->nread
< total
) {
535 /* Come back later */
540 * We got what was initially requested. See if "more" asks for -- more.
542 if (state
->more
== NULL
) {
543 /* Nobody to ask, this is a async read_data */
544 tevent_req_done(req
);
548 more
= state
->more(state
->buf
, total
, state
->private_data
);
550 /* We got an invalid packet, tell the caller */
551 tevent_req_error(req
, EIO
);
555 /* We're done, full packet received */
556 tevent_req_done(req
);
560 if (total
+ more
< total
) {
561 tevent_req_error(req
, EMSGSIZE
);
565 tmp
= talloc_realloc(state
, state
->buf
, uint8_t, total
+more
);
566 if (tevent_req_nomem(tmp
, req
)) {
572 ssize_t
read_packet_recv(struct tevent_req
*req
, TALLOC_CTX
*mem_ctx
,
573 uint8_t **pbuf
, int *perrno
)
575 struct read_packet_state
*state
=
576 tevent_req_data(req
, struct read_packet_state
);
578 if (tevent_req_is_unix_error(req
, perrno
)) {
579 tevent_req_received(req
);
582 *pbuf
= talloc_move(mem_ctx
, &state
->buf
);
583 tevent_req_received(req
);
584 return talloc_get_size(*pbuf
);
/*
 * Per-request state for wait_for_read_send/recv.
 *
 * NOTE(review): fd and check_errors are not visible in this chunk but
 * are set by wait_for_read_send and read by wait_for_read_done — confirm.
 */
struct wait_for_read_state {
	struct tevent_fd *fde;
	int fd;
	bool check_errors;	/* probe for EOF/errors before reporting readable */
};
593 static void wait_for_read_cleanup(struct tevent_req
*req
,
594 enum tevent_req_state req_state
);
595 static void wait_for_read_done(struct tevent_context
*ev
,
596 struct tevent_fd
*fde
,
600 struct tevent_req
*wait_for_read_send(TALLOC_CTX
*mem_ctx
,
601 struct tevent_context
*ev
, int fd
,
604 struct tevent_req
*req
;
605 struct wait_for_read_state
*state
;
607 req
= tevent_req_create(mem_ctx
, &state
, struct wait_for_read_state
);
612 tevent_req_set_cleanup_fn(req
, wait_for_read_cleanup
);
614 state
->fde
= tevent_add_fd(ev
, state
, fd
, TEVENT_FD_READ
,
615 wait_for_read_done
, req
);
616 if (tevent_req_nomem(state
->fde
, req
)) {
617 return tevent_req_post(req
, ev
);
621 state
->check_errors
= check_errors
;
625 static void wait_for_read_cleanup(struct tevent_req
*req
,
626 enum tevent_req_state req_state
)
628 struct wait_for_read_state
*state
=
629 tevent_req_data(req
, struct wait_for_read_state
);
631 TALLOC_FREE(state
->fde
);
634 static void wait_for_read_done(struct tevent_context
*ev
,
635 struct tevent_fd
*fde
,
639 struct tevent_req
*req
= talloc_get_type_abort(
640 private_data
, struct tevent_req
);
641 struct wait_for_read_state
*state
=
642 tevent_req_data(req
, struct wait_for_read_state
);
645 if ((flags
& TEVENT_FD_READ
) == 0) {
649 if (!state
->check_errors
) {
650 tevent_req_done(req
);
654 ret
= ioctl(state
->fd
, FIONREAD
, &available
);
656 if ((ret
== -1) && (errno
== EINTR
)) {
657 /* come back later */
662 tevent_req_error(req
, errno
);
666 if (available
== 0) {
667 tevent_req_error(req
, EPIPE
);
671 tevent_req_done(req
);
/*
 * Collect the result of wait_for_read_send(): true on readability,
 * false with the unix error in *perr.  (NOTE(review): tail of the body
 * missing in this chunk; reconstructed from the simple_recv pattern.)
 */
bool wait_for_read_recv(struct tevent_req *req, int *perr)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perr = err;
		return false;
	}

	return true;
}
686 struct accept_state
{
687 struct tevent_fd
*fde
;
689 struct samba_sockaddr addr
;
693 static void accept_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
694 uint16_t flags
, void *private_data
);
696 struct tevent_req
*accept_send(TALLOC_CTX
*mem_ctx
, struct tevent_context
*ev
,
699 struct tevent_req
*req
;
700 struct accept_state
*state
;
702 req
= tevent_req_create(mem_ctx
, &state
, struct accept_state
);
707 state
->listen_sock
= listen_sock
;
709 state
->fde
= tevent_add_fd(ev
, state
, listen_sock
, TEVENT_FD_READ
,
710 accept_handler
, req
);
711 if (tevent_req_nomem(state
->fde
, req
)) {
712 return tevent_req_post(req
, ev
);
717 static void accept_handler(struct tevent_context
*ev
, struct tevent_fd
*fde
,
718 uint16_t flags
, void *private_data
)
720 struct tevent_req
*req
= talloc_get_type_abort(
721 private_data
, struct tevent_req
);
722 struct accept_state
*state
= tevent_req_data(req
, struct accept_state
);
725 TALLOC_FREE(state
->fde
);
727 if ((flags
& TEVENT_FD_READ
) == 0) {
728 tevent_req_error(req
, EIO
);
732 state
->addr
.sa_socklen
= sizeof(state
->addr
.u
);
734 ret
= accept(state
->listen_sock
,
736 &state
->addr
.sa_socklen
);
737 if ((ret
== -1) && (errno
== EINTR
)) {
742 tevent_req_error(req
, errno
);
745 smb_set_close_on_exec(ret
);
747 tevent_req_done(req
);
750 int accept_recv(struct tevent_req
*req
,
752 struct samba_sockaddr
*paddr
,
755 struct accept_state
*state
= tevent_req_data(req
, struct accept_state
);
756 int sock
= state
->sock
;
759 if (tevent_req_is_unix_error(req
, &err
)) {
763 tevent_req_received(req
);
766 if (listen_sock
!= NULL
) {
767 *listen_sock
= state
->listen_sock
;
770 *paddr
= state
->addr
;
772 tevent_req_received(req
);