/*
 * Simulate the Posix AIO using mmap/fork
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "includes.h"
#include "system/filesys.h"
#include "system/shmem.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/async_req/async_sock.h"
#include "lib/util/tevent_unix.h"
struct aio_fork_config {
	bool erratic_testing_mode;
};

struct mmap_area {
	size_t size;
	volatile void *ptr;
};

static int mmap_area_destructor(struct mmap_area *area)
{
	munmap((void *)area->ptr, area->size);
	return 0;
}
static struct mmap_area *mmap_area_init(TALLOC_CTX *mem_ctx, size_t size)
{
	struct mmap_area *result;
	int fd;

	result = talloc(mem_ctx, struct mmap_area);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	fd = open("/dev/zero", O_RDWR);
	if (fd == -1) {
		DEBUG(3, ("open(\"/dev/zero\") failed: %s\n",
			  strerror(errno)));
		goto fail;
	}

	result->ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			   MAP_SHARED|MAP_FILE, fd, 0);
	close(fd);

	if (result->ptr == MAP_FAILED) {
		DEBUG(1, ("mmap failed: %s\n", strerror(errno)));
		goto fail;
	}

	result->size = size;
	talloc_set_destructor(result, mmap_area_destructor);

	return result;

fail:
	TALLOC_FREE(result);
	return NULL;
}
enum cmd_type {
	READ_CMD,
	WRITE_CMD,
	FSYNC_CMD
};

static const char *cmd_type_str(enum cmd_type cmd)
{
	const char *result;

	switch (cmd) {
	case READ_CMD:
		result = "READ";
		break;
	case WRITE_CMD:
		result = "WRITE";
		break;
	case FSYNC_CMD:
		result = "FSYNC";
		break;
	default:
		result = "<UNKNOWN>";
		break;
	}
	return result;
}

struct rw_cmd {
	size_t n;
	off_t offset;
	enum cmd_type cmd;
	bool erratic_testing_mode;
};

struct rw_ret {
	ssize_t size;
	int ret_errno;
};

struct aio_child_list;

struct aio_child {
	struct aio_child *prev, *next;
	struct aio_child_list *list;
	pid_t pid;
	int sockfd;
	struct mmap_area *map;
	bool dont_delete;	/* Marked as in use since last cleanup */
	bool busy;
};

struct aio_child_list {
	struct aio_child *children;
	struct tevent_timer *cleanup_event;
};
static void free_aio_children(void **p)
{
	TALLOC_FREE(*p);
}
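/*
 * read_fd/write_fd move a command structure plus an open file descriptor
 * across the AF_UNIX socketpair that connects the parent smbd to an aio
 * child. The descriptor travels as SCM_RIGHTS ancillary data where
 * msghdr.msg_control is available, with a fallback to the older
 * msg_accrights interface.
 */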
static ssize_t read_fd(int fd, void *ptr, size_t nbytes, int *recvfd)
{
	struct msghdr msg;
	struct iovec iov[1];
	ssize_t n;
#ifndef HAVE_MSGHDR_MSG_CONTROL
	int newfd;
#endif

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);
#else
#if HAVE_MSGHDR_MSG_ACCTRIGHTS
	msg.msg_accrights = (caddr_t) &newfd;
	msg.msg_accrightslen = sizeof(int);
#else
#error Can not pass file descriptors
#endif
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	if ( (n = recvmsg(fd, &msg, 0)) <= 0) {
		return n;
	}

#ifdef HAVE_MSGHDR_MSG_CONTROL
	if ((cmptr = CMSG_FIRSTHDR(&msg)) != NULL
	    && cmptr->cmsg_len == CMSG_LEN(sizeof(int))) {
		if (cmptr->cmsg_level != SOL_SOCKET) {
			DEBUG(10, ("control level != SOL_SOCKET"));
			errno = EINVAL;
			return -1;
		}
		if (cmptr->cmsg_type != SCM_RIGHTS) {
			DEBUG(10, ("control type != SCM_RIGHTS"));
			errno = EINVAL;
			return -1;
		}
		memcpy(recvfd, CMSG_DATA(cmptr), sizeof(*recvfd));
	} else {
		*recvfd = -1;	/* descriptor was not passed */
	}
#else
	if (msg.msg_accrightslen == sizeof(int)) {
		*recvfd = newfd;
	} else {
		*recvfd = -1;	/* descriptor was not passed */
	}
#endif

	return n;
}
static ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd)
{
	struct msghdr msg;
	struct iovec iov[1];

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	ZERO_STRUCT(msg);
	ZERO_STRUCT(control_un);

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);

	cmptr = CMSG_FIRSTHDR(&msg);
	cmptr->cmsg_len = CMSG_LEN(sizeof(int));
	cmptr->cmsg_level = SOL_SOCKET;
	cmptr->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmptr), &sendfd, sizeof(sendfd));
#else
	ZERO_STRUCT(msg);
	msg.msg_accrights = (caddr_t) &sendfd;
	msg.msg_accrightslen = sizeof(int);
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	ZERO_STRUCT(iov);
	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	return (sendmsg(fd, &msg, 0));
}
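/*
 * aio_child_cleanup runs from a 30-second tevent timer: children that were
 * neither busy nor marked in use since the previous round are reaped, and
 * the timer re-arms itself for as long as children remain.
 */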
static void aio_child_cleanup(struct tevent_context *event_ctx,
			      struct tevent_timer *te,
			      struct timeval now,
			      void *private_data)
{
	struct aio_child_list *list = talloc_get_type_abort(
		private_data, struct aio_child_list);
	struct aio_child *child, *next;

	TALLOC_FREE(list->cleanup_event);

	for (child = list->children; child != NULL; child = next) {
		next = child->next;

		if (child->busy) {
			DEBUG(10, ("child %d currently active\n",
				   (int)child->pid));
			continue;
		}

		if (child->dont_delete) {
			DEBUG(10, ("Child %d was active since last cleanup\n",
				   (int)child->pid));
			child->dont_delete = false;
			continue;
		}

		DEBUG(10, ("Child %d idle for more than 30 seconds, "
			   "deleting\n", (int)child->pid));

		TALLOC_FREE(child);
	}

	if (list->children != NULL) {
		/*
		 * Re-schedule the next cleanup round
		 */
		list->cleanup_event = tevent_add_timer(server_event_context(), list,
						       timeval_add(&now, 30, 0),
						       aio_child_cleanup, list);
	}
}
static struct aio_child_list *init_aio_children(struct vfs_handle_struct *handle)
{
	struct aio_child_list *data = NULL;

	if (SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_GET_DATA(handle, data, struct aio_child_list,
					return NULL);
	} else {
		data = talloc_zero(NULL, struct aio_child_list);
		if (data == NULL) {
			return NULL;
		}
	}

	/*
	 * Regardless of whether the child_list had been around or not, make
	 * sure that we have a cleanup timed event. This timed event will
	 * delete itself when it finds that no children are around anymore.
	 */

	if (data->cleanup_event == NULL) {
		data->cleanup_event = tevent_add_timer(server_event_context(), data,
						       timeval_current_ofs(30, 0),
						       aio_child_cleanup, data);
		if (data->cleanup_event == NULL) {
			TALLOC_FREE(data);
			return NULL;
		}
	}

	if (!SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_SET_DATA(handle, data, free_aio_children,
					struct aio_child_list, return False);
	}

	return data;
}
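/*
 * Main loop of a forked aio child: receive a struct rw_cmd together with the
 * file descriptor to act on, perform the pread/pwrite/fsync against the
 * shared mmap area, then send a struct rw_ret back to the parent and wait
 * for the next command.
 */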
static void aio_child_loop(int sockfd, struct mmap_area *map)
{
	while (true) {
		int fd = -1;
		ssize_t ret;
		struct rw_cmd cmd_struct;
		struct rw_ret ret_struct;

		ret = read_fd(sockfd, &cmd_struct, sizeof(cmd_struct), &fd);
		if (ret != sizeof(cmd_struct)) {
			DEBUG(10, ("read_fd returned %d: %s\n", (int)ret,
				   strerror(errno)));
			exit(1);
		}

		DEBUG(10, ("aio_child_loop: %s %d bytes at %d from fd %d\n",
			   cmd_type_str(cmd_struct.cmd),
			   (int)cmd_struct.n, (int)cmd_struct.offset, fd));

		if (cmd_struct.erratic_testing_mode) {
			/*
			 * For developer testing, we want erratic behaviour for
			 * async I/O times
			 */
			uint8_t randval;
			unsigned msecs;
			/*
			 * use generate_random_buffer, we just forked from a
			 * common parent state
			 */
			generate_random_buffer(&randval, sizeof(randval));
			msecs = randval + 20;
			DEBUG(10, ("delaying for %u msecs\n", msecs));
			smb_msleep(msecs);
		}

		ZERO_STRUCT(ret_struct);

		switch (cmd_struct.cmd) {
		case READ_CMD:
			ret_struct.size = sys_pread(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
#if 0
			/* This breaks "make test" when run with aio_fork module. */
			ret_struct.size = MAX(1, ret_struct.size * 0.9);
#endif
			break;
		case WRITE_CMD:
			ret_struct.size = sys_pwrite(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
			break;
		case FSYNC_CMD:
			ret_struct.size = fsync(fd);
			break;
		default:
			ret_struct.size = -1;
			errno = EINVAL;
		}

		DEBUG(10, ("aio_child_loop: syscall returned %d\n",
			   (int)ret_struct.size));

		if (ret_struct.size == -1) {
			ret_struct.ret_errno = errno;
		}

		/*
		 * Close the fd before telling our parent we're done. The
		 * parent might close and re-open the file very quickly, and
		 * with system-level share modes (GPFS) we would get an
		 * unjustified SHARING_VIOLATION.
		 */
		close(fd);

		ret = write_data(sockfd, (char *)&ret_struct,
				 sizeof(ret_struct));
		if (ret != sizeof(ret_struct)) {
			DEBUG(10, ("could not write ret_struct: %s\n",
				   strerror(errno)));
			exit(2);
		}
	}
}
static int aio_child_destructor(struct aio_child *child)
{
	char c = 0;

	SMB_ASSERT(!child->busy);

	DEBUG(10, ("aio_child_destructor: removing child %d on fd %d\n",
		   child->pid, child->sockfd));

	/*
	 * closing the sockfd makes the child not return from recvmsg() on RHEL
	 * 5.5 so instead force the child to exit by writing bad data to it
	 */
	write(child->sockfd, &c, sizeof(c));
	close(child->sockfd);
	DLIST_REMOVE(child->list->children, child);
	return 0;
}
/*
 * We have to close all fd's in open files, we might incorrectly hold a system
 * level share mode on a file.
 */

static struct files_struct *close_fsp_fd(struct files_struct *fsp,
					 void *private_data)
{
	if ((fsp->fh != NULL) && (fsp->fh->fd != -1)) {
		close(fsp->fh->fd);
		fsp->fh->fd = -1;
	}
	return NULL;
}
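/*
 * create_aio_child sets up one worker: a shared mmap area for the data
 * transfer, an AF_UNIX socketpair for commands and results, and a fork().
 * The child closes all inherited file handles and enters aio_child_loop();
 * the parent keeps the other end of the socketpair and links the child into
 * the per-handle list.
 */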
static int create_aio_child(struct smbd_server_connection *sconn,
			    struct aio_child_list *children,
			    size_t map_size,
			    struct aio_child **presult)
{
	struct aio_child *result;
	int fdpair[2];
	int ret;

	fdpair[0] = fdpair[1] = -1;

	result = talloc_zero(children, struct aio_child);
	if (result == NULL) {
		return ENOMEM;
	}

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fdpair) == -1) {
		ret = errno;
		DEBUG(10, ("socketpair() failed: %s\n", strerror(errno)));
		goto fail;
	}

	DEBUG(10, ("fdpair = %d/%d\n", fdpair[0], fdpair[1]));

	result->map = mmap_area_init(result, map_size);
	if (result->map == NULL) {
		ret = ENOMEM;
		DEBUG(0, ("Could not create mmap area\n"));
		goto fail;
	}

	result->pid = fork();
	if (result->pid == -1) {
		ret = errno;
		DEBUG(0, ("fork failed: %s\n", strerror(errno)));
		goto fail;
	}

	if (result->pid == 0) {
		close(fdpair[0]);
		result->sockfd = fdpair[1];
		files_forall(sconn, close_fsp_fd, NULL);
		aio_child_loop(result->sockfd, result->map);
	}

	DEBUG(10, ("Child %d created with sockfd %d\n",
		   result->pid, fdpair[0]));

	result->sockfd = fdpair[0];
	close(fdpair[1]);

	result->list = children;
	DLIST_ADD(children->children, result);

	talloc_set_destructor(result, aio_child_destructor);

	*presult = result;
	return 0;

fail:
	if (fdpair[0] != -1) close(fdpair[0]);
	if (fdpair[1] != -1) close(fdpair[1]);
	TALLOC_FREE(result);

	return ret;
}
static int get_idle_child(struct vfs_handle_struct *handle,
			  struct aio_child **pchild)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		return ENOMEM;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (!child->busy) {
			break;
		}
	}

	if (child == NULL) {
		int ret;

		DEBUG(10, ("no idle child found, creating new one\n"));

		ret = create_aio_child(handle->conn->sconn, children,
				       128*1024, &child);
		if (ret != 0) {
			DEBUG(10, ("create_aio_child failed: %s\n",
				   strerror(ret)));
			return ret;
		}
	}

	child->dont_delete = true;
	child->busy = true;

	*pchild = child;
	return 0;
}
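/*
 * The pread/pwrite/fsync _send functions below all follow the same pattern:
 * pick (or fork) an idle child, hand it the command and the fd via
 * write_fd(), then use read_packet_send() to collect the struct rw_ret reply
 * asynchronously. The _done callbacks unpack that reply and the _recv
 * functions translate it into the usual result/errno pair.
 */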
struct aio_fork_pread_state {
	struct aio_child *child;
	ssize_t ret;
	int err;
};

static void aio_fork_pread_done(struct tevent_req *subreq);

static struct tevent_req *aio_fork_pread_send(struct vfs_handle_struct *handle,
					      TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct files_struct *fsp,
					      void *data,
					      size_t n, off_t offset)
{
	struct tevent_req *req, *subreq;
	struct aio_fork_pread_state *state;
	struct rw_cmd cmd;
	ssize_t written;
	int err;
	struct aio_fork_config *config;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct aio_fork_config,
				return NULL);

	req = tevent_req_create(mem_ctx, &state, struct aio_fork_pread_state);
	if (req == NULL) {
		return NULL;
	}

	if (n > 128*1024) {
		/* TODO: support variable buffers */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	err = get_idle_child(handle, &state->child);
	if (err != 0) {
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	ZERO_STRUCT(cmd);
	cmd.n = n;
	cmd.offset = offset;
	cmd.cmd = READ_CMD;
	cmd.erratic_testing_mode = config->erratic_testing_mode;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)state->child->pid));

	/*
	 * Not making this async. We're writing into an empty unix
	 * domain socket. This should never block.
	 */
	written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
			   fsp->fh->fd);
	if (written == -1) {
		err = errno;

		TALLOC_FREE(state->child);

		DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	subreq = read_packet_send(state, ev, state->child->sockfd,
				  sizeof(struct rw_ret), NULL, NULL);
	if (tevent_req_nomem(subreq, req)) {
		TALLOC_FREE(state->child); /* we sent sth down */
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, aio_fork_pread_done, req);
	return req;
}
static void aio_fork_pread_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct aio_fork_pread_state *state = tevent_req_data(
		req, struct aio_fork_pread_state);
	ssize_t nread;
	uint8_t *buf;
	int err;
	struct rw_ret *retbuf;

	nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
	TALLOC_FREE(subreq);
	if (nread == -1) {
		TALLOC_FREE(state->child);
		tevent_req_error(req, err);
		return;
	}

	state->child->busy = false;

	retbuf = (struct rw_ret *)buf;
	state->ret = retbuf->size;
	state->err = retbuf->ret_errno;
	tevent_req_done(req);
}
static ssize_t aio_fork_pread_recv(struct tevent_req *req, int *err)
{
	struct aio_fork_pread_state *state = tevent_req_data(
		req, struct aio_fork_pread_state);

	if (tevent_req_is_unix_error(req, err)) {
		return -1;
	}
	if (state->ret == -1) {
		*err = state->err;
	}
	return state->ret;
}
struct aio_fork_pwrite_state {
	struct aio_child *child;
	ssize_t ret;
	int err;
};

static void aio_fork_pwrite_done(struct tevent_req *subreq);

static struct tevent_req *aio_fork_pwrite_send(
	struct vfs_handle_struct *handle, TALLOC_CTX *mem_ctx,
	struct tevent_context *ev, struct files_struct *fsp,
	const void *data, size_t n, off_t offset)
{
	struct tevent_req *req, *subreq;
	struct aio_fork_pwrite_state *state;
	struct rw_cmd cmd;
	ssize_t written;
	int err;
	struct aio_fork_config *config;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct aio_fork_config,
				return NULL);

	req = tevent_req_create(mem_ctx, &state, struct aio_fork_pwrite_state);
	if (req == NULL) {
		return NULL;
	}

	if (n > 128*1024) {
		/* TODO: support variable buffers */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	err = get_idle_child(handle, &state->child);
	if (err != 0) {
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	ZERO_STRUCT(cmd);
	cmd.n = n;
	cmd.offset = offset;
	cmd.cmd = WRITE_CMD;
	cmd.erratic_testing_mode = config->erratic_testing_mode;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)state->child->pid));

	/*
	 * Not making this async. We're writing into an empty unix
	 * domain socket. This should never block.
	 */
	written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
			   fsp->fh->fd);
	if (written == -1) {
		err = errno;

		TALLOC_FREE(state->child);

		DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	subreq = read_packet_send(state, ev, state->child->sockfd,
				  sizeof(struct rw_ret), NULL, NULL);
	if (tevent_req_nomem(subreq, req)) {
		TALLOC_FREE(state->child); /* we sent sth down */
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, aio_fork_pwrite_done, req);
	return req;
}
static void aio_fork_pwrite_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct aio_fork_pwrite_state *state = tevent_req_data(
		req, struct aio_fork_pwrite_state);
	ssize_t nread;
	uint8_t *buf;
	int err;
	struct rw_ret *retbuf;

	nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
	TALLOC_FREE(subreq);
	if (nread == -1) {
		TALLOC_FREE(state->child);
		tevent_req_error(req, err);
		return;
	}

	state->child->busy = false;

	retbuf = (struct rw_ret *)buf;
	state->ret = retbuf->size;
	state->err = retbuf->ret_errno;
	tevent_req_done(req);
}
static ssize_t aio_fork_pwrite_recv(struct tevent_req *req, int *err)
{
	struct aio_fork_pwrite_state *state = tevent_req_data(
		req, struct aio_fork_pwrite_state);

	if (tevent_req_is_unix_error(req, err)) {
		return -1;
	}
	if (state->ret == -1) {
		*err = state->err;
	}
	return state->ret;
}
struct aio_fork_fsync_state {
	struct aio_child *child;
	ssize_t ret;
	int err;
};

static void aio_fork_fsync_done(struct tevent_req *subreq);

static struct tevent_req *aio_fork_fsync_send(
	struct vfs_handle_struct *handle, TALLOC_CTX *mem_ctx,
	struct tevent_context *ev, struct files_struct *fsp)
{
	struct tevent_req *req, *subreq;
	struct aio_fork_fsync_state *state;
	struct rw_cmd cmd;
	ssize_t written;
	int err;
	struct aio_fork_config *config;

	SMB_VFS_HANDLE_GET_DATA(handle, config,
				struct aio_fork_config,
				return NULL);

	req = tevent_req_create(mem_ctx, &state, struct aio_fork_fsync_state);
	if (req == NULL) {
		return NULL;
	}

	err = get_idle_child(handle, &state->child);
	if (err != 0) {
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	ZERO_STRUCT(cmd);
	cmd.cmd = FSYNC_CMD;
	cmd.erratic_testing_mode = config->erratic_testing_mode;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)state->child->pid));

	/*
	 * Not making this async. We're writing into an empty unix
	 * domain socket. This should never block.
	 */
	written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
			   fsp->fh->fd);
	if (written == -1) {
		err = errno;

		TALLOC_FREE(state->child);

		DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	subreq = read_packet_send(state, ev, state->child->sockfd,
				  sizeof(struct rw_ret), NULL, NULL);
	if (tevent_req_nomem(subreq, req)) {
		TALLOC_FREE(state->child); /* we sent sth down */
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, aio_fork_fsync_done, req);
	return req;
}
static void aio_fork_fsync_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct aio_fork_fsync_state *state = tevent_req_data(
		req, struct aio_fork_fsync_state);
	ssize_t nread;
	uint8_t *buf;
	int err;
	struct rw_ret *retbuf;

	nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
	TALLOC_FREE(subreq);
	if (nread == -1) {
		TALLOC_FREE(state->child);
		tevent_req_error(req, err);
		return;
	}

	state->child->busy = false;

	retbuf = (struct rw_ret *)buf;
	state->ret = retbuf->size;
	state->err = retbuf->ret_errno;
	tevent_req_done(req);
}
static int aio_fork_fsync_recv(struct tevent_req *req, int *err)
{
	struct aio_fork_fsync_state *state = tevent_req_data(
		req, struct aio_fork_fsync_state);

	if (tevent_req_is_unix_error(req, err)) {
		return -1;
	}
	if (state->ret == -1) {
		*err = state->err;
	}
	return state->ret;
}
static int aio_fork_connect(vfs_handle_struct *handle, const char *service,
			    const char *user)
{
	int ret;
	struct aio_fork_config *config;

	ret = SMB_VFS_NEXT_CONNECT(handle, service, user);
	if (ret < 0) {
		return ret;
	}

	config = talloc_zero(handle->conn, struct aio_fork_config);
	if (!config) {
		SMB_VFS_NEXT_DISCONNECT(handle);
		DEBUG(0, ("talloc_zero() failed\n"));
		return -1;
	}

	config->erratic_testing_mode = lp_parm_bool(SNUM(handle->conn), "vfs_aio_fork",
						    "erratic_testing_mode", false);

	SMB_VFS_HANDLE_SET_DATA(handle, config,
				NULL, struct aio_fork_config,
				return -1);

	/*********************************************************************
	 * How many threads to initialize ?
	 * 100 per process seems insane as a default until you realize that
	 * (a) Threads terminate after 1 second when idle.
	 * (b) Throttling is done in SMB2 via the crediting algorithm.
	 * (c) SMB1 clients are limited to max_mux (50) outstanding
	 *     requests and Windows clients don't use this anyway.
	 * Essentially we want this to be unlimited unless smb.conf
	 * says different.
	 *********************************************************************/
	aio_pending_size = 100;
	return 0;
}
static struct vfs_fn_pointers vfs_aio_fork_fns = {
	.connect_fn = aio_fork_connect,
	.pread_send_fn = aio_fork_pread_send,
	.pread_recv_fn = aio_fork_pread_recv,
	.pwrite_send_fn = aio_fork_pwrite_send,
	.pwrite_recv_fn = aio_fork_pwrite_recv,
	.fsync_send_fn = aio_fork_fsync_send,
	.fsync_recv_fn = aio_fork_fsync_recv,
};
NTSTATUS vfs_aio_fork_init(void);
NTSTATUS vfs_aio_fork_init(void)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"aio_fork", &vfs_aio_fork_fns);
}
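/*
 * Example smb.conf usage, a sketch assuming the module is built and
 * installed under the registered name "aio_fork". The parametric option
 * name follows the lp_parm_bool() call in aio_fork_connect() above:
 *
 *   [share]
 *       vfs objects = aio_fork
 *       vfs_aio_fork:erratic_testing_mode = no
 */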