/*
 * Simulate the Posix AIO using mmap/fork
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "system/filesys.h"
#include "system/shmem.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/async_req/async_sock.h"
#include "lib/util/tevent_unix.h"
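/*
 * The mmap_area bookkeeping structure is not shown in this listing.  A
 * minimal sketch, assuming only the two members used by
 * mmap_area_destructor() and mmap_area_init() below; the volatile
 * qualifier is inferred from the (void *) casts at the use sites, since
 * the area is shared with a forked child.
 */
struct mmap_area {
        size_t size;            /* length of the mapping */
        volatile void *ptr;     /* start of the shared mapping */
};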
static int mmap_area_destructor(struct mmap_area *area)
{
        munmap((void *)area->ptr, area->size);
        return 0;
}
static struct mmap_area *mmap_area_init(TALLOC_CTX *mem_ctx, size_t size)
{
        struct mmap_area *result;
        int fd;

        result = talloc(mem_ctx, struct mmap_area);
        if (result == NULL) {
                DEBUG(0, ("talloc failed\n"));
                return NULL;
        }

        fd = open("/dev/zero", O_RDWR);
        if (fd == -1) {
                DEBUG(3, ("open(\"/dev/zero\") failed: %s\n",
                          strerror(errno)));
                TALLOC_FREE(result);
                return NULL;
        }

        result->ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
                           MAP_SHARED|MAP_FILE, fd, 0);
        if (result->ptr == MAP_FAILED) {
                DEBUG(1, ("mmap failed: %s\n", strerror(errno)));
                TALLOC_FREE(result);
                return NULL;
        }

        result->size = size;
        talloc_set_destructor(result, mmap_area_destructor);

        return result;
}
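/*
 * The wire structures exchanged over the socketpair and the size of the
 * shared area are not part of this listing.  What follows is a minimal
 * reconstruction: the member names come from how cmd_struct and
 * ret_struct are used in aio_child_loop() below, but the enum value
 * names, the member order and the AIO_FORK_SHMEM_SIZE placeholder are
 * assumptions introduced to keep the reconstruction self-consistent.
 */
#define AIO_FORK_SHMEM_SIZE (128*1024)  /* placeholder size for the shared area */

enum cmd_type {
        READ_CMD,
        WRITE_CMD,
        FSYNC_CMD
};

struct rw_cmd {
        size_t n;               /* number of bytes to transfer */
        off_t offset;           /* file offset for pread/pwrite */
        enum cmd_type cmd;      /* operation the child should perform */
};

struct rw_ret {
        ssize_t size;           /* return value of the syscall in the child */
        int ret_errno;          /* errno, valid when size == -1 */
};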
static const char *cmd_type_str(enum cmd_type cmd)
{
        const char *result;

        switch (cmd) {
        /* one case per command type returning its name */
        default:
                result = "<UNKNOWN>";
                break;
        }
        return result;
}
struct aio_child_list;

struct aio_child {
        struct aio_child *prev, *next;
        struct aio_child_list *list;
        pid_t pid;              /* worker process */
        int sockfd;             /* parent's end of the socketpair */
        struct mmap_area *map;
        bool dont_delete;       /* Marked as in use since last cleanup */
        bool busy;              /* currently serving a request */
};

struct aio_child_list {
        struct aio_child *children;
        struct timed_event *cleanup_event;
};
static void free_aio_children(void **p)
{
        TALLOC_FREE(*p);
}
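/*
 * read_fd() and write_fd() transfer an open file descriptor between the
 * parent smbd and a worker child over their AF_UNIX socketpair: where
 * struct msghdr has msg_control, the descriptor travels as SCM_RIGHTS
 * ancillary data; on older systems the msg_accrights interface is used
 * instead.  The ordinary payload (ptr/nbytes) is carried in the iovec
 * alongside the descriptor.
 */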
static ssize_t read_fd(int fd, void *ptr, size_t nbytes, int *recvfd)
{
        struct msghdr msg;
        struct iovec iov[1];
        ssize_t n;
#ifndef HAVE_MSGHDR_MSG_CONTROL
        int newfd;
#endif

#ifdef HAVE_MSGHDR_MSG_CONTROL
        union {
                struct cmsghdr cm;
                char control[CMSG_SPACE(sizeof(int))];
        } control_un;
        struct cmsghdr *cmptr;

        msg.msg_control = control_un.control;
        msg.msg_controllen = sizeof(control_un.control);
#else
#if HAVE_MSGHDR_MSG_ACCTRIGHTS
        msg.msg_accrights = (caddr_t) &newfd;
        msg.msg_accrightslen = sizeof(int);
#else
#error Can not pass file descriptors
#endif
#endif

        msg.msg_name = NULL;
        msg.msg_namelen = 0;

        iov[0].iov_base = (void *)ptr;
        iov[0].iov_len = nbytes;
        msg.msg_iov = iov;
        msg.msg_iovlen = 1;

        if ( (n = recvmsg(fd, &msg, 0)) <= 0) {
                return (n);
        }

#ifdef HAVE_MSGHDR_MSG_CONTROL
        if ((cmptr = CMSG_FIRSTHDR(&msg)) != NULL
            && cmptr->cmsg_len == CMSG_LEN(sizeof(int))) {
                if (cmptr->cmsg_level != SOL_SOCKET) {
                        DEBUG(10, ("control level != SOL_SOCKET"));
                        errno = EINVAL;
                        return -1;
                }
                if (cmptr->cmsg_type != SCM_RIGHTS) {
                        DEBUG(10, ("control type != SCM_RIGHTS"));
                        errno = EINVAL;
                        return -1;
                }
                memcpy(recvfd, CMSG_DATA(cmptr), sizeof(*recvfd));
        } else {
                *recvfd = -1;           /* descriptor was not passed */
        }
#else
        if (msg.msg_accrightslen == sizeof(int)) {
                *recvfd = newfd;
        } else {
                *recvfd = -1;           /* descriptor was not passed */
        }
#endif

        return (n);
}
static ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd)
{
        struct msghdr msg;
        struct iovec iov[1];

#ifdef HAVE_MSGHDR_MSG_CONTROL
        union {
                struct cmsghdr cm;
                char control[CMSG_SPACE(sizeof(int))];
        } control_un;
        struct cmsghdr *cmptr;

        ZERO_STRUCT(msg);
        ZERO_STRUCT(control_un);

        msg.msg_control = control_un.control;
        msg.msg_controllen = sizeof(control_un.control);

        cmptr = CMSG_FIRSTHDR(&msg);
        cmptr->cmsg_len = CMSG_LEN(sizeof(int));
        cmptr->cmsg_level = SOL_SOCKET;
        cmptr->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmptr), &sendfd, sizeof(sendfd));
#else
        ZERO_STRUCT(msg);
        msg.msg_accrights = (caddr_t) &sendfd;
        msg.msg_accrightslen = sizeof(int);
#endif

        msg.msg_name = NULL;
        msg.msg_namelen = 0;

        iov[0].iov_base = (void *)ptr;
        iov[0].iov_len = nbytes;
        msg.msg_iov = iov;
        msg.msg_iovlen = 1;

        return (sendmsg(fd, &msg, 0));
}
static void aio_child_cleanup(struct event_context *event_ctx,
                              struct timed_event *te,
                              struct timeval now,
                              void *private_data)
{
        struct aio_child_list *list = talloc_get_type_abort(
                private_data, struct aio_child_list);
        struct aio_child *child, *next;

        TALLOC_FREE(list->cleanup_event);

        for (child = list->children; child != NULL; child = next) {
                next = child->next;

                if (child->busy) {
                        DEBUG(10, ("child %d currently active\n",
                                   (int)child->pid));
                        continue;
                }

                if (child->dont_delete) {
                        DEBUG(10, ("Child %d was active since last cleanup\n",
                                   (int)child->pid));
                        child->dont_delete = false;
                        continue;
                }

                DEBUG(10, ("Child %d idle for more than 30 seconds, "
                           "deleting\n", (int)child->pid));

                TALLOC_FREE(child);
        }

        if (list->children != NULL) {
                /*
                 * Re-schedule the next cleanup round
                 */
                list->cleanup_event = event_add_timed(server_event_context(), list,
                                                      timeval_add(&now, 30, 0),
                                                      aio_child_cleanup, list);
        }
}
static struct aio_child_list *init_aio_children(struct vfs_handle_struct *handle)
{
        struct aio_child_list *data = NULL;

        if (SMB_VFS_HANDLE_TEST_DATA(handle)) {
                SMB_VFS_HANDLE_GET_DATA(handle, data, struct aio_child_list,
                                        return NULL);
        }

        if (data == NULL) {
                data = talloc_zero(NULL, struct aio_child_list);
                if (data == NULL) {
                        return NULL;
                }
        }

        /*
         * Regardless of whether the child_list had been around or not, make
         * sure that we have a cleanup timed event. This timed event will
         * delete itself when it finds that no children are around anymore.
         */

        if (data->cleanup_event == NULL) {
                data->cleanup_event = event_add_timed(server_event_context(), data,
                                                      timeval_current_ofs(30, 0),
                                                      aio_child_cleanup, data);
                if (data->cleanup_event == NULL) {
                        return NULL;
                }
        }

        if (!SMB_VFS_HANDLE_TEST_DATA(handle)) {
                SMB_VFS_HANDLE_SET_DATA(handle, data, free_aio_children,
                                        struct aio_child_list, return False);
        }

        return data;
}
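/*
 * aio_child_loop() is what a forked worker runs: it blocks in read_fd()
 * until the parent sends a struct rw_cmd together with the file
 * descriptor to operate on, performs the pread/pwrite/fsync against the
 * shared mmap area, closes the fd and sends a struct rw_ret back over
 * the socket.  The parent picks up that answer via read_packet_send().
 */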
static void aio_child_loop(int sockfd, struct mmap_area *map)
{
        while (true) {
                int fd = -1;
                ssize_t ret;
                struct rw_cmd cmd_struct;
                struct rw_ret ret_struct;

                ret = read_fd(sockfd, &cmd_struct, sizeof(cmd_struct), &fd);
                if (ret != sizeof(cmd_struct)) {
                        DEBUG(10, ("read_fd returned %d: %s\n", (int)ret,
                                   strerror(errno)));
                        exit(1);
                }

                DEBUG(10, ("aio_child_loop: %s %d bytes at %d from fd %d\n",
                           cmd_type_str(cmd_struct.cmd),
                           (int)cmd_struct.n, (int)cmd_struct.offset, fd));

#ifdef ENABLE_BUILD_FARM_HACKS
                {
                        /*
                         * In the build farm, we want erratic behaviour for
                         * the I/O completion times.  Use
                         * generate_random_buffer, we just forked from a
                         * common parent state.
                         */
                        uint8_t randval;
                        unsigned msecs;

                        generate_random_buffer(&randval, sizeof(randval));
                        msecs = randval + 20;
                        DEBUG(10, ("delaying for %u msecs\n", msecs));
                        smb_msleep(msecs);
                }
#endif

                ZERO_STRUCT(ret_struct);

                switch (cmd_struct.cmd) {
                case READ_CMD:
                        ret_struct.size = sys_pread(
                                fd, (void *)map->ptr, cmd_struct.n,
                                cmd_struct.offset);
#if 0
/* This breaks "make test" when run with aio_fork module. */
#ifdef ENABLE_BUILD_FARM_HACKS
                        ret_struct.size = MAX(1, ret_struct.size * 0.9);
#endif
#endif
                        break;
                case WRITE_CMD:
                        ret_struct.size = sys_pwrite(
                                fd, (void *)map->ptr, cmd_struct.n,
                                cmd_struct.offset);
                        break;
                case FSYNC_CMD:
                        ret_struct.size = fsync(fd);
                        break;
                default:
                        ret_struct.size = -1;
                        errno = EINVAL;
                        break;
                }

                DEBUG(10, ("aio_child_loop: syscall returned %d\n",
                           (int)ret_struct.size));

                if (ret_struct.size == -1) {
                        ret_struct.ret_errno = errno;
                }

                /*
                 * Close the fd before telling our parent we're done. The
                 * parent might close and re-open the file very quickly, and
                 * with system-level share modes (GPFS) we would get an
                 * unjustified SHARING_VIOLATION.
                 */
                close(fd);

                ret = write_data(sockfd, (char *)&ret_struct,
                                 sizeof(ret_struct));
                if (ret != sizeof(ret_struct)) {
                        DEBUG(10, ("could not write ret_struct: %s\n",
                                   strerror(errno)));
                        exit(2);
                }
        }
}
static int aio_child_destructor(struct aio_child *child)
{
        char c = 0;

        SMB_ASSERT(!child->busy);

        DEBUG(10, ("aio_child_destructor: removing child %d on fd %d\n",
                   child->pid, child->sockfd));

        /*
         * closing the sockfd makes the child not return from recvmsg() on RHEL
         * 5.5 so instead force the child to exit by writing bad data to it
         */
        write(child->sockfd, &c, sizeof(c));
        close(child->sockfd);
        DLIST_REMOVE(child->list->children, child);
        return 0;
}
/*
 * We have to close all fd's in open files, we might incorrectly hold a system
 * level share mode on a file.
 */

static struct files_struct *close_fsp_fd(struct files_struct *fsp,
                                         void *private_data)
{
        if ((fsp->fh != NULL) && (fsp->fh->fd != -1)) {
                close(fsp->fh->fd);
                fsp->fh->fd = -1;
        }
        return NULL;
}
static int create_aio_child(struct smbd_server_connection *sconn,
                            struct aio_child_list *children,
                            size_t map_size,
                            struct aio_child **presult)
{
        struct aio_child *result;
        int fdpair[2];
        int ret;

        fdpair[0] = fdpair[1] = -1;

        result = talloc_zero(children, struct aio_child);
        if (result == NULL) {
                return ENOMEM;
        }

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fdpair) == -1) {
                ret = errno;
                DEBUG(10, ("socketpair() failed: %s\n", strerror(errno)));
                goto fail;
        }

        DEBUG(10, ("fdpair = %d/%d\n", fdpair[0], fdpair[1]));

        result->map = mmap_area_init(result, map_size);
        if (result->map == NULL) {
                ret = ENOMEM;
                DEBUG(0, ("Could not create mmap area\n"));
                goto fail;
        }

        result->pid = fork();
        if (result->pid == -1) {
                ret = errno;
                DEBUG(0, ("fork failed: %s\n", strerror(errno)));
                goto fail;
        }

        if (result->pid == 0) {
                /* The child never returns from this loop. */
                result->sockfd = fdpair[1];
                files_forall(sconn, close_fsp_fd, NULL);
                aio_child_loop(result->sockfd, result->map);
        }

        DEBUG(10, ("Child %d created with sockfd %d\n",
                   result->pid, fdpair[0]));

        result->sockfd = fdpair[0];

        result->list = children;
        DLIST_ADD(children->children, result);

        talloc_set_destructor(result, aio_child_destructor);

        *presult = result;
        return 0;

 fail:
        if (fdpair[0] != -1) close(fdpair[0]);
        if (fdpair[1] != -1) close(fdpair[1]);
        TALLOC_FREE(result);

        return ret;
}
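/*
 * Because the area returned by mmap_area_init() is mapped MAP_SHARED
 * before the fork, parent and child see the same pages afterwards; the
 * data of a read or write request never has to cross the socketpair,
 * only the small rw_cmd/rw_ret structures and the file descriptor do.
 */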
static int get_idle_child(struct vfs_handle_struct *handle,
                          struct aio_child **pchild)
{
        struct aio_child_list *children;
        struct aio_child *child;

        children = init_aio_children(handle);
        if (children == NULL) {
                return ENOMEM;
        }

        for (child = children->children; child != NULL; child = child->next) {
                if (!child->busy) {
                        break;
                }
        }

        if (child == NULL) {
                int ret;

                DEBUG(10, ("no idle child found, creating new one\n"));

                ret = create_aio_child(handle->conn->sconn, children,
                                       AIO_FORK_SHMEM_SIZE, &child);
                if (ret != 0) {
                        DEBUG(10, ("create_aio_child failed: %s\n",
                                   strerror(ret)));
                        return ret;
                }
        }

        child->dont_delete = true;
        child->busy = true;

        *pchild = child;
        return 0;
}
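/*
 * Children are reused: get_idle_child() hands out the first worker that
 * is not busy and only forks a new one when none is idle.  The busy and
 * dont_delete flags it sets are what aio_child_cleanup() inspects every
 * 30 seconds to decide which idle workers to reap.
 */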
struct aio_fork_pread_state {
        struct aio_child *child;
        ssize_t ret;
        int err;
};

static void aio_fork_pread_done(struct tevent_req *subreq);
static struct tevent_req *aio_fork_pread_send(struct vfs_handle_struct *handle,
                                              TALLOC_CTX *mem_ctx,
                                              struct tevent_context *ev,
                                              struct files_struct *fsp,
                                              void *data,
                                              size_t n, off_t offset)
{
        struct tevent_req *req, *subreq;
        struct aio_fork_pread_state *state;
        struct rw_cmd cmd;
        ssize_t written;
        int err;

        req = tevent_req_create(mem_ctx, &state, struct aio_fork_pread_state);
        if (req == NULL) {
                return NULL;
        }

        if (n > AIO_FORK_SHMEM_SIZE) {
                /* TODO: support variable buffers */
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        err = get_idle_child(handle, &state->child);
        if (err != 0) {
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        ZERO_STRUCT(cmd);
        cmd.n = n;
        cmd.offset = offset;
        cmd.cmd = READ_CMD;

        DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
                   (int)state->child->pid));

        /*
         * Not making this async. We're writing into an empty unix
         * domain socket. This should never block.
         */
        written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
                           fsp->fh->fd);
        if (written == -1) {
                err = errno;

                TALLOC_FREE(state->child);

                DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        subreq = read_packet_send(state, ev, state->child->sockfd,
                                  sizeof(struct rw_ret), NULL, NULL);
        if (tevent_req_nomem(subreq, req)) {
                TALLOC_FREE(state->child); /* we sent sth down */
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, aio_fork_pread_done, req);
        return req;
}
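/*
 * The send side is deliberately synchronous up to write_fd(): the
 * socketpair is empty at this point, so handing the command to the child
 * cannot block.  Only the wait for the child's rw_ret answer is
 * asynchronous, via read_packet_send()/aio_fork_pread_done().  Whenever
 * something goes wrong after a command has been sent, the child is
 * talloc-freed (and thereby torn down by its destructor) because its
 * protocol state is no longer known.  aio_fork_pwrite_send() and
 * aio_fork_fsync_send() below follow the same pattern.
 */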
static void aio_fork_pread_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct aio_fork_pread_state *state = tevent_req_data(
                req, struct aio_fork_pread_state);
        ssize_t nread;
        uint8_t *buf;
        int err;
        struct rw_ret *retbuf;

        nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
        TALLOC_FREE(subreq);
        if (nread == -1) {
                TALLOC_FREE(state->child);
                tevent_req_error(req, err);
                return;
        }

        state->child->busy = false;

        retbuf = (struct rw_ret *)buf;
        state->ret = retbuf->size;
        state->err = retbuf->ret_errno;
        tevent_req_done(req);
}
static ssize_t aio_fork_pread_recv(struct tevent_req *req, int *err)
{
        struct aio_fork_pread_state *state = tevent_req_data(
                req, struct aio_fork_pread_state);

        if (tevent_req_is_unix_error(req, err)) {
                return -1;
        }
        if (state->ret == -1) {
                *err = state->err;
        }
        return state->ret;
}
struct aio_fork_pwrite_state {
        struct aio_child *child;
        ssize_t ret;
        int err;
};

static void aio_fork_pwrite_done(struct tevent_req *subreq);
static struct tevent_req *aio_fork_pwrite_send(
        struct vfs_handle_struct *handle, TALLOC_CTX *mem_ctx,
        struct tevent_context *ev, struct files_struct *fsp,
        const void *data, size_t n, off_t offset)
{
        struct tevent_req *req, *subreq;
        struct aio_fork_pwrite_state *state;
        struct rw_cmd cmd;
        ssize_t written;
        int err;

        req = tevent_req_create(mem_ctx, &state, struct aio_fork_pwrite_state);
        if (req == NULL) {
                return NULL;
        }

        if (n > AIO_FORK_SHMEM_SIZE) {
                /* TODO: support variable buffers */
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        err = get_idle_child(handle, &state->child);
        if (err != 0) {
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        ZERO_STRUCT(cmd);
        cmd.n = n;
        cmd.offset = offset;
        cmd.cmd = WRITE_CMD;

        DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
                   (int)state->child->pid));

        /*
         * Not making this async. We're writing into an empty unix
         * domain socket. This should never block.
         */
        written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
                           fsp->fh->fd);
        if (written == -1) {
                err = errno;

                TALLOC_FREE(state->child);

                DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        subreq = read_packet_send(state, ev, state->child->sockfd,
                                  sizeof(struct rw_ret), NULL, NULL);
        if (tevent_req_nomem(subreq, req)) {
                TALLOC_FREE(state->child); /* we sent sth down */
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, aio_fork_pwrite_done, req);
        return req;
}
static void aio_fork_pwrite_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct aio_fork_pwrite_state *state = tevent_req_data(
                req, struct aio_fork_pwrite_state);
        ssize_t nread;
        uint8_t *buf;
        int err;
        struct rw_ret *retbuf;

        nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
        TALLOC_FREE(subreq);
        if (nread == -1) {
                TALLOC_FREE(state->child);
                tevent_req_error(req, err);
                return;
        }

        state->child->busy = false;

        retbuf = (struct rw_ret *)buf;
        state->ret = retbuf->size;
        state->err = retbuf->ret_errno;
        tevent_req_done(req);
}
static ssize_t aio_fork_pwrite_recv(struct tevent_req *req, int *err)
{
        struct aio_fork_pwrite_state *state = tevent_req_data(
                req, struct aio_fork_pwrite_state);

        if (tevent_req_is_unix_error(req, err)) {
                return -1;
        }
        if (state->ret == -1) {
                *err = state->err;
        }
        return state->ret;
}
struct aio_fork_fsync_state {
        struct aio_child *child;
        int ret;
        int err;
};

static void aio_fork_fsync_done(struct tevent_req *subreq);
static struct tevent_req *aio_fork_fsync_send(
        struct vfs_handle_struct *handle, TALLOC_CTX *mem_ctx,
        struct tevent_context *ev, struct files_struct *fsp)
{
        struct tevent_req *req, *subreq;
        struct aio_fork_fsync_state *state;
        struct rw_cmd cmd;
        ssize_t written;
        int err;

        req = tevent_req_create(mem_ctx, &state, struct aio_fork_fsync_state);
        if (req == NULL) {
                return NULL;
        }

        err = get_idle_child(handle, &state->child);
        if (err != 0) {
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        ZERO_STRUCT(cmd);
        cmd.cmd = FSYNC_CMD;

        DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
                   (int)state->child->pid));

        /*
         * Not making this async. We're writing into an empty unix
         * domain socket. This should never block.
         */
        written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
                           fsp->fh->fd);
        if (written == -1) {
                err = errno;

                TALLOC_FREE(state->child);

                DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
                tevent_req_error(req, err);
                return tevent_req_post(req, ev);
        }

        subreq = read_packet_send(state, ev, state->child->sockfd,
                                  sizeof(struct rw_ret), NULL, NULL);
        if (tevent_req_nomem(subreq, req)) {
                TALLOC_FREE(state->child); /* we sent sth down */
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, aio_fork_fsync_done, req);
        return req;
}
static void aio_fork_fsync_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct aio_fork_fsync_state *state = tevent_req_data(
                req, struct aio_fork_fsync_state);
        ssize_t nread;
        uint8_t *buf;
        int err;
        struct rw_ret *retbuf;

        nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
        TALLOC_FREE(subreq);
        if (nread == -1) {
                TALLOC_FREE(state->child);
                tevent_req_error(req, err);
                return;
        }

        state->child->busy = false;

        retbuf = (struct rw_ret *)buf;
        state->ret = retbuf->size;
        state->err = retbuf->ret_errno;
        tevent_req_done(req);
}
static int aio_fork_fsync_recv(struct tevent_req *req, int *err)
{
        struct aio_fork_fsync_state *state = tevent_req_data(
                req, struct aio_fork_fsync_state);

        if (tevent_req_is_unix_error(req, err)) {
                return -1;
        }
        if (state->ret == -1) {
                *err = state->err;
        }
        return state->ret;
}
static int aio_fork_connect(vfs_handle_struct *handle, const char *service,
                            const char *user)
{
        /*********************************************************************
         * How many threads to initialize ?
         * 100 per process seems insane as a default until you realize that
         * (a) Threads terminate after 1 second when idle.
         * (b) Throttling is done in SMB2 via the crediting algorithm.
         * (c) SMB1 clients are limited to max_mux (50) outstanding
         *     requests and Windows clients don't use this anyway.
         * Essentially we want this to be unlimited unless smb.conf
         * says different.
         *********************************************************************/
        aio_pending_size = 100;
        return SMB_VFS_NEXT_CONNECT(handle, service, user);
}
static struct vfs_fn_pointers vfs_aio_fork_fns = {
        .connect_fn = aio_fork_connect,
        .pread_send_fn = aio_fork_pread_send,
        .pread_recv_fn = aio_fork_pread_recv,
        .pwrite_send_fn = aio_fork_pwrite_send,
        .pwrite_recv_fn = aio_fork_pwrite_recv,
        .fsync_send_fn = aio_fork_fsync_send,
        .fsync_recv_fn = aio_fork_fsync_recv,
};
NTSTATUS vfs_aio_fork_init(void);
NTSTATUS vfs_aio_fork_init(void)
{
        return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
                                "aio_fork", &vfs_aio_fork_fns);
}
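/*
 * Loading the module is a normal smb.conf change; a minimal sketch of a
 * share that uses it (the share name, path and the aio size thresholds
 * shown are illustrative):
 *
 *      [data]
 *              path = /srv/data
 *              vfs objects = aio_fork
 *              aio read size = 1024
 *              aio write size = 1024
 */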