s3-aio-fork: make "read_cmd" an enum
[Samba/gbeck.git] / source3 / modules / vfs_aio_fork.c
blob 210fde1b31fd971e11760dba4acb0392de0e49d9

/*
 * Simulate Posix AIO using mmap/fork
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "includes.h"
#include "system/filesys.h"
#include "system/shmem.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/async_req/async_sock.h"
#include "lib/util/tevent_unix.h"

#ifndef MAP_FILE
#define MAP_FILE 0
#endif

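/*
 * Overview: this module simulates async I/O by handing each pread or
 * pwrite to a forked helper process. The command and the open file
 * descriptor travel over a unix domain socketpair; the file data
 * itself lives in a mmap'ed area shared between parent and child.
 * The module is enabled per share, e.g. in smb.conf:
 *
 *	vfs objects = aio_fork
 */
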
struct mmap_area {
	size_t size;
	volatile void *ptr;
};

static int mmap_area_destructor(struct mmap_area *area)
{
	munmap((void *)area->ptr, area->size);
	return 0;
}

static struct mmap_area *mmap_area_init(TALLOC_CTX *mem_ctx, size_t size)
{
	struct mmap_area *result;
	int fd;

	result = talloc(mem_ctx, struct mmap_area);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	fd = open("/dev/zero", O_RDWR);
	if (fd == -1) {
		DEBUG(3, ("open(\"/dev/zero\") failed: %s\n",
			  strerror(errno)));
		goto fail;
	}

	result->ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			   MAP_SHARED|MAP_FILE, fd, 0);
	if (result->ptr == MAP_FAILED) {
		DEBUG(1, ("mmap failed: %s\n", strerror(errno)));
		close(fd);	/* don't leak the /dev/zero fd on failure */
		goto fail;
	}

	close(fd);

	result->size = size;
	talloc_set_destructor(result, mmap_area_destructor);

	return result;

fail:
	TALLOC_FREE(result);
	return NULL;
}

enum cmd_type {
	READ_CMD,
	WRITE_CMD
};

static const char *cmd_type_str(enum cmd_type cmd)
{
	const char *result;

	switch (cmd) {
	case READ_CMD:
		result = "READ";
		break;
	case WRITE_CMD:
		result = "WRITE";
		break;
	default:
		result = "<UNKNOWN>";
		break;
	}
	return result;
}

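/*
 * Wire format between parent and child: a fixed-size struct rw_cmd
 * goes down the socket via write_fd() with the target file descriptor
 * attached as SCM_RIGHTS ancillary data; the child answers with a
 * fixed-size struct rw_ret. No file data crosses the socket, it stays
 * in the shared mmap area.
 */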
struct rw_cmd {
	size_t n;
	off_t offset;
	enum cmd_type cmd;
};

struct rw_ret {
	ssize_t size;
	int ret_errno;
};

struct aio_child_list;

struct aio_child {
	struct aio_child *prev, *next;
	struct aio_child_list *list;
	pid_t pid;
	int sockfd;
	struct mmap_area *map;
	bool dont_delete;	/* Marked as in use since last cleanup */
	bool busy;
};

struct aio_child_list {
	struct aio_child *children;
	struct timed_event *cleanup_event;
};

static void free_aio_children(void **p)
{
	TALLOC_FREE(*p);
}

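/*
 * read_fd()/write_fd() pass an open file descriptor across a unix
 * domain socket, using the classic technique from Stevens' "UNIX
 * Network Programming": the descriptor rides along the payload as
 * SCM_RIGHTS control data (or msg_accrights on older systems).
 */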
static ssize_t read_fd(int fd, void *ptr, size_t nbytes, int *recvfd)
{
	struct msghdr msg;
	struct iovec iov[1];
	ssize_t n;
#ifndef HAVE_MSGHDR_MSG_CONTROL
	int newfd;
#endif

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);
#else
#if HAVE_MSGHDR_MSG_ACCTRIGHTS
	msg.msg_accrights = (caddr_t) &newfd;
	msg.msg_accrightslen = sizeof(int);
#else
#error Can not pass file descriptors
#endif
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_flags = 0;

	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	if ( (n = recvmsg(fd, &msg, 0)) <= 0) {
		return(n);
	}

#ifdef HAVE_MSGHDR_MSG_CONTROL
	if ((cmptr = CMSG_FIRSTHDR(&msg)) != NULL
	    && cmptr->cmsg_len == CMSG_LEN(sizeof(int))) {
		if (cmptr->cmsg_level != SOL_SOCKET) {
			DEBUG(10, ("control level != SOL_SOCKET"));
			errno = EINVAL;
			return -1;
		}
		if (cmptr->cmsg_type != SCM_RIGHTS) {
			DEBUG(10, ("control type != SCM_RIGHTS"));
			errno = EINVAL;
			return -1;
		}
		memcpy(recvfd, CMSG_DATA(cmptr), sizeof(*recvfd));
	} else {
		*recvfd = -1;	/* descriptor was not passed */
	}
#else
	if (msg.msg_accrightslen == sizeof(int)) {
		*recvfd = newfd;
	}
	else {
		*recvfd = -1;	/* descriptor was not passed */
	}
#endif

	return(n);
}

static ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd)
{
	struct msghdr msg;
	struct iovec iov[1];

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	ZERO_STRUCT(msg);
	ZERO_STRUCT(control_un);

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);

	cmptr = CMSG_FIRSTHDR(&msg);
	cmptr->cmsg_len = CMSG_LEN(sizeof(int));
	cmptr->cmsg_level = SOL_SOCKET;
	cmptr->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmptr), &sendfd, sizeof(sendfd));
#else
	ZERO_STRUCT(msg);
	msg.msg_accrights = (caddr_t) &sendfd;
	msg.msg_accrightslen = sizeof(int);
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	ZERO_STRUCT(iov);
	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	return (sendmsg(fd, &msg, 0));
}

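/*
 * Reap idle helpers: this handler runs every 30 seconds. A child that
 * serviced a request since the last round merely loses its dont_delete
 * flag; a child that stayed idle for a full round (i.e. more than 30
 * seconds) is talloc-freed, which via aio_child_destructor() makes the
 * child process exit.
 */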
static void aio_child_cleanup(struct event_context *event_ctx,
			      struct timed_event *te,
			      struct timeval now,
			      void *private_data)
{
	struct aio_child_list *list = talloc_get_type_abort(
		private_data, struct aio_child_list);
	struct aio_child *child, *next;

	TALLOC_FREE(list->cleanup_event);

	for (child = list->children; child != NULL; child = next) {
		next = child->next;

		if (child->busy) {
			DEBUG(10, ("child %d currently active\n",
				   (int)child->pid));
			continue;
		}

		if (child->dont_delete) {
			DEBUG(10, ("Child %d was active since last cleanup\n",
				   (int)child->pid));
			child->dont_delete = false;
			continue;
		}

		DEBUG(10, ("Child %d idle for more than 30 seconds, "
			   "deleting\n", (int)child->pid));

		TALLOC_FREE(child);
	}

	if (list->children != NULL) {
		/*
		 * Re-schedule the next cleanup round
		 */
		list->cleanup_event = event_add_timed(server_event_context(),
						      list,
						      timeval_add(&now, 30, 0),
						      aio_child_cleanup, list);
	}
}

static struct aio_child_list *init_aio_children(struct vfs_handle_struct *handle)
{
	struct aio_child_list *data = NULL;

	if (SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_GET_DATA(handle, data, struct aio_child_list,
					return NULL);
	}

	if (data == NULL) {
		data = talloc_zero(NULL, struct aio_child_list);
		if (data == NULL) {
			return NULL;
		}
	}

	/*
	 * Regardless of whether the child_list had been around or not, make
	 * sure that we have a cleanup timed event. This timed event will
	 * delete itself when it finds that no children are around anymore.
	 */

	if (data->cleanup_event == NULL) {
		data->cleanup_event = event_add_timed(server_event_context(),
						      data,
						      timeval_current_ofs(30, 0),
						      aio_child_cleanup, data);
		if (data->cleanup_event == NULL) {
			TALLOC_FREE(data);
			return NULL;
		}
	}

	if (!SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_SET_DATA(handle, data, free_aio_children,
					struct aio_child_list, return False);
	}

	return data;
}

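/*
 * Main loop of the helper process: block in read_fd() until the parent
 * sends a command plus file descriptor, carry out the pread/pwrite
 * against the shared mmap area, close the fd again and return a
 * struct rw_ret. Any short or garbled command (including the single
 * bad byte written by aio_child_destructor()) makes the child exit.
 */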
static void aio_child_loop(int sockfd, struct mmap_area *map)
{
	while (true) {
		int fd = -1;
		ssize_t ret;
		struct rw_cmd cmd_struct;
		struct rw_ret ret_struct;

		ret = read_fd(sockfd, &cmd_struct, sizeof(cmd_struct), &fd);
		if (ret != sizeof(cmd_struct)) {
			DEBUG(10, ("read_fd returned %d: %s\n", (int)ret,
				   strerror(errno)));
			exit(1);
		}

		DEBUG(10, ("aio_child_loop: %s %d bytes at %d from fd %d\n",
			   cmd_type_str(cmd_struct.cmd),
			   (int)cmd_struct.n, (int)cmd_struct.offset, fd));

#ifdef ENABLE_BUILD_FARM_HACKS
		{
			/*
			 * In the build farm, we want erratic behaviour for
			 * async I/O times
			 */
			uint8_t randval;
			unsigned msecs;
			/*
			 * use generate_random_buffer, as we just forked
			 * from a common parent state
			 */
			generate_random_buffer(&randval, sizeof(randval));
			msecs = randval + 20;
			DEBUG(10, ("delaying for %u msecs\n", msecs));
			smb_msleep(msecs);
		}
#endif

		ZERO_STRUCT(ret_struct);

		switch (cmd_struct.cmd) {
		case READ_CMD:
			ret_struct.size = sys_pread(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
#if 0
/* This breaks "make test" when run with the aio_fork module. */
#ifdef ENABLE_BUILD_FARM_HACKS
			ret_struct.size = MAX(1, ret_struct.size * 0.9);
#endif
#endif
			break;
		case WRITE_CMD:
			ret_struct.size = sys_pwrite(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
			break;
		default:
			ret_struct.size = -1;
			errno = EINVAL;
		}

		DEBUG(10, ("aio_child_loop: syscall returned %d\n",
			   (int)ret_struct.size));

		if (ret_struct.size == -1) {
			ret_struct.ret_errno = errno;
		}

		/*
		 * Close the fd before telling our parent we're done. The
		 * parent might close and re-open the file very quickly, and
		 * with system-level share modes (GPFS) we would get an
		 * unjustified SHARING_VIOLATION.
		 */
		close(fd);

		ret = write_data(sockfd, (char *)&ret_struct,
				 sizeof(ret_struct));
		if (ret != sizeof(ret_struct)) {
			DEBUG(10, ("could not write ret_struct: %s\n",
				   strerror(errno)));
			exit(2);
		}
	}
}

static int aio_child_destructor(struct aio_child *child)
{
	char c = 0;

	SMB_ASSERT(!child->busy);

	DEBUG(10, ("aio_child_destructor: removing child %d on fd %d\n",
		   child->pid, child->sockfd));

	/*
	 * Closing the sockfd does not make the child return from recvmsg()
	 * on RHEL 5.5, so instead force the child to exit by writing bad
	 * data to it.
	 */
	write(child->sockfd, &c, sizeof(c));
	close(child->sockfd);
	DLIST_REMOVE(child->list->children, child);
	return 0;
}

/*
 * We have to close all fds in open files, otherwise we might
 * incorrectly hold a system-level share mode on a file.
 */

static struct files_struct *close_fsp_fd(struct files_struct *fsp,
					 void *private_data)
{
	if ((fsp->fh != NULL) && (fsp->fh->fd != -1)) {
		close(fsp->fh->fd);
		fsp->fh->fd = -1;
	}
	return NULL;
}

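/*
 * Create a new helper child: set up the socketpair and the shared mmap
 * area, then fork. The child closes the parent's socket end and all
 * open file fds (see close_fsp_fd() above) before entering
 * aio_child_loop(); the parent keeps fdpair[0] and links the child
 * into the per-handle list.
 */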
static int create_aio_child(struct smbd_server_connection *sconn,
			    struct aio_child_list *children,
			    size_t map_size,
			    struct aio_child **presult)
{
	struct aio_child *result;
	int fdpair[2];
	int ret;

	fdpair[0] = fdpair[1] = -1;

	result = talloc_zero(children, struct aio_child);
	if (result == NULL) {
		return ENOMEM;
	}

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fdpair) == -1) {
		ret = errno;
		DEBUG(10, ("socketpair() failed: %s\n", strerror(errno)));
		goto fail;
	}

	DEBUG(10, ("fdpair = %d/%d\n", fdpair[0], fdpair[1]));

	result->map = mmap_area_init(result, map_size);
	if (result->map == NULL) {
		ret = errno;
		DEBUG(0, ("Could not create mmap area\n"));
		goto fail;
	}

	result->pid = fork();
	if (result->pid == -1) {
		ret = errno;
		DEBUG(0, ("fork failed: %s\n", strerror(errno)));
		goto fail;
	}

	if (result->pid == 0) {
		close(fdpair[0]);
		result->sockfd = fdpair[1];
		files_forall(sconn, close_fsp_fd, NULL);
		aio_child_loop(result->sockfd, result->map);
	}

	DEBUG(10, ("Child %d created with sockfd %d\n",
		   result->pid, fdpair[0]));

	result->sockfd = fdpair[0];
	close(fdpair[1]);

	result->list = children;
	DLIST_ADD(children->children, result);

	talloc_set_destructor(result, aio_child_destructor);

	*presult = result;

	return 0;

 fail:
	if (fdpair[0] != -1) close(fdpair[0]);
	if (fdpair[1] != -1) close(fdpair[1]);
	TALLOC_FREE(result);

	return ret;
}

static int get_idle_child(struct vfs_handle_struct *handle,
			  struct aio_child **pchild)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		return ENOMEM;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (!child->busy) {
			break;
		}
	}

	if (child == NULL) {
		int ret;

		DEBUG(10, ("no idle child found, creating new one\n"));

		ret = create_aio_child(handle->conn->sconn, children,
				       128*1024, &child);
		if (ret != 0) {
			DEBUG(10, ("create_aio_child failed: %s\n",
				   strerror(ret)));
			return ret;
		}
	}

	child->dont_delete = true;
	child->busy = true;

	*pchild = child;
	return 0;
}

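/*
 * The pread path below follows the usual tevent_req send/done/recv
 * pattern: _send() ships the command to an idle child and arms
 * read_packet_send() on the child's socket, _done() unpacks the
 * returned struct rw_ret, and _recv() reports size and errno. A caller
 * would drive it roughly like this (hypothetical sketch, names made
 * up):
 *
 *	req = aio_fork_pread_send(handle, mem_ctx, ev, fsp, buf, n, ofs);
 *	if (req == NULL) { ... }
 *	tevent_req_set_callback(req, my_pread_done, my_state);
 *	...
 *	nread = aio_fork_pread_recv(req, &err);
 */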
struct aio_fork_pread_state {
	struct aio_child *child;
	ssize_t ret;
	int err;
};

static void aio_fork_pread_done(struct tevent_req *subreq);

static struct tevent_req *aio_fork_pread_send(struct vfs_handle_struct *handle,
					      TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct files_struct *fsp,
					      void *data,
					      size_t n, off_t offset)
{
	struct tevent_req *req, *subreq;
	struct aio_fork_pread_state *state;
	struct rw_cmd cmd;
	ssize_t written;
	int err;

	req = tevent_req_create(mem_ctx, &state, struct aio_fork_pread_state);
	if (req == NULL) {
		return NULL;
	}

	if (n > 128*1024) {
		/* TODO: support variable buffers */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	err = get_idle_child(handle, &state->child);
	if (err != 0) {
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	ZERO_STRUCT(cmd);
	cmd.n = n;
	cmd.offset = offset;
	cmd.cmd = READ_CMD;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)state->child->pid));

	/*
	 * Not making this async. We're writing into an empty unix
	 * domain socket. This should never block.
	 */
	written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
			   fsp->fh->fd);
	if (written == -1) {
		err = errno;

		TALLOC_FREE(state->child);

		DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	subreq = read_packet_send(state, ev, state->child->sockfd,
				  sizeof(struct rw_ret), NULL, NULL);
	if (tevent_req_nomem(subreq, req)) {
		/* we already sent something down the socket */
		TALLOC_FREE(state->child);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, aio_fork_pread_done, req);
	return req;
}

static void aio_fork_pread_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct aio_fork_pread_state *state = tevent_req_data(
		req, struct aio_fork_pread_state);
	ssize_t nread;
	uint8_t *buf;
	int err;
	struct rw_ret *retbuf;

	nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
	TALLOC_FREE(subreq);
	if (nread == -1) {
		TALLOC_FREE(state->child);
		tevent_req_error(req, err);
		return;
	}

	state->child->busy = false;

	retbuf = (struct rw_ret *)buf;
	state->ret = retbuf->size;
	state->err = retbuf->ret_errno;
	tevent_req_done(req);
}

static ssize_t aio_fork_pread_recv(struct tevent_req *req, int *err)
{
	struct aio_fork_pread_state *state = tevent_req_data(
		req, struct aio_fork_pread_state);

	if (tevent_req_is_unix_error(req, err)) {
		return -1;
	}
	if (state->ret == -1) {
		*err = state->err;
	}
	return state->ret;
}

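/*
 * The pwrite path below mirrors the pread path above; the only
 * difference is that the command sent to the child carries WRITE_CMD
 * instead of READ_CMD.
 */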
struct aio_fork_pwrite_state {
	struct aio_child *child;
	ssize_t ret;
	int err;
};

static void aio_fork_pwrite_done(struct tevent_req *subreq);

static struct tevent_req *aio_fork_pwrite_send(
	struct vfs_handle_struct *handle, TALLOC_CTX *mem_ctx,
	struct tevent_context *ev, struct files_struct *fsp,
	const void *data, size_t n, off_t offset)
{
	struct tevent_req *req, *subreq;
	struct aio_fork_pwrite_state *state;
	struct rw_cmd cmd;
	ssize_t written;
	int err;

	req = tevent_req_create(mem_ctx, &state, struct aio_fork_pwrite_state);
	if (req == NULL) {
		return NULL;
	}

	if (n > 128*1024) {
		/* TODO: support variable buffers */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	err = get_idle_child(handle, &state->child);
	if (err != 0) {
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	ZERO_STRUCT(cmd);
	cmd.n = n;
	cmd.offset = offset;
	cmd.cmd = WRITE_CMD;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)state->child->pid));

	/*
	 * Not making this async. We're writing into an empty unix
	 * domain socket. This should never block.
	 */
	written = write_fd(state->child->sockfd, &cmd, sizeof(cmd),
			   fsp->fh->fd);
	if (written == -1) {
		err = errno;

		TALLOC_FREE(state->child);

		DEBUG(10, ("write_fd failed: %s\n", strerror(err)));
		tevent_req_error(req, err);
		return tevent_req_post(req, ev);
	}

	subreq = read_packet_send(state, ev, state->child->sockfd,
				  sizeof(struct rw_ret), NULL, NULL);
	if (tevent_req_nomem(subreq, req)) {
		/* we already sent something down the socket */
		TALLOC_FREE(state->child);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, aio_fork_pwrite_done, req);
	return req;
}

static void aio_fork_pwrite_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct aio_fork_pwrite_state *state = tevent_req_data(
		req, struct aio_fork_pwrite_state);
	ssize_t nread;
	uint8_t *buf;
	int err;
	struct rw_ret *retbuf;

	nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
	TALLOC_FREE(subreq);
	if (nread == -1) {
		TALLOC_FREE(state->child);
		tevent_req_error(req, err);
		return;
	}

	state->child->busy = false;

	retbuf = (struct rw_ret *)buf;
	state->ret = retbuf->size;
	state->err = retbuf->ret_errno;
	tevent_req_done(req);
}

static ssize_t aio_fork_pwrite_recv(struct tevent_req *req, int *err)
{
	struct aio_fork_pwrite_state *state = tevent_req_data(
		req, struct aio_fork_pwrite_state);

	if (tevent_req_is_unix_error(req, err)) {
		return -1;
	}
	if (state->ret == -1) {
		*err = state->err;
	}
	return state->ret;
}

static int aio_fork_connect(vfs_handle_struct *handle, const char *service,
			    const char *user)
{
	/*********************************************************************
	 * How many aio requests to allow in flight?
	 * 100 per process seems insane as a default until you realize that
	 * (a) Idle helper children are reaped after roughly 30 seconds.
	 * (b) Throttling is done in SMB2 via the crediting algorithm.
	 * (c) SMB1 clients are limited to max_mux (50) outstanding
	 *     requests and Windows clients don't use this anyway.
	 * Essentially we want this to be unlimited unless smb.conf
	 * says different.
	 *********************************************************************/
	aio_pending_size = 100;
	return SMB_VFS_NEXT_CONNECT(handle, service, user);
}

static struct vfs_fn_pointers vfs_aio_fork_fns = {
	.connect_fn = aio_fork_connect,
	.pread_send_fn = aio_fork_pread_send,
	.pread_recv_fn = aio_fork_pread_recv,
	.pwrite_send_fn = aio_fork_pwrite_send,
	.pwrite_recv_fn = aio_fork_pwrite_recv,
};

NTSTATUS vfs_aio_fork_init(void);
NTSTATUS vfs_aio_fork_init(void)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"aio_fork", &vfs_aio_fork_fns);
}