source3/modules/vfs_aio_fork.c
/*
 * Simulate the Posix AIO using mmap/fork
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "includes.h"
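
/*
 * Overview: each outstanding request is handed to a forked helper child.
 * Parent and child share a mmap()ed buffer for the data and talk over an
 * AF_UNIX socketpair; the file descriptor to read from or write to is
 * passed to the child as ancillary data together with a struct rw_cmd,
 * and the child answers with a struct rw_ret once the pread/pwrite is
 * done. Idle children are kept around for reuse and reaped by a timed
 * cleanup event.
 */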

struct mmap_area {
	size_t size;
	volatile void *ptr;
};

static int mmap_area_destructor(struct mmap_area *area)
{
	munmap((void *)area->ptr, area->size);
	return 0;
}

static struct mmap_area *mmap_area_init(TALLOC_CTX *mem_ctx, size_t size)
{
	struct mmap_area *result;
	int fd;

	result = talloc(mem_ctx, struct mmap_area);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	fd = open("/dev/zero", O_RDWR);
	if (fd == -1) {
		DEBUG(3, ("open(\"/dev/zero\") failed: %s\n",
			  strerror(errno)));
		goto fail;
	}

	result->ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			   MAP_SHARED|MAP_FILE, fd, 0);
	if (result->ptr == MAP_FAILED) {
		DEBUG(1, ("mmap failed: %s\n", strerror(errno)));
		goto fail;
	}

	close(fd);

	result->size = size;
	talloc_set_destructor(result, mmap_area_destructor);

	return result;

fail:
	TALLOC_FREE(result);
	return NULL;
}
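
/*
 * Wire format between parent and child: the parent sends a struct rw_cmd
 * (byte count, offset, read-or-write flag) together with the target file
 * descriptor, and the child replies with a struct rw_ret carrying the
 * pread/pwrite result and errno.
 */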

struct rw_cmd {
	size_t n;
	SMB_OFF_T offset;
	bool read_cmd;
};

struct rw_ret {
	ssize_t size;
	int ret_errno;
};

struct aio_child_list;

struct aio_child {
	struct aio_child *prev, *next;
	struct aio_child_list *list;
	SMB_STRUCT_AIOCB *aiocb;
	pid_t pid;
	int sockfd;
	struct fd_event *sock_event;
	struct rw_ret retval;
	struct mmap_area *map;	/* ==NULL means write request */
	bool dont_delete;	/* Marked as in use since last cleanup */
	bool cancelled;
	bool read_cmd;
};

struct aio_child_list {
	struct aio_child *children;
	struct timed_event *cleanup_event;
};

static void free_aio_children(void **p)
{
	TALLOC_FREE(*p);
}
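
/*
 * read_fd()/write_fd() transfer one message plus an open file descriptor
 * over the AF_UNIX socketpair, using SCM_RIGHTS ancillary data where
 * msg_control is available and the older msg_accrights interface otherwise.
 */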

static ssize_t read_fd(int fd, void *ptr, size_t nbytes, int *recvfd)
{
	struct msghdr msg;
	struct iovec iov[1];
	ssize_t n;
#ifndef HAVE_MSGHDR_MSG_CONTROL
	int newfd;
#endif

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);
#else
#if HAVE_MSGHDR_MSG_ACCTRIGHTS
	msg.msg_accrights = (caddr_t) &newfd;
	msg.msg_accrightslen = sizeof(int);
#else
#error Can not pass file descriptors
#endif
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	if ( (n = recvmsg(fd, &msg, 0)) <= 0) {
		return(n);
	}

#ifdef HAVE_MSGHDR_MSG_CONTROL
	if ((cmptr = CMSG_FIRSTHDR(&msg)) != NULL
	    && cmptr->cmsg_len == CMSG_LEN(sizeof(int))) {
		if (cmptr->cmsg_level != SOL_SOCKET) {
			DEBUG(10, ("control level != SOL_SOCKET"));
			errno = EINVAL;
			return -1;
		}
		if (cmptr->cmsg_type != SCM_RIGHTS) {
			DEBUG(10, ("control type != SCM_RIGHTS"));
			errno = EINVAL;
			return -1;
		}
		*recvfd = *((int *) CMSG_DATA(cmptr));
	} else {
		*recvfd = -1;		/* descriptor was not passed */
	}
#else
	if (msg.msg_accrightslen == sizeof(int)) {
		*recvfd = newfd;
	}
	else {
		*recvfd = -1;		/* descriptor was not passed */
	}
#endif

	return(n);
}

static ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd)
{
	struct msghdr msg;
	struct iovec iov[1];

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	ZERO_STRUCT(msg);
	ZERO_STRUCT(control_un);

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);

	cmptr = CMSG_FIRSTHDR(&msg);
	cmptr->cmsg_len = CMSG_LEN(sizeof(int));
	cmptr->cmsg_level = SOL_SOCKET;
	cmptr->cmsg_type = SCM_RIGHTS;
	*((int *) CMSG_DATA(cmptr)) = sendfd;
#else
	ZERO_STRUCT(msg);
	msg.msg_accrights = (caddr_t) &sendfd;
	msg.msg_accrightslen = sizeof(int);
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	ZERO_STRUCT(iov);
	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	return (sendmsg(fd, &msg, 0));
}

static void aio_child_cleanup(struct event_context *event_ctx,
			      struct timed_event *te,
			      struct timeval now,
			      void *private_data)
{
	struct aio_child_list *list = talloc_get_type_abort(
		private_data, struct aio_child_list);
	struct aio_child *child, *next;

	TALLOC_FREE(list->cleanup_event);

	for (child = list->children; child != NULL; child = next) {
		next = child->next;

		if (child->aiocb != NULL) {
			DEBUG(10, ("child %d currently active\n",
				   (int)child->pid));
			continue;
		}

		if (child->dont_delete) {
			DEBUG(10, ("Child %d was active since last cleanup\n",
				   (int)child->pid));
			child->dont_delete = false;
			continue;
		}

		DEBUG(10, ("Child %d idle for more than 30 seconds, "
			   "deleting\n", (int)child->pid));

		TALLOC_FREE(child);
	}

	if (list->children != NULL) {
		/*
		 * Re-schedule the next cleanup round
		 */
		list->cleanup_event = event_add_timed(smbd_event_context(), list,
						      timeval_add(&now, 30, 0),
						      aio_child_cleanup, list);
	}
}

static struct aio_child_list *init_aio_children(struct vfs_handle_struct *handle)
{
	struct aio_child_list *data = NULL;

	if (SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_GET_DATA(handle, data, struct aio_child_list,
					return NULL);
	}

	if (data == NULL) {
		data = TALLOC_ZERO_P(NULL, struct aio_child_list);
		if (data == NULL) {
			return NULL;
		}
	}

	/*
	 * Regardless of whether the child_list had been around or not, make
	 * sure that we have a cleanup timed event. This timed event will
	 * delete itself when it finds that no children are around anymore.
	 */

	if (data->cleanup_event == NULL) {
		data->cleanup_event = event_add_timed(smbd_event_context(), data,
						      timeval_current_ofs(30, 0),
						      aio_child_cleanup, data);
		if (data->cleanup_event == NULL) {
			TALLOC_FREE(data);
			return NULL;
		}
	}

	if (!SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_SET_DATA(handle, data, free_aio_children,
					struct aio_child_list, return False);
	}

	return data;
}
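
/*
 * Main loop of a helper child: receive a struct rw_cmd plus the file
 * descriptor from the parent, perform the pread/pwrite against the shared
 * mmap area, close the fd and send a struct rw_ret back.
 */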

static void aio_child_loop(int sockfd, struct mmap_area *map)
{
	while (true) {
		int fd = -1;
		ssize_t ret;
		struct rw_cmd cmd_struct;
		struct rw_ret ret_struct;

		ret = read_fd(sockfd, &cmd_struct, sizeof(cmd_struct), &fd);
		if (ret != sizeof(cmd_struct)) {
			DEBUG(10, ("read_fd returned %d: %s\n", (int)ret,
				   strerror(errno)));
			exit(1);
		}

		DEBUG(10, ("aio_child_loop: %s %d bytes at %d from fd %d\n",
			   cmd_struct.read_cmd ? "read" : "write",
			   (int)cmd_struct.n, (int)cmd_struct.offset, fd));

#ifdef ENABLE_BUILD_FARM_HACKS
		{
			/*
			 * In the build farm, we want erratic behaviour for
			 * async I/O times
			 */
			uint8_t randval;
			unsigned msecs;
			/*
			 * use generate_random_buffer, we just forked from a
			 * common parent state
			 */
			generate_random_buffer(&randval, sizeof(randval));
			msecs = randval + 20;
			DEBUG(10, ("delaying for %u msecs\n", msecs));
			smb_msleep(msecs);
		}
#endif

		ZERO_STRUCT(ret_struct);

		if (cmd_struct.read_cmd) {
			ret_struct.size = sys_pread(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
#if 0
/* This breaks "make test" when run with aio_fork module. */
#ifdef ENABLE_BUILD_FARM_HACKS
			ret_struct.size = MAX(1, ret_struct.size * 0.9);
#endif
#endif
		}
		else {
			ret_struct.size = sys_pwrite(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
		}

		DEBUG(10, ("aio_child_loop: syscall returned %d\n",
			   (int)ret_struct.size));

		if (ret_struct.size == -1) {
			ret_struct.ret_errno = errno;
		}

		/*
		 * Close the fd before telling our parent we're done. The
		 * parent might close and re-open the file very quickly, and
		 * with system-level share modes (GPFS) we would get an
		 * unjustified SHARING_VIOLATION.
		 */
		close(fd);

		ret = write_data(sockfd, (char *)&ret_struct,
				 sizeof(ret_struct));
		if (ret != sizeof(ret_struct)) {
			DEBUG(10, ("could not write ret_struct: %s\n",
				   strerror(errno)));
			exit(2);
		}
	}
}

static void handle_aio_completion(struct event_context *event_ctx,
				  struct fd_event *event, uint16 flags,
				  void *p)
{
	struct aio_extra *aio_ex = NULL;
	struct aio_child *child = (struct aio_child *)p;

	DEBUG(10, ("handle_aio_completion called with flags=%d\n", flags));

	if ((flags & EVENT_FD_READ) == 0) {
		return;
	}

	if (!NT_STATUS_IS_OK(read_data(child->sockfd,
				       (char *)&child->retval,
				       sizeof(child->retval)))) {
		DEBUG(0, ("aio child %d died\n", (int)child->pid));
		child->retval.size = -1;
		child->retval.ret_errno = EIO;
	}

	if (child->cancelled) {
		child->aiocb = NULL;
		child->cancelled = false;
		return;
	}

	if (child->read_cmd && (child->retval.size > 0)) {
		SMB_ASSERT(child->retval.size <= child->aiocb->aio_nbytes);
		memcpy((void *)child->aiocb->aio_buf, (void *)child->map->ptr,
		       child->retval.size);
	}

	aio_ex = (struct aio_extra *)child->aiocb->aio_sigevent.sigev_value.sival_ptr;
	smbd_aio_complete_aio_ex(aio_ex);
}

static int aio_child_destructor(struct aio_child *child)
{
	SMB_ASSERT((child->aiocb == NULL) || child->cancelled);
	close(child->sockfd);
	DLIST_REMOVE(child->list->children, child);
	return 0;
}

/*
 * We have to close all fd's in open files, we might incorrectly hold a system
 * level share mode on a file.
 */

static struct files_struct *close_fsp_fd(struct files_struct *fsp,
					 void *private_data)
{
	if ((fsp->fh != NULL) && (fsp->fh->fd != -1)) {
		close(fsp->fh->fd);
		fsp->fh->fd = -1;
	}
	return NULL;
}
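
/*
 * Set up one helper child: create the socketpair and the shared mmap area,
 * fork, let the child drop into aio_child_loop(), and register the parent's
 * end of the socket with the main event loop so completions are picked up
 * by handle_aio_completion().
 */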

static NTSTATUS create_aio_child(struct aio_child_list *children,
				 size_t map_size,
				 struct aio_child **presult)
{
	struct aio_child *result;
	int fdpair[2];
	NTSTATUS status;

	fdpair[0] = fdpair[1] = -1;

	result = TALLOC_ZERO_P(children, struct aio_child);
	NT_STATUS_HAVE_NO_MEMORY(result);

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fdpair) == -1) {
		status = map_nt_error_from_unix(errno);
		DEBUG(10, ("socketpair() failed: %s\n", strerror(errno)));
		goto fail;
	}

	DEBUG(10, ("fdpair = %d/%d\n", fdpair[0], fdpair[1]));

	result->map = mmap_area_init(result, map_size);
	if (result->map == NULL) {
		status = map_nt_error_from_unix(errno);
		DEBUG(0, ("Could not create mmap area\n"));
		goto fail;
	}

	result->pid = sys_fork();
	if (result->pid == -1) {
		status = map_nt_error_from_unix(errno);
		DEBUG(0, ("fork failed: %s\n", strerror(errno)));
		goto fail;
	}

	if (result->pid == 0) {
		close(fdpair[0]);
		result->sockfd = fdpair[1];
		files_forall(close_fsp_fd, NULL);
		aio_child_loop(result->sockfd, result->map);
	}

	DEBUG(10, ("Child %d created\n", result->pid));

	result->sockfd = fdpair[0];
	close(fdpair[1]);

	result->sock_event = event_add_fd(smbd_event_context(), result,
					  result->sockfd, EVENT_FD_READ,
					  handle_aio_completion,
					  result);
	if (result->sock_event == NULL) {
		status = NT_STATUS_NO_MEMORY;
		DEBUG(0, ("event_add_fd failed\n"));
		goto fail;
	}

	result->list = children;
	DLIST_ADD(children->children, result);

	talloc_set_destructor(result, aio_child_destructor);

	*presult = result;

	return NT_STATUS_OK;

 fail:
	if (fdpair[0] != -1) close(fdpair[0]);
	if (fdpair[1] != -1) close(fdpair[1]);
	TALLOC_FREE(result);

	return status;
}
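
/*
 * Hand out a child for a new request: reuse an idle one if available,
 * otherwise fork a fresh child with a 128k shared buffer. Marking it
 * dont_delete keeps the cleanup timer from reaping it right away.
 */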

static NTSTATUS get_idle_child(struct vfs_handle_struct *handle,
			       struct aio_child **pchild)
{
	struct aio_child_list *children;
	struct aio_child *child;
	NTSTATUS status;

	children = init_aio_children(handle);
	if (children == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == NULL) {
			/* idle */
			break;
		}
	}

	if (child == NULL) {
		DEBUG(10, ("no idle child found, creating new one\n"));

		status = create_aio_child(children, 128*1024, &child);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("create_aio_child failed: %s\n",
				   nt_errstr(status)));
			return status;
		}
	}

	child->dont_delete = true;

	*pchild = child;
	return NT_STATUS_OK;
}

static int aio_fork_read(struct vfs_handle_struct *handle,
			 struct files_struct *fsp, SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child;
	struct rw_cmd cmd;
	ssize_t ret;
	NTSTATUS status;

	if (aiocb->aio_nbytes > 128*1024) {
		/* TODO: support variable buffers */
		errno = EINVAL;
		return -1;
	}

	status = get_idle_child(handle, &child);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("Could not get an idle child\n"));
		return -1;
	}

	child->read_cmd = true;
	child->aiocb = aiocb;
	child->retval.ret_errno = EINPROGRESS;

	ZERO_STRUCT(cmd);
	cmd.n = aiocb->aio_nbytes;
	cmd.offset = aiocb->aio_offset;
	cmd.read_cmd = child->read_cmd;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)child->pid));

	ret = write_fd(child->sockfd, &cmd, sizeof(cmd), fsp->fh->fd);
	if (ret == -1) {
		DEBUG(10, ("write_fd failed: %s\n", strerror(errno)));
		return -1;
	}

	return 0;
}

static int aio_fork_write(struct vfs_handle_struct *handle,
			  struct files_struct *fsp, SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child;
	struct rw_cmd cmd;
	ssize_t ret;
	NTSTATUS status;

	if (aiocb->aio_nbytes > 128*1024) {
		/* TODO: support variable buffers */
		errno = EINVAL;
		return -1;
	}

	status = get_idle_child(handle, &child);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("Could not get an idle child\n"));
		return -1;
	}

	child->read_cmd = false;
	child->aiocb = aiocb;
	child->retval.ret_errno = EINPROGRESS;

	memcpy((void *)child->map->ptr, (void *)aiocb->aio_buf,
	       aiocb->aio_nbytes);

	ZERO_STRUCT(cmd);
	cmd.n = aiocb->aio_nbytes;
	cmd.offset = aiocb->aio_offset;
	cmd.read_cmd = child->read_cmd;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)child->pid));

	ret = write_fd(child->sockfd, &cmd, sizeof(cmd), fsp->fh->fd);
	if (ret == -1) {
		DEBUG(10, ("write_fd failed: %s\n", strerror(errno)));
		return -1;
	}

	return 0;
}

static struct aio_child *aio_fork_find_child(struct vfs_handle_struct *handle,
					     SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		return NULL;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == aiocb) {
			return child;
		}
	}

	return NULL;
}

static ssize_t aio_fork_return_fn(struct vfs_handle_struct *handle,
				  struct files_struct *fsp,
				  SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child = aio_fork_find_child(handle, aiocb);

	if (child == NULL) {
		errno = EINVAL;
		DEBUG(0, ("returning EINVAL\n"));
		return -1;
	}

	child->aiocb = NULL;

	if (child->retval.size == -1) {
		errno = child->retval.ret_errno;
	}

	return child->retval.size;
}

static int aio_fork_cancel(struct vfs_handle_struct *handle,
			   struct files_struct *fsp,
			   SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		errno = EINVAL;
		return -1;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == NULL) {
			continue;
		}
		if (child->aiocb->aio_fildes != fsp->fh->fd) {
			continue;
		}
		if ((aiocb != NULL) && (child->aiocb != aiocb)) {
			continue;
		}

		/*
		 * We let the child do its job, but we discard the result when
		 * it's finished.
		 */

		child->cancelled = true;
	}

	return AIO_CANCELED;
}

static int aio_fork_error_fn(struct vfs_handle_struct *handle,
			     struct files_struct *fsp,
			     SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child = aio_fork_find_child(handle, aiocb);

	if (child == NULL) {
		errno = EINVAL;
		return -1;
	}

	return child->retval.ret_errno;
}

static void aio_fork_suspend_timed_out(struct tevent_context *event_ctx,
				       struct tevent_timer *te,
				       struct timeval now,
				       void *private_data)
{
	bool *timed_out = (bool *)private_data;
	/* Remove this timed event handler. */
	TALLOC_FREE(te);
	*timed_out = true;
}

static int aio_fork_suspend(struct vfs_handle_struct *handle,
			struct files_struct *fsp,
			const SMB_STRUCT_AIOCB * const aiocb_array[],
			int n,
			const struct timespec *timeout)
{
	struct aio_child_list *children = NULL;
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev = NULL;
	int i;
	int ret = -1;
	bool timed_out = false;

	children = init_aio_children(handle);
	if (children == NULL) {
		errno = EINVAL;
		goto out;
	}

	/* This is a blocking call, and has to use a sub-event loop. */
	ev = event_context_init(frame);
	if (ev == NULL) {
		errno = ENOMEM;
		goto out;
	}

	if (timeout) {
		struct timeval tv = convert_timespec_to_timeval(*timeout);
		struct tevent_timer *te = tevent_add_timer(ev,
					frame,
					timeval_current_ofs(tv.tv_sec,
							    tv.tv_usec),
					aio_fork_suspend_timed_out,
					&timed_out);
		if (!te) {
			errno = ENOMEM;
			goto out;
		}
	}

	for (i = 0; i < n; i++) {
		struct aio_child *child = NULL;
		const SMB_STRUCT_AIOCB *aiocb = aiocb_array[i];

		if (!aiocb) {
			continue;
		}

		/*
		 * We're going to cheat here. We know that smbd/aio.c
		 * only calls this when it's waiting for every single
		 * outstanding call to finish on a close, so just wait
		 * individually for each IO to complete. We don't care
		 * what order they finish - only that they all do. JRA.
		 */

		for (child = children->children; child != NULL; child = child->next) {
			if (child->aiocb == NULL) {
				continue;
			}
			if (child->aiocb->aio_fildes != fsp->fh->fd) {
				continue;
			}
			if (child->aiocb != aiocb) {
				continue;
			}

			if (child->aiocb->aio_sigevent.sigev_value.sival_ptr == NULL) {
				continue;
			}

			/* We're never using this event on the
			 * main event context again... */
			TALLOC_FREE(child->sock_event);

			child->sock_event = event_add_fd(ev,
						child,
						child->sockfd,
						EVENT_FD_READ,
						handle_aio_completion,
						child);

			while (1) {
				if (tevent_loop_once(ev) == -1) {
					goto out;
				}

				if (timed_out) {
					errno = EAGAIN;
					goto out;
				}

				/* We set child->aiocb to NULL in our hooked
				 * AIO_RETURN(). */
				if (child->aiocb == NULL) {
					break;
				}
			}
		}
	}

	ret = 0;

  out:

	TALLOC_FREE(frame);
	return ret;
}

static struct vfs_fn_pointers vfs_aio_fork_fns = {
	.aio_read = aio_fork_read,
	.aio_write = aio_fork_write,
	.aio_return_fn = aio_fork_return_fn,
	.aio_cancel = aio_fork_cancel,
	.aio_error_fn = aio_fork_error_fn,
	.aio_suspend = aio_fork_suspend,
};

NTSTATUS vfs_aio_fork_init(void);
NTSTATUS vfs_aio_fork_init(void)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"aio_fork", &vfs_aio_fork_fns);
}