source3/modules/vfs_aio_fork.c

/*
 * Simulate the Posix AIO using mmap/fork
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "includes.h"
#include "system/filesys.h"
#include "system/shmem.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"

#ifndef MAP_FILE
#define MAP_FILE 0
#endif

struct mmap_area {
	size_t size;
	volatile void *ptr;
};

static int mmap_area_destructor(struct mmap_area *area)
{
	munmap((void *)area->ptr, area->size);
	return 0;
}

static struct mmap_area *mmap_area_init(TALLOC_CTX *mem_ctx, size_t size)
{
	struct mmap_area *result;
	int fd;

	result = talloc(mem_ctx, struct mmap_area);
	if (result == NULL) {
		DEBUG(0, ("talloc failed\n"));
		goto fail;
	}

	fd = open("/dev/zero", O_RDWR);
	if (fd == -1) {
		DEBUG(3, ("open(\"/dev/zero\") failed: %s\n",
			  strerror(errno)));
		goto fail;
	}

	result->ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			   MAP_SHARED|MAP_FILE, fd, 0);
	if (result->ptr == MAP_FAILED) {
		DEBUG(1, ("mmap failed: %s\n", strerror(errno)));
		goto fail;
	}

	close(fd);

	result->size = size;
	talloc_set_destructor(result, mmap_area_destructor);

	return result;

fail:
	TALLOC_FREE(result);
	return NULL;
}

struct rw_cmd {
	size_t n;
	off_t offset;
	bool read_cmd;
};

struct rw_ret {
	ssize_t size;
	int ret_errno;
};

struct aio_child_list;

struct aio_child {
	struct aio_child *prev, *next;
	struct aio_child_list *list;
	SMB_STRUCT_AIOCB *aiocb;
	pid_t pid;
	int sockfd;
	struct fd_event *sock_event;
	struct rw_ret retval;
	struct mmap_area *map;	/* ==NULL means write request */
	bool dont_delete;	/* Marked as in use since last cleanup */
	bool cancelled;
	bool read_cmd;
	bool called_from_suspend;
	bool completion_done;
};

struct aio_child_list {
	struct aio_child *children;
	struct timed_event *cleanup_event;
};

static void free_aio_children(void **p)
{
	TALLOC_FREE(*p);
}

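/*
 * Receive a command blob plus a passed file descriptor from the peer
 * over a unix domain socket: SCM_RIGHTS ancillary data where
 * msg_control is available, the older msg_accrights interface
 * otherwise. Returns the number of data bytes read; *recvfd is set to
 * -1 if no descriptor was passed.
 */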
static ssize_t read_fd(int fd, void *ptr, size_t nbytes, int *recvfd)
{
	struct msghdr msg;
	struct iovec iov[1];
	ssize_t n;
#ifndef HAVE_MSGHDR_MSG_CONTROL
	int newfd;
#endif

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);
#else
#if HAVE_MSGHDR_MSG_ACCTRIGHTS
	msg.msg_accrights = (caddr_t) &newfd;
	msg.msg_accrightslen = sizeof(int);
#else
#error Can not pass file descriptors
#endif
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_flags = 0;

	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	if ( (n = recvmsg(fd, &msg, 0)) <= 0) {
		return(n);
	}

#ifdef HAVE_MSGHDR_MSG_CONTROL
	if ((cmptr = CMSG_FIRSTHDR(&msg)) != NULL
	    && cmptr->cmsg_len == CMSG_LEN(sizeof(int))) {
		if (cmptr->cmsg_level != SOL_SOCKET) {
			DEBUG(10, ("control level != SOL_SOCKET"));
			errno = EINVAL;
			return -1;
		}
		if (cmptr->cmsg_type != SCM_RIGHTS) {
			DEBUG(10, ("control type != SCM_RIGHTS"));
			errno = EINVAL;
			return -1;
		}
		memcpy(recvfd, CMSG_DATA(cmptr), sizeof(*recvfd));
	} else {
		*recvfd = -1;		/* descriptor was not passed */
	}
#else
	if (msg.msg_accrightslen == sizeof(int)) {
		*recvfd = newfd;
	}
	else {
		*recvfd = -1;		/* descriptor was not passed */
	}
#endif

	return(n);
}

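/*
 * Counterpart to read_fd(): send nbytes of data together with the file
 * descriptor sendfd to the peer in a single sendmsg() call.
 */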
static ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd)
{
	struct msghdr msg;
	struct iovec iov[1];

#ifdef HAVE_MSGHDR_MSG_CONTROL
	union {
		struct cmsghdr cm;
		char control[CMSG_SPACE(sizeof(int))];
	} control_un;
	struct cmsghdr *cmptr;

	ZERO_STRUCT(msg);
	ZERO_STRUCT(control_un);

	msg.msg_control = control_un.control;
	msg.msg_controllen = sizeof(control_un.control);

	cmptr = CMSG_FIRSTHDR(&msg);
	cmptr->cmsg_len = CMSG_LEN(sizeof(int));
	cmptr->cmsg_level = SOL_SOCKET;
	cmptr->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmptr), &sendfd, sizeof(sendfd));
#else
	ZERO_STRUCT(msg);
	msg.msg_accrights = (caddr_t) &sendfd;
	msg.msg_accrightslen = sizeof(int);
#endif

	msg.msg_name = NULL;
	msg.msg_namelen = 0;

	ZERO_STRUCT(iov);
	iov[0].iov_base = (void *)ptr;
	iov[0].iov_len = nbytes;
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	return (sendmsg(fd, &msg, 0));
}

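/*
 * Timed event handler: free helper children that have been idle since
 * the last cleanup round and re-arm itself every 30 seconds for as
 * long as children remain.
 */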
static void aio_child_cleanup(struct event_context *event_ctx,
			      struct timed_event *te,
			      struct timeval now,
			      void *private_data)
{
	struct aio_child_list *list = talloc_get_type_abort(
		private_data, struct aio_child_list);
	struct aio_child *child, *next;

	TALLOC_FREE(list->cleanup_event);

	for (child = list->children; child != NULL; child = next) {
		next = child->next;

		if (child->aiocb != NULL) {
			DEBUG(10, ("child %d currently active\n",
				   (int)child->pid));
			continue;
		}

		if (child->dont_delete) {
			DEBUG(10, ("Child %d was active since last cleanup\n",
				   (int)child->pid));
			child->dont_delete = false;
			continue;
		}

		DEBUG(10, ("Child %d idle for more than 30 seconds, "
			   "deleting\n", (int)child->pid));

		TALLOC_FREE(child);
		child = next;
	}

	if (list->children != NULL) {
		/*
		 * Re-schedule the next cleanup round
		 */
		list->cleanup_event = event_add_timed(server_event_context(), list,
						      timeval_add(&now, 30, 0),
						      aio_child_cleanup, list);
	}
}

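/*
 * Fetch the aio_child_list hanging off this VFS handle, creating it on
 * first use, and make sure the periodic cleanup event is scheduled.
 */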
static struct aio_child_list *init_aio_children(struct vfs_handle_struct *handle)
{
	struct aio_child_list *data = NULL;

	if (SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_GET_DATA(handle, data, struct aio_child_list,
					return NULL);
	}

	if (data == NULL) {
		data = talloc_zero(NULL, struct aio_child_list);
		if (data == NULL) {
			return NULL;
		}
	}

	/*
	 * Regardless of whether the child_list had been around or not, make
	 * sure that we have a cleanup timed event. This timed event will
	 * delete itself when it finds that no children are around anymore.
	 */

	if (data->cleanup_event == NULL) {
		data->cleanup_event = event_add_timed(server_event_context(), data,
						      timeval_current_ofs(30, 0),
						      aio_child_cleanup, data);
		if (data->cleanup_event == NULL) {
			TALLOC_FREE(data);
			return NULL;
		}
	}

	if (!SMB_VFS_HANDLE_TEST_DATA(handle)) {
		SMB_VFS_HANDLE_SET_DATA(handle, data, free_aio_children,
					struct aio_child_list, return False);
	}

	return data;
}

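/*
 * Main loop of a forked helper process: receive an rw_cmd and the
 * target file descriptor from the parent, do the pread/pwrite through
 * the shared mmap area, close the fd and send an rw_ret with the
 * result back over the socket.
 */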
static void aio_child_loop(int sockfd, struct mmap_area *map)
{
	while (true) {
		int fd = -1;
		ssize_t ret;
		struct rw_cmd cmd_struct;
		struct rw_ret ret_struct;

		ret = read_fd(sockfd, &cmd_struct, sizeof(cmd_struct), &fd);
		if (ret != sizeof(cmd_struct)) {
			DEBUG(10, ("read_fd returned %d: %s\n", (int)ret,
				   strerror(errno)));
			exit(1);
		}

		DEBUG(10, ("aio_child_loop: %s %d bytes at %d from fd %d\n",
			   cmd_struct.read_cmd ? "read" : "write",
			   (int)cmd_struct.n, (int)cmd_struct.offset, fd));

#ifdef ENABLE_BUILD_FARM_HACKS
		{
			/*
			 * In the build farm, we want erratic behaviour for
			 * async I/O times
			 */
			uint8_t randval;
			unsigned msecs;
			/*
			 * use generate_random_buffer, we just forked from a
			 * common parent state
			 */
			generate_random_buffer(&randval, sizeof(randval));
			msecs = randval + 20;
			DEBUG(10, ("delaying for %u msecs\n", msecs));
			smb_msleep(msecs);
		}
#endif

		ZERO_STRUCT(ret_struct);

		if (cmd_struct.read_cmd) {
			ret_struct.size = sys_pread(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
#if 0
/* This breaks "make test" when run with aio_fork module. */
#ifdef ENABLE_BUILD_FARM_HACKS
			ret_struct.size = MAX(1, ret_struct.size * 0.9);
#endif
#endif
		}
		else {
			ret_struct.size = sys_pwrite(
				fd, (void *)map->ptr, cmd_struct.n,
				cmd_struct.offset);
		}

		DEBUG(10, ("aio_child_loop: syscall returned %d\n",
			   (int)ret_struct.size));

		if (ret_struct.size == -1) {
			ret_struct.ret_errno = errno;
		}

		/*
		 * Close the fd before telling our parent we're done. The
		 * parent might close and re-open the file very quickly, and
		 * with system-level share modes (GPFS) we would get an
		 * unjustified SHARING_VIOLATION.
		 */
		close(fd);

		ret = write_data(sockfd, (char *)&ret_struct,
				 sizeof(ret_struct));
		if (ret != sizeof(ret_struct)) {
			DEBUG(10, ("could not write ret_struct: %s\n",
				   strerror(errno)));
			exit(2);
		}
	}
}

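/*
 * Parent-side fd event handler: read the child's rw_ret off the
 * socket, copy read data out of the shared mmap area into the caller's
 * buffer and complete the aio request, unless the request was
 * cancelled or we are being driven from aio_fork_suspend().
 */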
static void handle_aio_completion(struct event_context *event_ctx,
				  struct fd_event *event, uint16 flags,
				  void *p)
{
	struct aio_extra *aio_ex = NULL;
	struct aio_child *child = (struct aio_child *)p;
	NTSTATUS status;

	DEBUG(10, ("handle_aio_completion called with flags=%d\n", flags));

	if ((flags & EVENT_FD_READ) == 0) {
		return;
	}

	status = read_data(child->sockfd, (char *)&child->retval,
			   sizeof(child->retval));

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1, ("aio child %d died: %s\n", (int)child->pid,
			  nt_errstr(status)));
		child->retval.size = -1;
		child->retval.ret_errno = EIO;
	}

	if (child->aiocb == NULL) {
		DEBUG(1, ("Inactive child died\n"));
		TALLOC_FREE(child);
		return;
	}

	if (child->cancelled) {
		child->aiocb = NULL;
		child->cancelled = false;
		return;
	}

	if (child->read_cmd && (child->retval.size > 0)) {
		SMB_ASSERT(child->retval.size <= child->aiocb->aio_nbytes);
		memcpy((void *)child->aiocb->aio_buf, (void *)child->map->ptr,
		       child->retval.size);
	}

	if (child->called_from_suspend) {
		child->completion_done = true;
		return;
	}
	aio_ex = (struct aio_extra *)child->aiocb->aio_sigevent.sigev_value.sival_ptr;
	smbd_aio_complete_aio_ex(aio_ex);
	TALLOC_FREE(aio_ex);
}

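/*
 * Destructor for an aio_child: nudge the helper process into exiting
 * by sending it a deliberately short command, close the socket and
 * unlink the child from its list.
 */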
static int aio_child_destructor(struct aio_child *child)
{
	char c=0;

	SMB_ASSERT((child->aiocb == NULL) || child->cancelled);

	DEBUG(10, ("aio_child_destructor: removing child %d on fd %d\n",
		   child->pid, child->sockfd));

	/*
	 * closing the sockfd makes the child not return from recvmsg() on RHEL
	 * 5.5 so instead force the child to exit by writing bad data to it
	 */
	write(child->sockfd, &c, sizeof(c));
	close(child->sockfd);
	DLIST_REMOVE(child->list->children, child);
	return 0;
}

/*
 * We have to close all fd's in open files, we might incorrectly hold a system
 * level share mode on a file.
 */

static struct files_struct *close_fsp_fd(struct files_struct *fsp,
					 void *private_data)
{
	if ((fsp->fh != NULL) && (fsp->fh->fd != -1)) {
		close(fsp->fh->fd);
		fsp->fh->fd = -1;
	}
	return NULL;
}

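/*
 * Fork a new helper process: set up the parent<->child socketpair and
 * the shared mmap transfer area, close all open file descriptors in
 * the child (see close_fsp_fd() above) and enter aio_child_loop()
 * there. The parent registers a read event on its end of the
 * socketpair for completion notifications.
 */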
static NTSTATUS create_aio_child(struct smbd_server_connection *sconn,
				 struct aio_child_list *children,
				 size_t map_size,
				 struct aio_child **presult)
{
	struct aio_child *result;
	int fdpair[2];
	NTSTATUS status;

	fdpair[0] = fdpair[1] = -1;

	result = talloc_zero(children, struct aio_child);
	NT_STATUS_HAVE_NO_MEMORY(result);

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fdpair) == -1) {
		status = map_nt_error_from_unix(errno);
		DEBUG(10, ("socketpair() failed: %s\n", strerror(errno)));
		goto fail;
	}

	DEBUG(10, ("fdpair = %d/%d\n", fdpair[0], fdpair[1]));

	result->map = mmap_area_init(result, map_size);
	if (result->map == NULL) {
		status = map_nt_error_from_unix(errno);
		DEBUG(0, ("Could not create mmap area\n"));
		goto fail;
	}

	result->pid = fork();
	if (result->pid == -1) {
		status = map_nt_error_from_unix(errno);
		DEBUG(0, ("fork failed: %s\n", strerror(errno)));
		goto fail;
	}

	if (result->pid == 0) {
		close(fdpair[0]);
		result->sockfd = fdpair[1];
		files_forall(sconn, close_fsp_fd, NULL);
		aio_child_loop(result->sockfd, result->map);
	}

	DEBUG(10, ("Child %d created with sockfd %d\n",
		   result->pid, fdpair[0]));

	result->sockfd = fdpair[0];
	close(fdpair[1]);

	result->sock_event = event_add_fd(server_event_context(), result,
					  result->sockfd, EVENT_FD_READ,
					  handle_aio_completion,
					  result);
	if (result->sock_event == NULL) {
		status = NT_STATUS_NO_MEMORY;
		DEBUG(0, ("event_add_fd failed\n"));
		goto fail;
	}

	result->list = children;
	DLIST_ADD(children->children, result);

	talloc_set_destructor(result, aio_child_destructor);

	*presult = result;

	return NT_STATUS_OK;

 fail:
	if (fdpair[0] != -1) close(fdpair[0]);
	if (fdpair[1] != -1) close(fdpair[1]);
	TALLOC_FREE(result);

	return status;
}

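/*
 * Hand out an idle helper child for this handle, forking a new one
 * with a 128k transfer buffer if none is available. Marking it
 * dont_delete keeps the next cleanup round from reaping it.
 */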
static NTSTATUS get_idle_child(struct vfs_handle_struct *handle,
			       struct aio_child **pchild)
{
	struct aio_child_list *children;
	struct aio_child *child;
	NTSTATUS status;

	children = init_aio_children(handle);
	if (children == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == NULL) {
			/* idle */
			break;
		}
	}

	if (child == NULL) {
		DEBUG(10, ("no idle child found, creating new one\n"));

		status = create_aio_child(handle->conn->sconn, children,
					  128*1024, &child);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("create_aio_child failed: %s\n",
				   nt_errstr(status)));
			return status;
		}
	}

	child->dont_delete = true;

	*pchild = child;
	return NT_STATUS_OK;
}

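/*
 * Dispatch an asynchronous read: grab an idle child, record the aiocb
 * and send the rw_cmd together with the file's fd to the child. The
 * data ends up in the child's mmap area and is copied back to the
 * caller's buffer in handle_aio_completion().
 */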
static int aio_fork_read(struct vfs_handle_struct *handle,
			 struct files_struct *fsp, SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child;
	struct rw_cmd cmd;
	ssize_t ret;
	NTSTATUS status;

	if (aiocb->aio_nbytes > 128*1024) {
		/* TODO: support variable buffers */
		errno = EINVAL;
		return -1;
	}

	status = get_idle_child(handle, &child);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("Could not get an idle child\n"));
		return -1;
	}

	child->read_cmd = true;
	child->aiocb = aiocb;
	child->retval.ret_errno = EINPROGRESS;

	ZERO_STRUCT(cmd);
	cmd.n = aiocb->aio_nbytes;
	cmd.offset = aiocb->aio_offset;
	cmd.read_cmd = child->read_cmd;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)child->pid));

	ret = write_fd(child->sockfd, &cmd, sizeof(cmd), fsp->fh->fd);
	if (ret == -1) {
		DEBUG(10, ("write_fd failed: %s\n", strerror(errno)));
		return -1;
	}

	return 0;
}

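/*
 * Dispatch an asynchronous write: copy the caller's buffer into the
 * shared mmap area first, then hand the rw_cmd and the file's fd to an
 * idle child.
 */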
static int aio_fork_write(struct vfs_handle_struct *handle,
			  struct files_struct *fsp, SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child;
	struct rw_cmd cmd;
	ssize_t ret;
	NTSTATUS status;

	if (aiocb->aio_nbytes > 128*1024) {
		/* TODO: support variable buffers */
		errno = EINVAL;
		return -1;
	}

	status = get_idle_child(handle, &child);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(10, ("Could not get an idle child\n"));
		return -1;
	}

	child->read_cmd = false;
	child->aiocb = aiocb;
	child->retval.ret_errno = EINPROGRESS;

	memcpy((void *)child->map->ptr, (void *)aiocb->aio_buf,
	       aiocb->aio_nbytes);

	ZERO_STRUCT(cmd);
	cmd.n = aiocb->aio_nbytes;
	cmd.offset = aiocb->aio_offset;
	cmd.read_cmd = child->read_cmd;

	DEBUG(10, ("sending fd %d to child %d\n", fsp->fh->fd,
		   (int)child->pid));

	ret = write_fd(child->sockfd, &cmd, sizeof(cmd), fsp->fh->fd);
	if (ret == -1) {
		DEBUG(10, ("write_fd failed: %s\n", strerror(errno)));
		return -1;
	}

	return 0;
}

static struct aio_child *aio_fork_find_child(struct vfs_handle_struct *handle,
					     SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		return NULL;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == aiocb) {
			return child;
		}
	}

	return NULL;
}

static ssize_t aio_fork_return_fn(struct vfs_handle_struct *handle,
				  struct files_struct *fsp,
				  SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child = aio_fork_find_child(handle, aiocb);

	if (child == NULL) {
		errno = EINVAL;
		DEBUG(0, ("returning EINVAL\n"));
		return -1;
	}

	child->aiocb = NULL;

	if (child->cancelled) {
		errno = ECANCELED;
		return -1;
	}

	if (child->retval.size == -1) {
		errno = child->retval.ret_errno;
	}

	return child->retval.size;
}

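/*
 * Cancel outstanding requests on this fsp (or a single aiocb if one is
 * given): the child is left to finish its I/O, but the result is
 * discarded once it arrives. AIO_CANCELED is returned either way.
 */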
static int aio_fork_cancel(struct vfs_handle_struct *handle,
			   struct files_struct *fsp,
			   SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child_list *children;
	struct aio_child *child;

	children = init_aio_children(handle);
	if (children == NULL) {
		errno = EINVAL;
		return -1;
	}

	for (child = children->children; child != NULL; child = child->next) {
		if (child->aiocb == NULL) {
			continue;
		}
		if (child->aiocb->aio_fildes != fsp->fh->fd) {
			continue;
		}
		if ((aiocb != NULL) && (child->aiocb != aiocb)) {
			continue;
		}

		/*
		 * We let the child do its job, but we discard the result when
		 * it's finished.
		 */

		child->cancelled = true;
	}

	return AIO_CANCELED;
}

static int aio_fork_error_fn(struct vfs_handle_struct *handle,
			     struct files_struct *fsp,
			     SMB_STRUCT_AIOCB *aiocb)
{
	struct aio_child *child = aio_fork_find_child(handle, aiocb);

	if (child == NULL) {
		errno = EINVAL;
		return -1;
	}

	return child->retval.ret_errno;
}

static void aio_fork_suspend_timed_out(struct tevent_context *event_ctx,
				       struct tevent_timer *te,
				       struct timeval now,
				       void *private_data)
{
	bool *timed_out = (bool *)private_data;
	/* Remove this timed event handler. */
	TALLOC_FREE(te);
	*timed_out = true;
}

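/*
 * Blocking wait for the given requests to complete, driven by a
 * private tevent context. An optional timeout arms
 * aio_fork_suspend_timed_out(), which makes the wait fail with EAGAIN.
 */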
static int aio_fork_suspend(struct vfs_handle_struct *handle,
			    struct files_struct *fsp,
			    const SMB_STRUCT_AIOCB * const aiocb_array[],
			    int n,
			    const struct timespec *timeout)
{
	struct aio_child_list *children = NULL;
	TALLOC_CTX *frame = talloc_stackframe();
	struct event_context *ev = NULL;
	int i;
	int ret = -1;
	bool timed_out = false;
	int err;

	children = init_aio_children(handle);
	if (children == NULL) {
		errno = EINVAL;
		goto out;
	}

	/* This is a blocking call, and has to use a sub-event loop. */
	ev = event_context_init(frame);
	if (ev == NULL) {
		errno = ENOMEM;
		goto out;
	}

	if (timeout) {
		struct timeval tv = convert_timespec_to_timeval(*timeout);
		struct tevent_timer *te = tevent_add_timer(ev,
						frame,
						timeval_current_ofs(tv.tv_sec,
								    tv.tv_usec),
						aio_fork_suspend_timed_out,
						&timed_out);
		if (!te) {
			errno = ENOMEM;
			goto out;
		}
	}

	for (i = 0; i < n; i++) {
		struct aio_child *child = NULL;
		const SMB_STRUCT_AIOCB *aiocb = aiocb_array[i];

		if (!aiocb) {
			continue;
		}

		/*
		 * We're going to cheat here. We know that smbd/aio.c
		 * only calls this when it's waiting for every single
		 * outstanding call to finish on a close, so just wait
		 * individually for each IO to complete. We don't care
		 * what order they finish - only that they all do. JRA.
		 */

		for (child = children->children; child != NULL; child = child->next) {
			struct tevent_fd *event;

			if (child->aiocb == NULL) {
				continue;
			}
			if (child->aiocb->aio_fildes != fsp->fh->fd) {
				continue;
			}
			if (child->aiocb != aiocb) {
				continue;
			}

			if (child->aiocb->aio_sigevent.sigev_value.sival_ptr == NULL) {
				continue;
			}

			event = event_add_fd(ev,
					     frame,
					     child->sockfd,
					     EVENT_FD_READ,
					     handle_aio_completion,
					     child);
			if (event == NULL) {
				errno = ENOMEM;
				goto out;
			}

			child->called_from_suspend = true;

			while (!child->completion_done) {
				if (tevent_loop_once(ev) == -1) {
					goto out;
				}

				if (timed_out) {
					errno = EAGAIN;
					goto out;
				}
			}
		}
	}

	ret = 0;

  out:

	err = errno;
	TALLOC_FREE(frame);
	errno = err;
	return ret;
}

static int aio_fork_connect(vfs_handle_struct *handle, const char *service,
			    const char *user)
{
	/*********************************************************************
	 * How many threads to initialize ?
	 * 100 per process seems insane as a default until you realize that
	 * (a) Threads terminate after 1 second when idle.
	 * (b) Throttling is done in SMB2 via the crediting algorithm.
	 * (c) SMB1 clients are limited to max_mux (50) outstanding
	 *     requests and Windows clients don't use this anyway.
	 * Essentially we want this to be unlimited unless smb.conf
	 * says different.
	 *********************************************************************/
	aio_pending_size = 100;
	return SMB_VFS_NEXT_CONNECT(handle, service, user);
}

static struct vfs_fn_pointers vfs_aio_fork_fns = {
	.connect_fn = aio_fork_connect,
	.aio_read_fn = aio_fork_read,
	.aio_write_fn = aio_fork_write,
	.aio_return_fn = aio_fork_return_fn,
	.aio_cancel_fn = aio_fork_cancel,
	.aio_error_fn = aio_fork_error_fn,
	.aio_suspend_fn = aio_fork_suspend,
};

NTSTATUS vfs_aio_fork_init(void);
NTSTATUS vfs_aio_fork_init(void)
{
	return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
				"aio_fork", &vfs_aio_fork_fns);
}