/* Samba source/modules/vfs_cacheprime.c — SAMBA_3_0 branch (SVN r25598). */
/*
 * Copyright (c) James Peach 2005-2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
19 #include "includes.h"
/* Cache priming module.
 *
 * The purpose of this module is to do RAID stripe width reads to prime the
 * buffer cache to do zero-copy I/O for subsequent sendfile calls. The idea is
 * to do a single large read at the start of the file to make sure that most or
 * all of the file is pulled into the buffer cache. Subsequent I/Os have
 * reduced latency.
 *
 * Tunables.
 *
 *      cacheprime:rsize    Amount of readahead in bytes. This should be a
 *                          multiple of the RAID stripe width.
 *      cacheprime:debug    Debug level at which to emit messages.
 */
/* Bounds that the configured cacheprime:rsize value is clamped into. */
#define READAHEAD_MIN (128 * 1024)       /* min is 128 KiB */
#define READAHEAD_MAX (100 * 1024 * 1024) /* max is 100 MiB */

/* Name used for both the config-parameter namespace and log messages. */
#define MODULE "cacheprime"

static int module_debug;          /* debug level for DEBUG() output */
static ssize_t g_readsz = 0;      /* readahead size in bytes; 0 means disabled */
static void * g_readbuf = NULL;   /* scratch buffer of g_readsz bytes, allocated once */
/* Prime the kernel buffer cache with data from the specified file. We use
 * per-fsp data to make sure we only ever do this once. If pread is being
 * emulated by seek/read/seek, then this will suck quite a lot.
 */
49 static BOOL prime_cache(
50 struct vfs_handle_struct * handle,
51 files_struct * fsp,
52 int fd,
53 SMB_OFF_T offset,
54 size_t count)
56 SMB_OFF_T * last;
57 ssize_t nread;
59 last = VFS_ADD_FSP_EXTENSION(handle, fsp, SMB_OFF_T);
60 if (!last) {
61 return False;
64 if (*last == -1) {
65 /* Readahead disabled. */
66 return False;
69 if ((*last + g_readsz) > (offset + count)) {
70 /* Skip readahead ... we've already been here. */
71 return False;
74 DEBUG(module_debug,
75 ("%s: doing readahead of %lld bytes at %lld for %s\n",
76 MODULE, (long long)g_readsz, (long long)*last,
77 fsp->fsp_name));
79 nread = sys_pread(fd, g_readbuf, g_readsz, *last);
80 if (nread < 0) {
81 *last = -1;
82 return False;
85 *last += nread;
86 return True;
89 static int cprime_connect(
90 struct vfs_handle_struct * handle,
91 const char * service,
92 const char * user)
94 module_debug = lp_parm_int(SNUM(handle->conn), MODULE, "debug", 100);
95 if (g_readbuf) {
96 /* Only allocate g_readbuf once. If the config changes and
97 * another client multiplexes onto this smbd, we don't want
98 * to risk memory corruption.
100 return SMB_VFS_NEXT_CONNECT(handle, service, user);
103 g_readsz = conv_str_size(lp_parm_const_string(SNUM(handle->conn),
104 MODULE, "rsize", NULL));
106 if (g_readsz < READAHEAD_MIN) {
107 DEBUG(module_debug, ("%s: %ld bytes of readahead "
108 "requested, using minimum of %u\n",
109 MODULE, (long)g_readsz, READAHEAD_MIN));
110 g_readsz = READAHEAD_MIN;
111 } else if (g_readsz > READAHEAD_MAX) {
112 DEBUG(module_debug, ("%s: %ld bytes of readahead "
113 "requested, using maximum of %u\n",
114 MODULE, (long)g_readsz, READAHEAD_MAX));
115 g_readsz = READAHEAD_MAX;
118 if ((g_readbuf = SMB_MALLOC(g_readsz)) == NULL) {
119 /* Turn off readahead if we can't get a buffer. */
120 g_readsz = 0;
123 return SMB_VFS_NEXT_CONNECT(handle, service, user);
126 static ssize_t cprime_sendfile(
127 struct vfs_handle_struct * handle,
128 int tofd,
129 files_struct * fsp,
130 int fromfd,
131 const DATA_BLOB * header,
132 SMB_OFF_T offset,
133 size_t count)
135 if (g_readbuf && offset == 0) {
136 prime_cache(handle, fsp, fromfd, offset, count);
139 return SMB_VFS_NEXT_SENDFILE(handle, tofd, fsp, fromfd,
140 header, offset, count);
143 static ssize_t cprime_read(
144 vfs_handle_struct * handle,
145 files_struct * fsp,
146 int fd,
147 void * data,
148 size_t count)
150 SMB_OFF_T offset;
152 offset = SMB_VFS_LSEEK(fsp, fd, 0, SEEK_CUR);
153 if (offset >= 0 && g_readbuf) {
154 prime_cache(handle, fsp, fd, offset, count);
155 SMB_VFS_LSEEK(fsp, fd, offset, SEEK_SET);
158 return SMB_VFS_NEXT_READ(handle, fsp, fd, data, count);
161 static ssize_t cprime_pread(
162 vfs_handle_struct * handle,
163 files_struct * fsp,
164 int fd,
165 void * data,
166 size_t count,
167 SMB_OFF_T offset)
169 if (g_readbuf) {
170 prime_cache(handle, fsp, fd, offset, count);
173 return SMB_VFS_NEXT_PREAD(handle, fsp, fd, data, count, offset);
176 static vfs_op_tuple cprime_ops [] =
178 {SMB_VFS_OP(cprime_sendfile),
179 SMB_VFS_OP_SENDFILE, SMB_VFS_LAYER_TRANSPARENT},
180 {SMB_VFS_OP(cprime_pread),
181 SMB_VFS_OP_PREAD, SMB_VFS_LAYER_TRANSPARENT},
182 {SMB_VFS_OP(cprime_read),
183 SMB_VFS_OP_READ, SMB_VFS_LAYER_TRANSPARENT},
184 {SMB_VFS_OP(cprime_connect),
185 SMB_VFS_OP_CONNECT, SMB_VFS_LAYER_TRANSPARENT},
187 {SMB_VFS_OP(NULL), SMB_VFS_OP_NOOP, SMB_VFS_LAYER_NOOP}
/* -------------------------------------------------------------------------
 * Samba module initialisation entry point.
 * -------------------------------------------------------------------------
 */
195 NTSTATUS vfs_cacheprime_init(void);
196 NTSTATUS vfs_cacheprime_init(void)
198 return smb_register_vfs(SMB_VFS_INTERFACE_VERSION, MODULE, cprime_ops);
201 /* vim: set sw=4 ts=4 tw=79 et: */