source3/smbd/fileio.c
#define OLD_NTDOMAIN 1
/*
   Unix SMB/Netbios implementation.
   Version 1.9.
   read/write to a files_struct
   Copyright (C) Andrew Tridgell 1992-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"

extern int DEBUGLEVEL;

static BOOL setup_write_cache(files_struct *, SMB_OFF_T);

/****************************************************************************
 seek a file. Try to avoid the seek if possible
****************************************************************************/

SMB_OFF_T seek_file(files_struct *fsp,SMB_OFF_T pos)
{
  SMB_OFF_T offset = 0;
  SMB_OFF_T seek_ret;

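  /*
   * Editorial note (assumption): for print files opened on a share with
   * "postscript = yes", the spool file is given a 3 byte "%!\n" PostScript
   * prefix elsewhere in the print path, so client offsets are shifted past
   * it here. The prefix itself is not written by this file.
   */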
  if (fsp->print_file && lp_postscript(fsp->conn->service))
    offset = 3;

  seek_ret = fsp->conn->vfs_ops.lseek(fsp->fd,pos+offset,SEEK_SET);

  /*
   * We want to maintain the fiction that we can seek
   * on a fifo for file system purposes. This allows
   * people to set up UNIX fifo's that feed data to Windows
   * applications. JRA.
   */

  if((seek_ret == -1) && (errno == ESPIPE)) {
    seek_ret = pos+offset;
    errno = 0;
  }

  if((seek_ret == -1) || (seek_ret != pos+offset)) {
    DEBUG(0,("seek_file: sys_lseek failed. Error was %s\n", strerror(errno) ));
    fsp->pos = -1;
    return -1;
  }

  fsp->pos = seek_ret - offset;

  DEBUG(10,("seek_file: requested pos = %.0f, new pos = %.0f\n",
        (double)(pos+offset), (double)fsp->pos ));

  return(fsp->pos);
}

/****************************************************************************
 Read from write cache if we can.
****************************************************************************/

static unsigned int cache_read_hits;

BOOL read_from_write_cache(files_struct *fsp,char *data,SMB_OFF_T pos,size_t n)
{
  write_cache *wcp = fsp->wcp;

  if(!wcp)
    return False;

  if(n > wcp->data_size || pos < wcp->offset || pos + n > wcp->offset + wcp->data_size)
    return False;

  memcpy(data, wcp->data + (pos - wcp->offset), n);

  cache_read_hits++;

  return True;
}

/****************************************************************************
 read from a file
****************************************************************************/

ssize_t read_file(files_struct *fsp,char *data,SMB_OFF_T pos,size_t n)
{
  ssize_t ret=0,readret;

  /* you can't read from print files */
  if (fsp->print_file) {
    return -1;
  }

  /*
   * Serve from write cache if we can.
   */

  if(read_from_write_cache(fsp, data, pos, n))
    return n;

  flush_write_cache(fsp, READ_FLUSH);

  if (seek_file(fsp,pos) == -1) {
    DEBUG(3,("read_file: Failed to seek to %.0f\n",(double)pos));
    return(ret);
  }

  if (n > 0) {
    readret = fsp->conn->vfs_ops.read(fsp->fd,data,n);
    if (readret == -1)
      return -1;
    if (readret > 0) ret += readret;
  }

  return(ret);
}

/* Write cache static counters. */

static unsigned int abutted_writes;
static unsigned int total_writes;
static unsigned int non_oplock_writes;
static unsigned int direct_writes;
static unsigned int init_writes;
static unsigned int flushed_writes;
static unsigned int num_perfect_writes;
static unsigned int flush_reasons[NUM_FLUSH_REASONS];

/* how many write cache buffers have been allocated */
static unsigned int allocated_write_caches;
static unsigned int num_write_caches;
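
/*
 * allocated_write_caches counts buffers that have been malloc'ed by
 * setup_write_cache(), whether or not they currently hold data;
 * num_write_caches counts buffers that currently hold unflushed data
 * (incremented when a cache goes from empty to non-empty, decremented
 * when it is flushed).
 */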

/****************************************************************************
 *Really* write to a file
****************************************************************************/

static ssize_t real_write_file(files_struct *fsp,char *data,SMB_OFF_T pos, size_t n)
{
  if ((pos != -1) && (seek_file(fsp,pos) == -1))
    return -1;

  return write_data(fsp->fd,data,n);
}

/****************************************************************************
 write to a file
****************************************************************************/

ssize_t write_file(files_struct *fsp, char *data, SMB_OFF_T pos, size_t n)
{
  write_cache *wcp = fsp->wcp;
  ssize_t total_written = 0;
  int write_path = -1;

  if (fsp->print_file) {
    return print_job_write(fsp->print_jobid, data, n);
  }

  if (!fsp->can_write) {
    errno = EPERM;
    return(0);
  }

  if (!fsp->modified) {
    SMB_STRUCT_STAT st;
    fsp->modified = True;

    if (fsp->conn->vfs_ops.fstat(fsp->fd,&st) == 0) {
      int dosmode = dos_mode(fsp->conn,fsp->fsp_name,&st);
      if (MAP_ARCHIVE(fsp->conn) && !IS_DOS_ARCHIVE(dosmode)) {
        file_chmod(fsp->conn,fsp->fsp_name,dosmode | aARCH,&st);
      }

      /*
       * If this is the first write and we have an exclusive oplock then setup
       * the write cache.
       */

      if ((fsp->oplock_type == EXCLUSIVE_OPLOCK) && !wcp) {
        setup_write_cache(fsp, st.st_size);
        wcp = fsp->wcp;
      }
    }
  }

  total_writes++;
  if (!fsp->oplock_type) {
    non_oplock_writes++;
  }

  /*
   * If this file is level II oplocked then we need
   * to grab the shared memory lock and inform all
   * other files with a level II lock that they need
   * to flush their read caches. We keep the lock over
   * the shared memory area whilst doing this.
   */

  if (LEVEL_II_OPLOCK_TYPE(fsp->oplock_type)) {
    share_mode_entry *share_list = NULL;
    pid_t pid = sys_getpid();
    int token = -1;
    int num_share_modes = 0;
    int i;

    if (lock_share_entry_fsp(fsp) == False) {
      DEBUG(0,("write_file: failed to lock share mode entry for file %s.\n", fsp->fsp_name ));
    }

    num_share_modes = get_share_modes(fsp->conn, fsp->dev, fsp->inode, &share_list);

    for(i = 0; i < num_share_modes; i++) {
      share_mode_entry *share_entry = &share_list[i];

      /*
       * As there could have been multiple writes waiting at the lock_share_entry
       * gate we may not be the first to enter. Hence the state of the op_types
       * in the share mode entries may be partly NO_OPLOCK and partly LEVEL_II
       * oplock. It will do no harm to re-send break messages to those smbd's
       * that are still waiting their turn to remove their LEVEL_II state, and
       * also no harm to ignore existing NO_OPLOCK states. JRA.
       */

      if (share_entry->op_type == NO_OPLOCK)
        continue;

      /* Paranoia .... */
      if (EXCLUSIVE_OPLOCK_TYPE(share_entry->op_type)) {
        DEBUG(0,("write_file: PANIC. share mode entry %d is an exclusive oplock !\n", i ));
        unlock_share_entry(fsp->conn, fsp->dev, fsp->inode);
        abort();
      }

      /*
       * Check if this is a file we have open (including the
       * file we've been called to do write_file on). If so
       * then break it directly without releasing the lock.
       */

      if (pid == share_entry->pid) {
        files_struct *new_fsp = file_find_dit(fsp->dev, fsp->inode, &share_entry->time);

        /* Paranoia check... */
        if(new_fsp == NULL) {
          DEBUG(0,("write_file: PANIC. share mode entry %d is not a local file !\n", i ));
          unlock_share_entry(fsp->conn, fsp->dev, fsp->inode);
          abort();
        }

        oplock_break_level2(new_fsp, True, token);

      } else {

        /*
         * This is a remote file and so we send an asynchronous
         * message.
         */

        request_oplock_break(share_entry, fsp->dev, fsp->inode);
      }
    }

    free((char *)share_list);
    unlock_share_entry_fsp(fsp);
  }

  /* Paranoia check... */
  if (LEVEL_II_OPLOCK_TYPE(fsp->oplock_type)) {
    DEBUG(0,("write_file: PANIC. File %s still has a level II oplock.\n", fsp->fsp_name));
    abort();
  }

  if (total_writes % 500 == 0) {
    DEBUG(3,("WRITECACHE: initwrites=%u abutted=%u flushes=%u total=%u \
nonop=%u allocated=%u active=%u direct=%u perfect=%u readhits=%u\n",
         init_writes, abutted_writes, flushed_writes, total_writes, non_oplock_writes,
         allocated_write_caches,
         num_write_caches, direct_writes, num_perfect_writes, cache_read_hits ));

    DEBUG(3,("WRITECACHE: SEEK=%d, READ=%d, WRITE=%d, READRAW=%d, OPLOCK=%d, CLOSE=%d, SYNC=%d\n",
         flush_reasons[SEEK_FLUSH],
         flush_reasons[READ_FLUSH],
         flush_reasons[WRITE_FLUSH],
         flush_reasons[READRAW_FLUSH],
         flush_reasons[OPLOCK_RELEASE_FLUSH],
         flush_reasons[CLOSE_FLUSH],
         flush_reasons[SYNC_FLUSH] ));
  }

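  /*
   * No write cache is attached to this fsp (a cache is only set up on the
   * first write when the file has an exclusive oplock, "write cache size"
   * is non-zero and a cache slot is free), so write directly to disk.
   */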
  if(!wcp) {
    direct_writes++;
    return real_write_file(fsp, data, pos, n);
  }

  DEBUG(9,("write_file(fd=%d pos=%d size=%d) wofs=%d wsize=%d\n",
       fsp->fd, (int)pos, (int)n, (int)wcp->offset, (int)wcp->data_size));

  /*
   * If we have active cache and it isn't contiguous then we flush.
   * NOTE: There is a small problem with running out of disk ....
   */

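  /*
   * write_path records which of the cases below was taken, purely for the
   * WRITE_FLUSH debug message:
   *   1 - start of the write overlaps or abuts the cached data
   *   2 - end of the write overlaps the cached data
   *   3 - non-contiguous write that extends the file and fits in the buffer
   *   4 - write that cannot be cached against the existing contents
   */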
  if (wcp->data_size) {

    BOOL cache_flush_needed = False;

    if ((pos >= wcp->offset) && (pos <= wcp->offset + wcp->data_size)) {

      /*
       * Start of write overlaps or abuts the existing data.
       */

      size_t data_used = MIN((wcp->alloc_size - (pos - wcp->offset)), n);

      memcpy(wcp->data + (pos - wcp->offset), data, data_used);

      /*
       * Update the current buffer size with the new data.
       */

      if(pos + data_used > wcp->offset + wcp->data_size)
        wcp->data_size = pos + data_used - wcp->offset;

      /*
       * If we used all the data then
       * return here.
       */

      if(n == data_used)
        return n;
      else
        cache_flush_needed = True;

      /*
       * Move the start of data forward by the amount used,
       * cut down the amount left by the same amount.
       */

      data += data_used;
      pos += data_used;
      n -= data_used;

      abutted_writes++;
      total_written = data_used;

      write_path = 1;

    } else if ((pos < wcp->offset) && (pos + n > wcp->offset) &&
               (pos + n <= wcp->offset + wcp->alloc_size)) {

      /*
       * End of write overlaps the existing data.
       */

      size_t data_used = pos + n - wcp->offset;

      memcpy(wcp->data, data + n - data_used, data_used);

      /*
       * Update the current buffer size with the new data.
       */

      if(pos + n > wcp->offset + wcp->data_size)
        wcp->data_size = pos + n - wcp->offset;

      /*
       * We don't need to move the start of data, but we
       * cut down the amount left by the amount used.
       */

      n -= data_used;

      /*
       * We cannot have used all the data here.
       */

      cache_flush_needed = True;

      abutted_writes++;
      total_written = data_used;

      write_path = 2;

    } else if ( (pos >= wcp->file_size) &&
                (pos > wcp->offset + wcp->data_size) &&
                (pos < wcp->offset + wcp->alloc_size) ) {

      /*
       * Non-contiguous write, part of which fits within
       * the cache buffer and is extending the file.
       */

      size_t data_used;

      if(pos + n <= wcp->offset + wcp->alloc_size)
        data_used = n;
      else
        data_used = wcp->offset + wcp->alloc_size - pos;

      /*
       * Fill in the non-contiguous area with zeros.
       */

      memset(wcp->data + wcp->data_size, '\0',
             pos - (wcp->offset + wcp->data_size) );

      memcpy(wcp->data + (pos - wcp->offset), data, data_used);

      /*
       * Update the current buffer size with the new data.
       */

      if(pos + data_used > wcp->offset + wcp->data_size)
        wcp->data_size = pos + data_used - wcp->offset;

      /*
       * Update the known file length.
       */

      wcp->file_size = wcp->offset + wcp->data_size;

#if 0
      if (set_filelen(fsp->fd, wcp->file_size) == -1) {
        DEBUG(0,("write_file: error %s in setting file to length %.0f\n",
                 strerror(errno), (double)wcp->file_size ));
        return -1;
      }
#endif

      /*
       * If we used all the data then
       * return here.
       */

      if(n == data_used)
        return n;
      else
        cache_flush_needed = True;

      /*
       * Move the start of data forward by the amount used,
       * cut down the amount left by the same amount.
       */

      data += data_used;
      pos += data_used;
      n -= data_used;

      abutted_writes++;
      total_written = data_used;

      write_path = 3;

    } else {

      /*
       * Write is bigger than buffer, or there is no overlap on the
       * low or high ends.
       */

      DEBUG(9,("write_file: non cacheable write : fd = %d, pos = %.0f, len = %u, current cache pos = %.0f \
len = %u\n",fsp->fd, (double)pos, (unsigned int)n, (double)wcp->offset, (unsigned int)wcp->data_size ));

      /*
       * Update the file size if needed.
       */

      if(pos + n > wcp->file_size)
        wcp->file_size = pos + n;

      /*
       * If the write would fit in the cache, and is larger than
       * the data already in the cache, flush the cache and
       * preferentially copy the new data into it. Otherwise
       * just write the data directly.
       */

      if ( n <= wcp->alloc_size && n > wcp->data_size) {
        cache_flush_needed = True;
      } else {
        direct_writes++;
        return real_write_file(fsp, data, pos, n);
      }

      write_path = 4;

    }

    if(wcp->data_size > wcp->file_size)
      wcp->file_size = wcp->data_size;

    if (cache_flush_needed) {
      flushed_writes++;

      DEBUG(3,("WRITE_FLUSH:%d: due to non-contiguous write: fd = %d, size = %.0f, pos = %.0f, \
n = %u, wcp->offset=%.0f, wcp->data_size=%u\n",
               write_path, fsp->fd, (double)wcp->file_size, (double)pos, (unsigned int)n,
               (double)wcp->offset, (unsigned int)wcp->data_size ));

      flush_write_cache(fsp, WRITE_FLUSH);
    }
  }

  /*
   * If the write request is bigger than the cache
   * size, write it all out.
   */

  if (n > wcp->alloc_size ) {
    if(real_write_file(fsp, data, pos, n) == -1)
      return -1;
    direct_writes++;
    return total_written + n;
  }

  /*
   * If there's any data left, cache it.
   */

  if (n) {
    if (wcp->data_size) {
      abutted_writes++;
      DEBUG(9,("abutted write (%u)\n", abutted_writes));
    } else {
      init_writes++;
    }
    memcpy(wcp->data+wcp->data_size, data, n);
    if (wcp->data_size == 0) {
      wcp->offset = pos;
      num_write_caches++;
    }
    wcp->data_size += n;
    DEBUG(9,("cache return %u\n", (unsigned int)n));
    total_written += n;
    return total_written; /* .... that's a write :) */
  }

  return total_written;
}

/****************************************************************************
 Delete the write cache structure.
****************************************************************************/

void delete_write_cache(files_struct *fsp)
{
  write_cache *wcp;

  if(!fsp)
    return;

  if(!(wcp = fsp->wcp))
    return;

  allocated_write_caches--;

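  /* The cache must already have been flushed before it is deleted -
     any data still pending here would be lost. */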
  SMB_ASSERT(wcp->data_size == 0);

  free(wcp->data);
  free(wcp);

  fsp->wcp = NULL;
}

/****************************************************************************
 Setup the write cache structure.
****************************************************************************/

static BOOL setup_write_cache(files_struct *fsp, SMB_OFF_T file_size)
{
  ssize_t alloc_size = lp_write_cache_size(SNUM(fsp->conn));
  write_cache *wcp;

  if (allocated_write_caches >= MAX_WRITE_CACHES) return False;

  if(alloc_size == 0 || fsp->wcp)
    return False;

  if((wcp = (write_cache *)malloc(sizeof(write_cache))) == NULL) {
    DEBUG(0,("setup_write_cache: malloc fail.\n"));
    return False;
  }

  wcp->file_size = file_size;
  wcp->offset = 0;
  wcp->alloc_size = alloc_size;
  wcp->data_size = 0;
  if((wcp->data = malloc(wcp->alloc_size)) == NULL) {
    DEBUG(0,("setup_write_cache: malloc fail for buffer size %u.\n",
             (unsigned int)wcp->alloc_size ));
    free(wcp);
    return False;
  }

  fsp->wcp = wcp;
  allocated_write_caches++;

  return True;
}

/****************************************************************************
 Cope with a size change.
****************************************************************************/

void set_filelen_write_cache(files_struct *fsp, SMB_OFF_T file_size)
{
  if(fsp->wcp) {
    flush_write_cache(fsp, SIZECHANGE_FLUSH);
    fsp->wcp->file_size = file_size;
  }
}

/*******************************************************************
 Flush a write cache struct to disk.
********************************************************************/

ssize_t flush_write_cache(files_struct *fsp, enum flush_reason_enum reason)
{
  write_cache *wcp = fsp->wcp;
  size_t data_size;

  if(!wcp || !wcp->data_size)
    return 0;

  data_size = wcp->data_size;
  wcp->data_size = 0;

  num_write_caches--;

  flush_reasons[(int)reason]++;

  DEBUG(9,("flushing write cache: fd = %d, off=%.0f, size=%u\n",
           fsp->fd, (double)wcp->offset, (unsigned int)data_size));

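  /* A "perfect" write is one that exactly filled the cache buffer
     before it had to be flushed. */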
  if(data_size == wcp->alloc_size)
    num_perfect_writes++;

  return real_write_file(fsp, wcp->data, wcp->offset, data_size);
}

/*******************************************************************
 sync a file
********************************************************************/

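/*
 * Note that the cache flush and fsync only happen when "strict sync" is
 * enabled for the share; otherwise a client sync request is effectively
 * a no-op here.
 */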
void sync_file(connection_struct *conn, files_struct *fsp)
{
  if(lp_strict_sync(SNUM(conn)) && fsp->fd != -1) {
    flush_write_cache(fsp, SYNC_FLUSH);
    conn->vfs_ops.fsync(fsp->fd);
  }
}

#undef OLD_NTDOMAIN