/*
 * Copyright (c) 1995 - 2007 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <config.h>

RCSID("$Id$");

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <unistd.h>

#include <fs_errors.h>

#include <roken.h>

#include <arla_local.h>
struct abuf_data {
    FCacheEntry *entry;
};

static int
abuf_flush(fbuf *f);

static inline FCacheEntry *
abuf_entry(fbuf *f)
{
    return ((struct abuf_data *)(f)->data)->entry;
}
#ifdef HAVE_MMAP

/*
 * mmap implementation for copy{rx2cache,cache2rx}. It's a little
 * complicated to support reading/writing on non page boundaries, plus
 * the block handling.
 */
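/*
 * A sketch of the offset arithmetic used below, assuming a page size of
 * 4096 bytes and a cache block size of 65536 bytes (illustrative values
 * only; the real ones come from getpagesize() and fcache_getblocksize()):
 *
 *   off        = 70000
 *   adjust_off = off % 4096             = 368
 *   real_off   = off - adjust_off       = 69632   (page aligned)
 *   block_off  = block_offset(real_off) = 65536   (start of the cache block)
 *   mmap_off   = real_off - block_off   = 4096    (page-aligned offset
 *                                                  within the block file)
 *
 * The block file is then mapped at mmap_off and the transfer starts
 * adjust_off bytes into the mapping.
 */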
#if !defined(MAP_FAILED)
#define MAP_FAILED ((void *)(-1))
#endif

/*
 * use mmap for transfer between rx call and cache files
 */

static int
cachetransfer(struct rx_call *call, FCacheEntry *entry,
	      off_t off, off_t len, Bool rxwritep)
{
    void *buf;
    int rw_len;
    int ret = 0;
    off_t adjust_off, adjust_len;
    size_t mmap_len, block_len;
    size_t size;
    int iosize = getpagesize();
    int fd = 0;

    if (len == 0)
	return ret;

    adjust_off = off % iosize;

    while (len > 0) {
	off_t real_off = off - adjust_off;
	off_t block_off = block_offset(real_off);
	off_t mmap_off = real_off - block_off;

	block_len = fcache_getblocksize() - mmap_off;
	size = len + adjust_off;
	if (size > block_len)
	    size = block_len;

	if (size % iosize)
	    adjust_len = iosize - (size % iosize);
	else
	    adjust_len = 0;
	mmap_len = size + adjust_len;

	if (fd == 0 || mmap_off == 0) {
	    if (fd != 0)
		if (close(fd))
		    return errno;

	    fd = fcache_open_block(entry, block_off, !rxwritep);
	    if (fd < 0)
		return errno;

	    if (!rxwritep) {
		/*
		 * always truncate to be on the "safe" side.
		 * We assume that we always get a full block or to EOF.
		 */
		ret = ftruncate(fd, mmap_len);
		if (ret)
		    break;
	    }
	}

	if (rxwritep)
	    buf = mmap(0, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_off);
	else
	    buf = mmap(0, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, mmap_off);

	if (buf == (void *) MAP_FAILED) {
	    ret = errno;
	    break;
	}

	if (rxwritep)
	    rw_len = rx_Write(call, (char *)buf + adjust_off, size - adjust_off);
	else
	    rw_len = rx_Read(call, (char *)buf + adjust_off, size - adjust_off);

	if (rw_len != mmap_len - adjust_off)
	    ret = conv_to_arla_errno(rx_GetCallError(call));

	len -= rw_len;
	off += rw_len;
	adjust_off = 0;

	if (!rxwritep) {
	    if (msync(buf, mmap_len, MS_ASYNC))
		ret = errno;
	}

	if (munmap(buf, mmap_len))
	    ret = errno;

	if (ret)
	    break;
    }

    if (fd != 0)
	close(fd);

    return ret;
}
#else /* !HAVE_MMAP */

/*
 * use malloc for transfer between rx call and cache files
 */

static int
cachetransfer(struct rx_call *call, FCacheEntry *entry,
	      off_t off, off_t len, Bool rxwritep)
{
    void *buf;
    int ret = 0;
    size_t io_len, block_len;
    ssize_t nread, nwrite;
    u_long bufsize = 8192;
    int fd = 0;

    if (len == 0)
	return 0;

    buf = malloc(bufsize);
    if (buf == NULL)
	return ENOMEM;

    while (len > 0) {
	uint64_t block_off = block_offset(off);
	off_t buf_off = off - block_off;

	if (block_off == off) {
	    if ((fd != 0 && close(fd) != 0)
		|| (fd = fcache_open_block(entry, block_off, !rxwritep)) < 0) {
		ret = errno;
		fd = 0;
		arla_debug_assert(0);
		break;
	    }
	}

	io_len = min(bufsize, len);
	block_len = fcache_getblocksize() - buf_off;

	if (io_len > block_len)
	    io_len = block_len;

	if (rxwritep) {
	    nread = pread(fd, buf, io_len, buf_off);
	    if (nread <= 0) {
		ret = errno;
		arla_debug_assert(0);
		break;
	    }

	    nwrite = rx_Write(call, buf, nread);
	    if (nwrite != nread) {
		ret = conv_to_arla_errno(rx_GetCallError(call));
		break;
	    }
	} else {
	    nread = rx_Read(call, buf, io_len);
	    if (nread != io_len) {
		ret = conv_to_arla_errno(rx_GetCallError(call));
		break;
	    }

	    nwrite = pwrite(fd, buf, nread, buf_off);
	    if (nwrite != nread) {
		ret = errno;
		arla_debug_assert(0);
		break;
	    }
	}

	len -= nread;
	off += nread;

	if (ret)
	    break;
    }

    if (fd != 0)
	close(fd);

    free(buf);

    return ret;
}

#endif /* !HAVE_MMAP */
/*
 * Copy from a RX_call to a cache node.
 * The area between offset and len + offset should be present in the cache.
 *
 * Returns 0 or error.
 */

int
copyrx2cache(struct rx_call *call, FCacheEntry *entry, off_t off, off_t len)
{
    return cachetransfer(call, entry, off, len, FALSE);
}
/*
 * Copy `len' bytes from `entry' to `call'.
 * Returns 0 or error.
 */

int
copycache2rx(FCacheEntry *entry, struct rx_call *call, off_t off, off_t len)
{
    return cachetransfer(call, entry, off, len, TRUE);
}
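/*
 * Both wrappers above go through cachetransfer(); the final Bool
 * argument (rxwritep) selects the direction.  FALSE means data is read
 * from the rx call and written into the cache blocks, TRUE means the
 * cache blocks are read and written to the rx call.
 */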
/*
 * actually do the malloc + read thing
 */

static int
abuf_populate(fbuf *f)
{
    uint64_t block_off = 0;
    ssize_t nread;
    off_t off = 0;
    char *buf;
    int fd = 0;
    size_t len = f->len;
    int ret = 0;

    buf = malloc(len);
    if (buf == NULL) {
	int ret = errno;
	arla_warnx(ADEBWARN, "abuf_populate: malloc(%lu) failed",
		   (unsigned long)len);
	arla_debug_assert(0);
	return ret;
    }

    while (len > 0) {
	block_off = block_offset(off);
	off_t r_len = min(len, fcache_getblocksize());

	if ((fd != 0 && close(fd) != 0)
	    || (fd = fcache_open_block(abuf_entry(f),
				       block_off, FALSE)) < 0) {
	    ret = errno;
	    fd = 0;
	    break;
	}

	nread = pread(fd, buf + off, r_len, off - block_off);
	if (nread != r_len) {
	    ret = errno;
	    break;
	}

	len -= nread;
	off += nread;
    }

    if (ret)
	free(buf);
    else
	f->buf = buf;

    if (fd)
	close(fd);

    return ret;
}
/*
 * infrastructure for truncate handling
 */

struct truncate_cb_data {
    off_t length;		/* new length of the node's data */
    uint64_t last_off;		/* block holding the new end of data */
    uint64_t prev_last_off;	/* block holding the previous end of data */
};
static int
abuf_truncate_block(FCacheEntry *entry, uint64_t offset, uint64_t blocklen)
{
    int ret;
    int fd = fcache_open_block(entry, offset, TRUE);

    if (fd < 0) {
	ret = errno;
	arla_warnx(ADEBWARN, "abuf_truncate_block: "
		   "open failed at offset 0x%" PRIX64 "\n", offset);
	arla_debug_assert(0);
	return ret;
    }

    ret = ftruncate(fd, blocklen);
    if (ret) {
	ret = errno;
	arla_warnx(ADEBWARN, "abuf_truncate_block: "
		   "truncate failed at offset 0x%" PRIX64 "\n", offset);
	arla_debug_assert(0);
	close(fd);
	return ret;
    }

    close(fd);
    return 0;
}
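/*
 * Per-block callback for abuf_truncate_int().  In effect:
 *
 *  - blocks lying entirely beyond the new length are dropped (block
 *    zero is always kept),
 *  - the block that contains the new end of data is truncated so it
 *    ends exactly at `length',
 *  - the block that held the previous end of data is resized back out
 *    to a full block (or to the new end, if that is closer), which
 *    matters when the node grows,
 *  - all other blocks are left untouched.
 */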
static void
truncate_callback(struct block *block, void *data)
{
    struct truncate_cb_data *cb_data = (struct truncate_cb_data *)data;

    if (block->offset >= cb_data->length && block->offset != 0) {
	fcache_throw_block(block);
    } else if (cb_data->last_off == block->offset) {
	(void)abuf_truncate_block(block->node, block->offset,
				  cb_data->length - block->offset);
    } else if (cb_data->prev_last_off == block->offset) {
	uint64_t blocklen = cb_data->length - block->offset;
	uint64_t blocksize = fcache_getblocksize();

	if (blocklen > blocksize)
	    blocklen = blocksize;

	(void)abuf_truncate_block(block->node, block->offset, blocklen);
    } else {
	/* block should be ok */
    }
}
/*
 * truncate the cache data for real, update 'have' flags
 */

static int
abuf_truncate_int(FCacheEntry *entry, off_t length)
{
    struct truncate_cb_data data;

    data.length = length;
    data.last_off = block_offset(length);
    data.prev_last_off =
	block_offset(fcache_get_status_length(&entry->status));

    block_foreach(entry, truncate_callback, &data);
    return 0;
}
/*
 * Change the size of the underlying cache and the fbuf to `new_len'
 * bytes.
 * Returns 0 or error.
 */

static int
abuf_truncate_op(fbuf *f, size_t new_len)
{
    int ret;

    ret = abuf_flush(f);
    if (ret)
	goto fail;

    ret = abuf_truncate_int(abuf_entry(f), new_len);
    if (ret)
	goto fail;

    if (f->buf) {
	void *buf = realloc(f->buf, new_len);
	if (buf == NULL) {
	    ret = ENOMEM;
	    goto fail;
	}

	f->buf = buf;
    }

    f->len = new_len;

    return 0;

 fail:
    if (f->buf) {
	free(f->buf);
	f->buf = NULL;
    }
    arla_debug_assert(0);

    return ret;
}
/*
 * Change the size of the cached data of `entry' to `new_len' bytes.
 * Returns 0 or error.
 */

int
abuf_truncate(FCacheEntry *entry, size_t new_len)
{
    return abuf_truncate_int(entry, new_len);
}
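/*
 * Note on the two truncate entry points above: abuf_truncate() operates
 * directly on a cache node and needs no fbuf, while abuf_truncate_op()
 * is the truncate hook installed into the fbuf by abuf_create() and
 * additionally keeps the in-memory buffer (f->buf, f->len) in sync.
 */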
static void
purge_callback(struct block *block, void *data)
{
    fcache_throw_block(block);
}

/*
 * Throw all data in the node. This is a special case of truncate
 * that does not leave block zero.
 */

int
abuf_purge(FCacheEntry *entry)
{
    block_foreach(entry, purge_callback, NULL);
    return 0;
}
/*
 * Create a fbuf over `entry' with (len, flags).
 * Returns 0 or error.
 */

int
abuf_create(fbuf *f, FCacheEntry *entry, size_t len, fbuf_flags flags)
{
    struct abuf_data *data = malloc(sizeof(*data));

    if (data == NULL)
	return ENOMEM;

    data->entry = entry;

    f->data = data;
    f->len = len;
    f->buf = NULL;
    f->flags = flags;

    f->truncate = abuf_truncate_op;

    return abuf_populate(f);
}
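/*
 * A minimal usage sketch (illustrative only; the fbuf type and its
 * flags are declared in the fbuf header):
 *
 *     fbuf f;
 *     int ret = abuf_create(&f, entry, len, FBUF_WRITE);
 *     if (ret == 0) {
 *         ... work on the f.len bytes at f.buf ...
 *         ret = abuf_end(&f);   // flushes to the cache blocks and frees
 *     }
 *
 * abuf_create() pulls the cached blocks into one contiguous malloc'ed
 * buffer; abuf_end() writes it back (only when FBUF_WRITE is set) and
 * releases it.
 */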
/*
 * Write out the data of `f' to the file.
 * Returns 0 or error.
 */

static int
abuf_flush(fbuf *f)
{
    size_t len = f->len;
    uint64_t block_off = 0;
    ssize_t nwrite;
    int fd, ret = 0;

    if (!f->buf)
	return 0;

    if ((f->flags & FBUF_WRITE) != FBUF_WRITE)
	return 0;

    while (len > 0 && ret == 0) {
	size_t size = min(len, fcache_getblocksize());

	if ((fd = fcache_open_block(abuf_entry(f),
				    block_off, TRUE)) < 0) {
	    ret = errno;
	    break;
	}

	nwrite = write(fd, (char *)f->buf + block_off, size);
	if (nwrite != size) {
	    ret = errno;
	    close(fd);
	    break;
	}

	len -= nwrite;
	block_off += fcache_getblocksize();

	ret = close(fd);
    }

    return ret;
}
/*
 * End using `f'.
 * Returns 0 or error.
 */

int
abuf_end(fbuf *f)
{
    int ret = 0;

    if (f->buf) {
	ret = abuf_flush(f);
	free(f->buf);
	f->buf = NULL;
    }

    free(f->data);

    return ret;
}