/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include "isofs.h"
#include "zisofs.h"
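
/*
 * When a page of a compressed block cannot be grabbed into the page
 * cache, its decompressed output is directed at zisofs_sink_page and
 * simply thrown away.
 */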
/* This should probably be global. */
static char zisofs_sink_page[PAGE_CACHE_SIZE];

/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static struct semaphore zisofs_zlib_semaphore;
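
/*
 * A zisofs-compressed file, as interpreted here, begins with a header
 * of ISOFS_I(inode)->i_format_parm[0] 32-bit words, followed by a table
 * of little-endian 32-bit file offsets, one per block of
 * (1 << i_format_parm[1]) bytes of uncompressed data plus one extra
 * entry marking the end of the last block.  Block i's zlib-compressed
 * data lies between offset[i] and offset[i+1]; a zero-length extent
 * means the block decompresses to all zeroes.
 */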

/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_readpage(struct file *file, struct page *page)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned int maxpage, xpage, fpage, blockindex;
        unsigned long offset;
        unsigned long blockptr, blockendptr, cstart, cend, csize;
        struct buffer_head *bh, *ptrbh[2];
        unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
        unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
        unsigned long bufmask = bufsize - 1;
        int err = -EIO;
        int i;
        unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        /* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
        unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
        unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
        unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
        struct page *pages[zisofs_block_pages];
        unsigned long index = page->index;
        int indexblocks;

        /* We have already been given one page, this is the one
           we must do. */
        xpage = index & zisofs_block_page_mask;
        pages[xpage] = page;

        /* The remaining pages need to be allocated and inserted */
        offset = index & ~zisofs_block_page_mask;
        blockindex = offset >> zisofs_block_page_shift;
        maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        maxpage = min(zisofs_block_pages, maxpage-offset);

        for ( i = 0 ; i < maxpage ; i++, offset++ ) {
                if ( i != xpage ) {
                        pages[i] = grab_cache_page_nowait(mapping, offset);
                }
                page = pages[i];
                if ( page ) {
                        ClearPageError(page);
                        kmap(page);
                }
        }
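
        /*
         * grab_cache_page_nowait() may fail; a NULL entry in pages[]
         * means the decompressed data for that page is sent to
         * zisofs_sink_page and discarded.
         */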

        /* This is the last page filled, plus one; used in case of abort. */
        fpage = 0;

        /* Find the pointer to this specific chunk */
        /* Note: we're not using isonum_731() here because the data is known aligned */
        /* Note: header_size is in 32-bit words (4 bytes) */
        blockptr = (header_size + blockindex) << 2;
        blockendptr = blockptr + 4;
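
        /*
         * The two consecutive table entries may straddle a device-block
         * boundary, in which case two metadata blocks have to be read.
         */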
        indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
        ptrbh[0] = ptrbh[1] = NULL;

        if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
                if ( ptrbh[0] ) brelse(ptrbh[0]);
                printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
                       inode->i_ino, blockptr >> bufshift);
                goto eio;
        }
        ll_rw_block(READ, indexblocks, ptrbh);

        bh = ptrbh[0];
        if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
                       inode->i_ino, blockptr >> bufshift);
                if ( ptrbh[1] )
                        brelse(ptrbh[1]);
                goto eio;
        }
        cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));

        if ( indexblocks == 2 ) {
                /* We just crossed a block boundary.  Switch to the next block */
                brelse(bh);
                bh = ptrbh[1];
                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                        printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
                               inode->i_ino, blockendptr >> bufshift);
                        goto eio;
                }
        }
        cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
        brelse(bh);

        csize = cend-cstart;
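
        /*
         * csize bytes of compressed data, starting cstart bytes into
         * the file, now need to be inflated into up to maxpage pages.
         */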

        /* Now page[] contains an array of pages, any of which can be NULL,
           and the locks on which we hold.  We should now read the data and
           release the pages.  If the pages are NULL the decompressed data
           for that particular page should be discarded. */

        if ( csize == 0 ) {
                /* This data block is empty. */

                for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
                        if ( (page = pages[fpage]) != NULL ) {
                                memset(page_address(page), 0, PAGE_CACHE_SIZE);

                                flush_dcache_page(page);
                                SetPageUptodate(page);
                                kunmap(page);
                                unlock_page(page);
                                if ( fpage == xpage )
                                        err = 0; /* The critical page */
                                else
                                        page_cache_release(page);
                        }
                }
        } else {
                /* This data block is compressed. */
                z_stream stream;
                int bail = 0, left_out = -1;
                int zerr;
                int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
                int haveblocks;
                struct buffer_head *bhs[needblocks+1];
                struct buffer_head **bhptr;

                /* Because zlib is not thread-safe, do all the I/O at the top. */

                blockptr = cstart >> bufshift;
                memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
                haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
                ll_rw_block(READ, haveblocks, bhs);
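                /*
                 * ll_rw_block() only submits the reads; each buffer is
                 * waited on (wait_on_buffer) as the inflate loop below
                 * actually needs its data.
                 */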

                bhptr = &bhs[0];
                bh = *bhptr++;

                /* First block is special since it may be fractional.
                   We also wait for it before grabbing the zlib
                   semaphore; odds are that the subsequent blocks are
                   going to come in in short order, so we don't hold
                   the zlib semaphore longer than necessary. */

                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
                               fpage, xpage, csize);
                        goto b_eio;
                }
                stream.next_in = bh->b_data + (cstart & bufmask);
                stream.avail_in = min(bufsize-(cstart & bufmask), csize);
                csize -= stream.avail_in;

                stream.workspace = zisofs_zlib_workspace;
                down(&zisofs_zlib_semaphore);

                zerr = zlib_inflateInit(&stream);
                if ( zerr != Z_OK ) {
                        if ( err && zerr == Z_MEM_ERROR )
                                err = -ENOMEM;
                        printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
                               zerr);
                        goto z_eio;
                }
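
                /*
                 * Outer loop: one iteration per page of this zisofs block.
                 * Inner loop: feed compressed buffers to zlib_inflate()
                 * until the current output page is full.
                 */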
                while ( !bail && fpage < maxpage ) {
                        page = pages[fpage];
                        if ( page )
                                stream.next_out = page_address(page);
                        else
                                stream.next_out = (void *)&zisofs_sink_page;
                        stream.avail_out = PAGE_CACHE_SIZE;

                        while ( stream.avail_out ) {
                                int ao, ai;
                                if ( stream.avail_in == 0 && left_out ) {
                                        if ( !csize ) {
                                                printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
                                                bail = 1;
                                                break;
                                        } else {
                                                bh = *bhptr++;
                                                if ( !bh ||
                                                     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                                                        /* Reached an EIO */
                                                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
                                                               fpage, xpage, csize);
                                                        bail = 1;
                                                        break;
                                                }
                                                stream.next_in = bh->b_data;
                                                stream.avail_in = min(csize, bufsize);
                                                csize -= stream.avail_in;
                                        }
                                }
                                ao = stream.avail_out;  ai = stream.avail_in;
                                zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
                                left_out = stream.avail_out;
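                                /*
                                 * Z_BUF_ERROR with no input left just means
                                 * zlib wants more data; go back around so the
                                 * refill test at the top of the loop can read
                                 * the next buffer.
                                 */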
                                if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
                                        continue;
                                if ( zerr != Z_OK ) {
                                        /* EOF, error, or trying to read beyond end of input */
                                        if ( err && zerr == Z_MEM_ERROR )
                                                err = -ENOMEM;
                                        if ( zerr != Z_STREAM_END )
                                                printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
                                                       zerr, inode->i_ino, index,
                                                       fpage, xpage,
                                                       stream.avail_in, stream.avail_out,
                                                       ai, ao);
                                        bail = 1;
                                        break;
                                }
                        }

                        if ( stream.avail_out && zerr == Z_STREAM_END ) {
                                /* Fractional page written before EOF.  This may
                                   be the last page in the file. */
                                memset(stream.next_out, 0, stream.avail_out);
                                stream.avail_out = 0;
                        }

                        if ( !stream.avail_out ) {
                                /* This page completed */
                                if ( page ) {
                                        flush_dcache_page(page);
                                        SetPageUptodate(page);
                                        kunmap(page);
                                        unlock_page(page);
                                        if ( fpage == xpage )
                                                err = 0; /* The critical page */
                                        else
                                                page_cache_release(page);
                                }
                                fpage++;
                        }
                }
                zlib_inflateEnd(&stream);

        z_eio:
                up(&zisofs_zlib_semaphore);

        b_eio:
                for ( i = 0 ; i < haveblocks ; i++ ) {
                        if ( bhs[i] )
                                brelse(bhs[i]);
                }
        }

eio:

        /* Release any residual pages, do not SetPageUptodate */
        while ( fpage < maxpage ) {
                page = pages[fpage];
                if ( page ) {
                        flush_dcache_page(page);
                        if ( fpage == xpage )
                                SetPageError(page);
                        kunmap(page);
                        unlock_page(page);
                        if ( fpage != xpage )
                                page_cache_release(page);
                }
                fpage++;
        }

        /* At this point, err contains 0 or -EIO depending on the "critical" page */
        return err;
}

struct address_space_operations zisofs_aops = {
        .readpage = zisofs_readpage,
        /* No sync_page operation supported? */
        /* No bmap operation supported */
};
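
/*
 * isofs is a read-only filesystem, so no write-side operations are
 * needed in this address_space.
 */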

static int initialized;

int __init zisofs_init(void)
{
        if ( initialized ) {
                printk("zisofs_init: called more than once\n");
                return 0;
        }
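
        /*
         * Pre-allocate the single shared zlib workspace (sized by
         * zlib_inflate_workspacesize()) so readpage never has to
         * allocate memory at decompression time.
         */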
        zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
        if ( !zisofs_zlib_workspace )
                return -ENOMEM;
        init_MUTEX(&zisofs_zlib_semaphore);

        initialized = 1;
        return 0;
}

void zisofs_cleanup(void)
{
        if ( !initialized ) {
                printk("zisofs_cleanup: called without initialization\n");
                return;
        }

        vfree(zisofs_zlib_workspace);
        initialized = 0;
}