/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
#include <linux/fs.h>
#include <linux/iobuf.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
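
/*
 * dprintk() discards its arguments by default; redefine it as printk
 * to get debugging output from this driver.
 */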
#define dprintk(x...)
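
/*
 * Per-minor state, indexed by the raw device minor number.  Entry 0 is
 * unused: minor 0 is the bind/unbind control device handled below.
 */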
static struct block_device *raw_device_bindings[256];
static int raw_device_inuse[256];
static int raw_device_sector_size[256];
static int raw_device_sector_bits[256];

static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);

ssize_t raw_read(struct file *, char *, size_t, loff_t *);
ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
int raw_open(struct inode *, struct file *);
int raw_release(struct inode *, struct file *);
int raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);

static struct file_operations raw_fops = {
        read:           raw_read,
        write:          raw_write,
        open:           raw_open,
        release:        raw_release,
};

static struct file_operations raw_ctl_fops = {
        ioctl:          raw_ctl_ioctl,
        open:           raw_open,
};

void __init raw_init(void)
{
        register_chrdev(RAW_MAJOR, "raw", &raw_fops);
}
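
/*
 * Every minor is registered with raw_fops; raw_open() redirects minor
 * 0, the control device, to raw_ctl_fops.
 */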

/*
 * Open/close code for raw IO.
 */

int raw_open(struct inode *inode, struct file *filp)
{
        int minor;
        struct block_device *bdev;
        kdev_t rdev;    /* it should eventually go away */
        int err;
        int sector_size;
        int sector_bits;

        minor = MINOR(inode->i_rdev);

        /*
         * Is it the control device?
         */
        if (minor == 0) {
                filp->f_op = &raw_ctl_fops;
                return 0;
        }

        /*
         * No, it is a normal raw device.  All we need to do on open is
         * to check that the device is bound, and force the underlying
         * block device to a sector-size blocksize.
         */
        bdev = raw_device_bindings[minor];
        if (!bdev)
                return -ENODEV;

        rdev = to_kdev_t(bdev->bd_dev);
        err = blkdev_get(bdev, filp->f_mode, 0, BDEV_RAW);
        if (err)
                return err;

        /*
         * Don't change the blocksize if we already have users using
         * this device.
         */
        if (raw_device_inuse[minor]++)
                return 0;

        /*
         * Don't interfere with mounted devices: we cannot safely set
         * the blocksize on a device which is already mounted.
         */
        sector_size = 512;
        if (get_super(rdev) != NULL) {
                if (blksize_size[MAJOR(rdev)])
                        sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
        } else {
                if (hardsect_size[MAJOR(rdev)])
                        sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
        }

        set_blocksize(rdev, sector_size);
        raw_device_sector_size[minor] = sector_size;
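
        /*
         * Compute log2(sector_size).  This assumes sector_size is a
         * power of two, which holds for the blocksize/hardsect sizes
         * obtained above.
         */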
        for (sector_bits = 0; !(sector_size & 1); )
                sector_size >>= 1, sector_bits++;
        raw_device_sector_bits[minor] = sector_bits;

        return 0;
}

int raw_release(struct inode *inode, struct file *filp)
{
        int minor;
        struct block_device *bdev;

        minor = MINOR(inode->i_rdev);
        lock_kernel();
        bdev = raw_device_bindings[minor];
        blkdev_put(bdev, BDEV_RAW);
        raw_device_inuse[minor]--;
        unlock_kernel();
        return 0;
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
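
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this driver; the control node name is a convention,
 * commonly /dev/rawctl for minor 0):
 *
 *      struct raw_config_request rq;
 *      int fd = open("/dev/rawctl", O_RDWR);
 *
 *      rq.raw_minor   = 1;     (bind raw minor 1 ...)
 *      rq.block_major = 8;     (... to block device 8:1)
 *      rq.block_minor = 1;
 *      ioctl(fd, RAW_SETBIND, &rq);
 */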

int raw_ctl_ioctl(struct inode *inode,
                  struct file *filp,
                  unsigned int command,
                  unsigned long arg)
{
        struct raw_config_request rq;
        int err = 0;
        int minor;

        switch (command) {
        case RAW_SETBIND:
        case RAW_GETBIND:
                /* First, find out which raw minor we want */
                err = copy_from_user(&rq, (void *) arg, sizeof(rq));
                if (err)
                        break;

                minor = rq.raw_minor;
                if (minor == 0 || minor > MINORMASK) {
                        err = -EINVAL;
                        break;
                }

                if (command == RAW_SETBIND) {
                        /*
                         * This is like making block devices, so demand the
                         * same capability.
                         */
                        if (!capable(CAP_SYS_ADMIN)) {
                                err = -EPERM;
                                break;
                        }

                        /*
                         * For now, we don't need to check that the underlying
                         * block device is present or not: we can do that when
                         * the raw device is opened.  Just check that the
                         * major/minor numbers make sense.
                         */
                        if (rq.block_major == NODEV ||
                            rq.block_major > MAX_BLKDEV ||
                            rq.block_minor > MINORMASK) {
                                err = -EINVAL;
                                break;
                        }

                        if (raw_device_inuse[minor]) {
                                err = -EBUSY;
                                break;
                        }

                        /* Drop any old binding before taking the new one. */
                        if (raw_device_bindings[minor])
                                bdput(raw_device_bindings[minor]);
                        raw_device_bindings[minor] =
                                bdget(kdev_t_to_nr(MKDEV(rq.block_major, rq.block_minor)));
                } else {
                        struct block_device *bdev;
                        kdev_t dev;

                        bdev = raw_device_bindings[minor];
                        if (bdev) {
                                dev = to_kdev_t(bdev->bd_dev);
                                rq.block_major = MAJOR(dev);
                                rq.block_minor = MINOR(dev);
                        } else {
                                rq.block_major = rq.block_minor = 0;
                        }
                        err = copy_to_user((void *) arg, &rq, sizeof(rq));
                }
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

ssize_t raw_read(struct file *filp, char *buf,
                 size_t size, loff_t *offp)
{
        return rw_raw_dev(READ, filp, buf, size, offp);
}

ssize_t raw_write(struct file *filp, const char *buf,
                  size_t size, loff_t *offp)
{
        return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
}

#define SECTOR_BITS 9
#define SECTOR_SIZE (1U << SECTOR_BITS)
#define SECTOR_MASK (SECTOR_SIZE - 1)
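
/*
 * 512-byte base sector: the "- 9" scaling of KIO_MAX_SECTORS in
 * rw_raw_dev() below corresponds to SECTOR_BITS.
 */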

ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
                   size_t size, loff_t *offp)
{
        struct kiobuf *iobuf;
        int err;
        unsigned long blocknr, blocks;
        unsigned long b[KIO_MAX_SECTORS];
        size_t transferred;
        int iosize;
        int i;
        int minor;
        kdev_t dev;
        unsigned long limit;

        int sector_size, sector_bits, sector_mask;
        int max_sectors;

        /*
         * First, a few checks on device size limits.
         */
        minor = MINOR(filp->f_dentry->d_inode->i_rdev);
        dev = to_kdev_t(raw_device_bindings[minor]->bd_dev);
        sector_size = raw_device_sector_size[minor];
        sector_bits = raw_device_sector_bits[minor];
        sector_mask = sector_size - 1;
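
        /*
         * KIO_MAX_SECTORS is counted in 512-byte sectors; scale the
         * per-chunk budget down for devices with a larger sector size.
         */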
        max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);

        if (blk_size[MAJOR(dev)])
                limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
        else
                limit = INT_MAX;
        dprintk("rw_raw_dev: dev %d:%d (+%d)\n",
                MAJOR(dev), MINOR(dev), limit);

        if ((*offp & sector_mask) || (size & sector_mask))
                return -EINVAL;
        if ((*offp >> sector_bits) > limit)
                return 0;

        /*
         * We'll just use one kiobuf.
         */
        err = alloc_kiovec(1, &iobuf);
        if (err)
                return err;

        /*
         * Split the IO into KIO_MAX_SECTORS chunks, mapping and
         * unmapping the single kiobuf as we go to perform each chunk of
         * IO.
         */
        transferred = 0;
        blocknr = *offp >> sector_bits;
        while (size > 0) {
                blocks = size >> sector_bits;
                if (blocks > max_sectors)
                        blocks = max_sectors;
                if (blocks > limit - blocknr)
                        blocks = limit - blocknr;
                if (!blocks)
                        break;

                iosize = blocks << sector_bits;

                err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
                if (err)
                        break;
#if 0
                err = lock_kiovec(1, &iobuf, 1);
                if (err)
                        break;
#endif
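
                /* Build the list of consecutive device blocks for this chunk. */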
                for (i = 0; i < blocks; i++)
                        b[i] = blocknr++;

                err = brw_kiovec(rw, 1, &iobuf, dev, b, sector_size);

                if (err >= 0) {
                        transferred += err;
                        size -= err;
                        buf += err;
                }

                unmap_kiobuf(iobuf);    /* The unlock_kiobuf is implicit here */

                if (err != iosize)
                        break;
        }

        free_kiovec(1, &iobuf);
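
        /*
         * If anything was transferred, report the (possibly short)
         * byte count; only hand back the error when nothing was.
         */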
        if (transferred) {
                *offp += transferred;
                return transferred;
        }

        return err;
}