Commit: "Portability cleanup as required by Linus."
Source: linux-2.6/linux-mips.git — drivers/char/raw.c
Blob: a3159c8367a4cfcddfa13b28763916cf09deb45e
1 /*
2 * linux/drivers/char/raw.c
4 * Front-end raw character devices. These can be bound to any block
5 * devices to provide genuine Unix raw character device semantics.
7 * We reserve minor number 0 for a control interface. ioctl()s on this
8 * device are used to bind the other minor numbers to block devices.
9 */
11 #include <linux/fs.h>
12 #include <linux/iobuf.h>
13 #include <linux/major.h>
14 #include <linux/blkdev.h>
15 #include <linux/raw.h>
16 #include <linux/capability.h>
17 #include <asm/uaccess.h>
/* Debug trace macro — compiled out; point it at printk to enable tracing. */
19 #define dprintk(x...)

/*
 * Per-raw-minor state, indexed by the raw device's minor number
 * (0..255; minor 0 is the control device and is never bound):
 *   raw_device_bindings    — block device bound via RAW_SETBIND, or NULL
 *   raw_device_inuse       — open count for the raw minor
 *   raw_device_sector_size — blocksize forced on the device at first open
 *   raw_device_sector_bits — log2 of that blocksize
 */
21 static struct block_device *raw_device_bindings[256] = {};
22 static int raw_device_inuse[256] = {};
23 static int raw_device_sector_size[256] = {};
24 static int raw_device_sector_bits[256] = {};

/* Common engine shared by raw_read() and raw_write(); rw is READ or WRITE. */
26 static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);

/* Forward declarations for the file_operations tables below. */
28 ssize_t raw_read(struct file *, char *, size_t, loff_t *);
29 ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
30 int raw_open(struct inode *, struct file *);
31 int raw_release(struct inode *, struct file *);
32 int raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
/*
 * file_operations for bound raw minors (1..255): plain read/write plus
 * open/release reference bookkeeping.  (Old GCC "label:" initializer
 * syntax, the pre-C99 form of designated initializers.)
 */
35 static struct file_operations raw_fops = {
36 read: raw_read,
37 write: raw_write,
38 open: raw_open,
39 release: raw_release,
/*
 * file_operations for the control device (minor 0): bind/unbind ioctls
 * only.  raw_open() swaps this table in when minor 0 is opened.
 */
42 static struct file_operations raw_ctl_fops = {
43 ioctl: raw_ctl_ioctl,
44 open: raw_open,
/*
 * Boot-time init: register the raw character-device major.
 * NOTE(review): the return value of register_chrdev() is ignored, so a
 * conflict on RAW_MAJOR would fail silently — consider logging failure.
 */
47 void __init raw_init(void)
49 register_chrdev(RAW_MAJOR, "raw", &raw_fops);
52 /*
53 * Open/close code for raw IO.
54 */
56 int raw_open(struct inode *inode, struct file *filp)
58 int minor;
59 struct block_device * bdev;
60 kdev_t rdev; /* it should eventually go away */
61 int err;
62 int sector_size;
63 int sector_bits;
65 minor = MINOR(inode->i_rdev);
67 /*
68 * Is it the control device?
71 if (minor == 0) {
72 filp->f_op = &raw_ctl_fops;
73 return 0;
77 * No, it is a normal raw device. All we need to do on open is
78 * to check that the device is bound, and force the underlying
79 * block device to a sector-size blocksize.
82 bdev = raw_device_bindings[minor];
83 if (!bdev)
84 return -ENODEV;
86 rdev = to_kdev_t(bdev->bd_dev);
87 err = blkdev_get(bdev, filp->f_mode, 0, BDEV_RAW);
88 if (err)
89 return err;
92 * Don't change the blocksize if we already have users using
93 * this device
96 if (raw_device_inuse[minor]++)
97 return 0;
99 /*
100 * Don't interfere with mounted devices: we cannot safely set
101 * the blocksize on a device which is already mounted.
104 sector_size = 512;
105 if (get_super(rdev) != NULL) {
106 if (blksize_size[MAJOR(rdev)])
107 sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
108 } else {
109 if (hardsect_size[MAJOR(rdev)])
110 sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
113 set_blocksize(rdev, sector_size);
114 raw_device_sector_size[minor] = sector_size;
116 for (sector_bits = 0; !(sector_size & 1); )
117 sector_size>>=1, sector_bits++;
118 raw_device_sector_bits[minor] = sector_bits;
120 return 0;
123 int raw_release(struct inode *inode, struct file *filp)
125 int minor;
126 struct block_device *bdev;
128 minor = MINOR(inode->i_rdev);
129 bdev = raw_device_bindings[minor];
130 blkdev_put(bdev, BDEV_RAW);
131 raw_device_inuse[minor]--;
132 return 0;
138 * Deal with ioctls against the raw-device control interface, to bind
139 * and unbind other raw devices.
142 int raw_ctl_ioctl(struct inode *inode,
143 struct file *flip,
144 unsigned int command,
145 unsigned long arg)
147 struct raw_config_request rq;
148 int err = 0;
149 int minor;
151 switch (command) {
152 case RAW_SETBIND:
153 case RAW_GETBIND:
155 /* First, find out which raw minor we want */
157 err = copy_from_user(&rq, (void *) arg, sizeof(rq));
158 if (err)
159 break;
161 minor = rq.raw_minor;
162 if (minor == 0 || minor > MINORMASK) {
163 err = -EINVAL;
164 break;
167 if (command == RAW_SETBIND) {
169 * This is like making block devices, so demand the
170 * same capability
172 if (!capable(CAP_SYS_ADMIN)) {
173 err = -EPERM;
174 break;
178 * For now, we don't need to check that the underlying
179 * block device is present or not: we can do that when
180 * the raw device is opened. Just check that the
181 * major/minor numbers make sense.
184 if (rq.block_major == NODEV ||
185 rq.block_major > MAX_BLKDEV ||
186 rq.block_minor > MINORMASK) {
187 err = -EINVAL;
188 break;
191 if (raw_device_inuse[minor]) {
192 err = -EBUSY;
193 break;
195 if (raw_device_bindings[minor])
196 bdput(raw_device_bindings[minor]);
197 raw_device_bindings[minor] =
198 bdget(kdev_t_to_nr(MKDEV(rq.block_major, rq.block_minor)));
199 } else {
200 struct block_device *bdev;
201 kdev_t dev;
203 bdev = raw_device_bindings[minor];
204 if (bdev) {
205 dev = to_kdev_t(bdev->bd_dev);
206 rq.block_major = MAJOR(dev);
207 rq.block_minor = MINOR(dev);
208 } else {
209 rq.block_major = rq.block_minor = 0;
211 err = copy_to_user((void *) arg, &rq, sizeof(rq));
213 break;
215 default:
216 err = -EINVAL;
219 return err;
224 ssize_t raw_read(struct file *filp, char * buf,
225 size_t size, loff_t *offp)
227 return rw_raw_dev(READ, filp, buf, size, offp);
230 ssize_t raw_write(struct file *filp, const char *buf,
231 size_t size, loff_t *offp)
233 return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
/*
 * Hard 512-byte sector constants.  NOTE(review): these appear unused
 * in the visible code — rw_raw_dev() uses the per-minor sector size
 * recorded at open time and a literal 9 for the base shift; confirm
 * against the rest of the file before removing.
 */
236 #define SECTOR_BITS 9
237 #define SECTOR_SIZE (1U << SECTOR_BITS)
238 #define SECTOR_MASK (SECTOR_SIZE - 1)
/*
 * Common engine behind raw_read() and raw_write().
 *
 * rw is READ or WRITE; buf/size/offp describe the user buffer and the
 * file position.  Both *offp and size must be multiples of the bound
 * device's sector size (-EINVAL otherwise).  The transfer is split
 * into chunks of at most KIO_MAX_SECTORS sectors; each chunk maps the
 * user pages into one kiobuf and submits them with brw_kiovec().
 * Returns the number of bytes transferred when any progress was made,
 * otherwise the error from the failing step (or 0 past end of device).
 */
240 ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
241 size_t size, loff_t *offp)
243 struct kiobuf * iobuf;
244 int err;
245 unsigned long blocknr, blocks;
246 unsigned long b[KIO_MAX_SECTORS];
247 size_t transferred;
248 int iosize;
249 int i;
250 int minor;
251 kdev_t dev;
252 unsigned long limit;
254 int sector_size, sector_bits, sector_mask;
255 int max_sectors;
/* First, a few checks on device size limits. */
261 minor = MINOR(filp->f_dentry->d_inode->i_rdev);
262 dev = to_kdev_t(raw_device_bindings[minor]->bd_dev);
263 sector_size = raw_device_sector_size[minor];
264 sector_bits = raw_device_sector_bits[minor];
265 sector_mask = sector_size- 1;
/* KIO_MAX_SECTORS is in 512-byte units; rescale for larger sectors. */
266 max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);
/* Device size limit in sectors; INT_MAX when blk_size has no entry. */
268 if (blk_size[MAJOR(dev)])
269 limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
270 else
271 limit = INT_MAX;
/*
 * NOTE(review): "%d" does not match the unsigned long 'limit' — a
 * mismatched format if dprintk is ever enabled.
 */
272 dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
273 MAJOR(dev), MINOR(dev), limit);
/* Raw I/O must be sector-aligned in both offset and length. */
275 if ((*offp & sector_mask) || (size & sector_mask))
276 return -EINVAL;
/* NOTE(review): '>' lets a read start exactly at 'limit' — confirm
 * whether '>=' was intended; the loop below clamps to zero anyway. */
277 if ((*offp >> sector_bits) > limit)
278 return 0;
/* We'll just use one kiobuf. */
284 err = alloc_kiovec(1, &iobuf);
285 if (err)
286 return err;
/*
 * Split the IO into KIO_MAX_SECTORS chunks, mapping and unmapping
 * the single kiobuf as we go to perform each chunk of IO.
 */
294 transferred = 0;
295 blocknr = *offp >> sector_bits;
296 while (size > 0) {
297 blocks = size >> sector_bits;
298 if (blocks > max_sectors)
299 blocks = max_sectors;
/* Clamp the chunk to the end of the device. */
300 if (blocks > limit - blocknr)
301 blocks = limit - blocknr;
302 if (!blocks)
303 break;
305 iosize = blocks << sector_bits;
/* Pin the user pages for this chunk into the kiobuf. */
307 err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
308 if (err)
309 break;
310 #if 0
311 err = lock_kiovec(1, &iobuf, 1);
312 if (err)
313 break;
314 #endif
/* Build the per-sector block-number list for this chunk. */
316 for (i=0; i < blocks; i++)
317 b[i] = blocknr++;
/* brw_kiovec() returns bytes transferred, or a negative errno. */
319 err = brw_kiovec(rw, 1, &iobuf, dev, b, sector_size);
321 if (err >= 0) {
322 transferred += err;
323 size -= err;
324 buf += err;
327 unmap_kiobuf(iobuf); /* The unlock_kiobuf is implicit here */
/* A short transfer means error or end of device: stop either way. */
329 if (err != iosize)
330 break;
333 free_kiovec(1, &iobuf);
/* Partial success wins over a trailing error: report the progress
 * made and advance the file position. */
335 if (transferred) {
336 *offp += transferred;
337 return transferred;
340 return err;