/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
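
/*
 * Illustrative userspace sketch, not part of this driver: binding a raw
 * minor to a block device through the control node (commonly created as
 * /dev/rawctl).  The 8:1 block major/minor pair is only an example, and
 * error handling is omitted.
 *
 *        #include <fcntl.h>
 *        #include <sys/ioctl.h>
 *        #include <linux/raw.h>
 *
 *        int main(void)
 *        {
 *                struct raw_config_request rq = {
 *                        .raw_minor   = 1,
 *                        .block_major = 8,
 *                        .block_minor = 1,
 *                };
 *                int fd = open("/dev/rawctl", O_RDWR);
 *
 *                ioctl(fd, RAW_SETBIND, &rq);
 *                return 0;
 *        }
 */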

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

struct raw_device_data {
        struct block_device *binding;
        int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static struct file_operations raw_ctl_fops;        /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 * (An illustrative userspace sketch of the resulting alignment rules
 * follows raw_open() below.)
 */
static int raw_open(struct inode *inode, struct file *filp)
{
        const int minor = iminor(inode);
        struct block_device *bdev;
        int err;

        if (minor == 0) {        /* It is the control device */
                filp->f_op = &raw_ctl_fops;
                return 0;
        }

        mutex_lock(&raw_mutex);

        /*
         * All we need to do on open is check that the device is bound.
         */
        bdev = raw_devices[minor].binding;
        err = -ENODEV;
        if (!bdev)
                goto out;
        igrab(bdev->bd_inode);
        err = blkdev_get(bdev, filp->f_mode, 0);
        if (err)
                goto out;
        err = bd_claim(bdev, raw_open);
        if (err)
                goto out1;
        err = set_blocksize(bdev, bdev_hardsect_size(bdev));
        if (err)
                goto out2;
        filp->f_flags |= O_DIRECT;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        if (++raw_devices[minor].inuse == 1)
                filp->f_dentry->d_inode->i_mapping =
                        bdev->bd_inode->i_mapping;
        filp->private_data = bdev;
        mutex_unlock(&raw_mutex);
        return 0;

out2:
        bd_release(bdev);
out1:
        blkdev_put(bdev);
out:
        mutex_unlock(&raw_mutex);
        return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
        const int minor = iminor(inode);
        struct block_device *bdev;

        mutex_lock(&raw_mutex);
        bdev = raw_devices[minor].binding;
        if (--raw_devices[minor].inuse == 0) {
                /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
                inode->i_mapping = &inode->i_data;
                inode->i_mapping->backing_dev_info = &default_backing_dev_info;
        }
        mutex_unlock(&raw_mutex);

        bd_release(bdev);
        blkdev_put(bdev);
        return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
                unsigned int command, unsigned long arg)
{
        struct block_device *bdev = filp->private_data;

        return blkdev_ioctl(bdev->bd_inode, NULL, command, arg);
}
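
/*
 * Illustrative userspace sketch, not part of this driver: since raw_ioctl()
 * simply forwards to blkdev_ioctl(), ordinary block ioctls such as
 * BLKGETSIZE64 should work on a bound raw device.  /dev/raw/raw1 is assumed
 * to be bound already; error handling is omitted.
 *
 *        #include <fcntl.h>
 *        #include <stdint.h>
 *        #include <sys/ioctl.h>
 *        #include <linux/fs.h>
 *
 *        int main(void)
 *        {
 *                uint64_t bytes;
 *                int fd = open("/dev/raw/raw1", O_RDONLY);
 *
 *                ioctl(fd, BLKGETSIZE64, &bytes);
 *                close(fd);
 *                return 0;
 *        }
 */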

static void bind_device(struct raw_config_request *rq)
{
        class_device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
        class_device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
                            NULL, "raw%d", rq->raw_minor);
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
                        unsigned int command, unsigned long arg)
{
        struct raw_config_request rq;
        struct raw_device_data *rawdev;
        int err = 0;

        switch (command) {
        case RAW_SETBIND:
        case RAW_GETBIND:

                /* First, find out which raw minor we want */

                if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
                        err = -EFAULT;
                        goto out;
                }

                if (rq.raw_minor < 0 || rq.raw_minor >= MAX_RAW_MINORS) {
                        err = -EINVAL;
                        goto out;
                }
                rawdev = &raw_devices[rq.raw_minor];

                if (command == RAW_SETBIND) {
                        dev_t dev;

                        /*
                         * This is like making block devices, so demand the
                         * same capability.
                         */
                        if (!capable(CAP_SYS_ADMIN)) {
                                err = -EPERM;
                                goto out;
                        }

                        /*
                         * For now, we don't need to check that the underlying
                         * block device is present or not: we can do that when
                         * the raw device is opened.  Just check that the
                         * major/minor numbers make sense.
                         */
                        dev = MKDEV(rq.block_major, rq.block_minor);
                        if ((rq.block_major == 0 && rq.block_minor != 0) ||
                                        MAJOR(dev) != rq.block_major ||
                                        MINOR(dev) != rq.block_minor) {
                                err = -EINVAL;
                                goto out;
                        }

                        mutex_lock(&raw_mutex);
                        if (rawdev->inuse) {
                                mutex_unlock(&raw_mutex);
                                err = -EBUSY;
                                goto out;
                        }
                        if (rawdev->binding) {
                                bdput(rawdev->binding);
                                module_put(THIS_MODULE);
                        }
                        if (rq.block_major == 0 && rq.block_minor == 0) {
                                /* unbind */
                                rawdev->binding = NULL;
                                class_device_destroy(raw_class,
                                                MKDEV(RAW_MAJOR, rq.raw_minor));
                        } else {
                                rawdev->binding = bdget(dev);
                                if (rawdev->binding == NULL)
                                        err = -ENOMEM;
                                else {
                                        __module_get(THIS_MODULE);
                                        bind_device(&rq);
                                }
                        }
                        mutex_unlock(&raw_mutex);
                } else {        /* RAW_GETBIND */
                        struct block_device *bdev;

                        mutex_lock(&raw_mutex);
                        bdev = rawdev->binding;
                        if (bdev) {
                                rq.block_major = MAJOR(bdev->bd_dev);
                                rq.block_minor = MINOR(bdev->bd_dev);
                        } else {
                                rq.block_major = rq.block_minor = 0;
                        }
                        mutex_unlock(&raw_mutex);
                        if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
                                err = -EFAULT;
                                goto out;
                        }
                }
                break;
        default:
                err = -EINVAL;
                break;
        }
out:
        return err;
}
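
/*
 * Illustrative userspace sketch, not part of this driver: querying an
 * existing binding with RAW_GETBIND.  block_major/block_minor come back as
 * 0/0 when the minor is unbound.  Error handling is omitted.
 *
 *        #include <fcntl.h>
 *        #include <stdio.h>
 *        #include <sys/ioctl.h>
 *        #include <linux/raw.h>
 *
 *        int main(void)
 *        {
 *                struct raw_config_request rq = { .raw_minor = 1 };
 *                int fd = open("/dev/rawctl", O_RDWR);
 *
 *                ioctl(fd, RAW_GETBIND, &rq);
 *                printf("raw1 -> %llu:%llu\n",
 *                       (unsigned long long)rq.block_major,
 *                       (unsigned long long)rq.block_minor);
 *                return 0;
 *        }
 */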

static ssize_t raw_file_write(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct iovec local_iov = {
                .iov_base = (char __user *)buf,
                .iov_len = count
        };

        return generic_file_write_nolock(file, &local_iov, 1, ppos);
}

static ssize_t raw_file_aio_write(struct kiocb *iocb, const char __user *buf,
                        size_t count, loff_t pos)
{
        struct iovec local_iov = {
                .iov_base = (char __user *)buf,
                .iov_len = count
        };

        return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
}

static struct file_operations raw_fops = {
        .read           = generic_file_read,
        .aio_read       = generic_file_aio_read,
        .write          = raw_file_write,
        .aio_write      = raw_file_aio_write,
        .open           = raw_open,
        .release        = raw_release,
        .ioctl          = raw_ioctl,
        .readv          = generic_file_readv,
        .writev         = generic_file_writev,
        .owner          = THIS_MODULE,
};

static struct file_operations raw_ctl_fops = {
        .ioctl          = raw_ctl_ioctl,
        .open           = raw_open,
        .owner          = THIS_MODULE,
};

static struct cdev raw_cdev = {
        .kobj   = {.name = "raw", },
        .owner  = THIS_MODULE,
};

static int __init raw_init(void)
{
        int i;
        dev_t dev = MKDEV(RAW_MAJOR, 0);

        /* Reserve the whole minor range; minor 0 is the control device. */
        if (register_chrdev_region(dev, MAX_RAW_MINORS, "raw"))
                goto error;

        cdev_init(&raw_cdev, &raw_fops);
        if (cdev_add(&raw_cdev, dev, MAX_RAW_MINORS)) {
                kobject_put(&raw_cdev.kobj);
                unregister_chrdev_region(dev, MAX_RAW_MINORS);
                goto error;
        }

        raw_class = class_create(THIS_MODULE, "raw");
        if (IS_ERR(raw_class)) {
                printk(KERN_ERR "Error creating raw class.\n");
                cdev_del(&raw_cdev);
                unregister_chrdev_region(dev, MAX_RAW_MINORS);
                goto error;
        }
        class_device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

        devfs_mk_cdev(MKDEV(RAW_MAJOR, 0),
                      S_IFCHR | S_IRUGO | S_IWUGO,
                      "raw/rawctl");
        for (i = 1; i < MAX_RAW_MINORS; i++)
                devfs_mk_cdev(MKDEV(RAW_MAJOR, i),
                              S_IFCHR | S_IRUGO | S_IWUGO,
                              "raw/raw%d", i);
        return 0;

error:
        printk(KERN_ERR "error registering raw device\n");
        return 1;
}

static void __exit raw_exit(void)
{
        int i;

        for (i = 1; i < MAX_RAW_MINORS; i++)
                devfs_remove("raw/raw%d", i);
        devfs_remove("raw/rawctl");
        class_device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
        class_destroy(raw_class);
        cdev_del(&raw_cdev);
        unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");