// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(unsigned major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	mutex_lock(&chrdevs_lock);
	for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
		if (cd->major == offset)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
	}
	mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */
static int find_dynamic_major(void)
{
	int i;
	struct char_device_struct *cd;

	for (i = ARRAY_SIZE(chrdevs)-1; i > CHRDEV_MAJOR_DYN_END; i--) {
		if (chrdevs[i] == NULL)
			return i;
	}

	for (i = CHRDEV_MAJOR_DYN_EXT_START;
	     i > CHRDEV_MAJOR_DYN_EXT_END; i--) {
		for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
			if (cd->major == i)
				break;

		if (cd == NULL || cd->major != i)
			return i;
	}

	return -EBUSY;
}
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors under that major.
 *
 * Returns a pointer to the new char_device_struct on success, or an
 * ERR_PTR()-encoded errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	if (major == 0) {
		ret = find_dynamic_major();
		if (ret < 0) {
			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
			       name);
			goto out;
		}
		major = ret;
	}

	if (major >= CHRDEV_MAJOR_MAX) {
		pr_err("CHRDEV \"%s\" major requested (%d) is greater than the maximum (%d)\n",
		       name, major, CHRDEV_MAJOR_MAX);
		ret = -EINVAL;
		goto out;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges.  */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
					      next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
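/*
 * Example (illustrative sketch only, not part of this file): a driver that
 * owns a fixed device-number range would typically reserve it at init time
 * and release it on exit.  The major number 240 and the name "hypo_dev"
 * below are hypothetical placeholders.
 *
 *	static dev_t hypo_base = MKDEV(240, 0);
 *
 *	static int __init hypo_init(void)
 *	{
 *		return register_chrdev_region(hypo_base, 4, "hypo_dev");
 *	}
 *
 *	static void __exit hypo_exit(void)
 *	{
 *		unregister_chrdev_region(hypo_base, 4);
 *	}
 */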
229 * alloc_chrdev_region() - register a range of char device numbers
230 * @dev: output parameter for first assigned number
231 * @baseminor: first of the requested range of minor numbers
232 * @count: the number of minor numbers required
233 * @name: the name of the associated device or driver
235 * Allocates a range of char device numbers. The major number will be
236 * chosen dynamically, and returned (along with the first minor number)
237 * in @dev. Returns zero or a negative error code.
239 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
240 const char *name)
242 struct char_device_struct *cd;
243 cd = __register_chrdev_region(0, baseminor, count, name);
244 if (IS_ERR(cd))
245 return PTR_ERR(cd);
246 *dev = MKDEV(cd->major, cd->baseminor);
247 return 0;
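/*
 * Example (illustrative sketch only, not part of this file): dynamic
 * allocation is the usual choice for new drivers; the kernel picks a free
 * major and the driver reads it back from the returned dev_t.  All names
 * below are hypothetical.
 *
 *	static dev_t hypo_devt;
 *
 *	static int __init hypo_init(void)
 *	{
 *		int ret = alloc_chrdev_region(&hypo_devt, 0, 1, "hypo_dev");
 *		if (ret)
 *			return ret;
 *		pr_info("hypo_dev got major %d\n", MAJOR(hypo_devt));
 *		return 0;
 *	}
 */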
/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module registers only one type of device it is fine to use e.g. the
 * name of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
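/*
 * Example (illustrative sketch only, not part of this file): most callers
 * reach this through the register_chrdev()/unregister_chrdev() wrappers in
 * <linux/fs.h>, which pass baseminor 0 and a count of 256 minors.  The fops
 * and names below are hypothetical.
 *
 *	static const struct file_operations hypo_fops = {
 *		.owner = THIS_MODULE,
 *		.open  = hypo_open,
 *		.read  = hypo_read,
 *	};
 *
 *	static int hypo_major;
 *
 *	static int __init hypo_init(void)
 *	{
 *		hypo_major = register_chrdev(0, "hypo_dev", &hypo_fops);
 *		return hypo_major < 0 ? hypo_major : 0;
 *	}
 */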
/**
 * unregister_chrdev_region() - unregister a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}
static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}
/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

out_cdev_put:
	cdev_put(p);
	return ret;
}
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}
/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		return error;

	kobject_get(p->kobj.parent);

	return 0;
}
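/*
 * Example (illustrative sketch only, not part of this file): the usual
 * pattern pairs cdev_init() (or cdev_alloc()) with cdev_add() once the
 * device numbers have been reserved, e.g. via alloc_chrdev_region().
 * All identifiers below are hypothetical.
 *
 *	static struct cdev hypo_cdev;
 *
 *	static int hypo_setup(dev_t devt)
 *	{
 *		cdev_init(&hypo_cdev, &hypo_fops);
 *		hypo_cdev.owner = THIS_MODULE;
 *		return cdev_add(&hypo_cdev, devt, 1);
 *	}
 */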
/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * cdev_set_parent() sets a parent kobject which will be referenced
 * appropriately so the parent is not freed before the cdev. This
 * should be called before cdev_add.
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}
/**
 * cdev_device_add() - add a char device and its corresponding
 *	struct device, linking them together
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_add() adds the char device represented by @cdev to the system,
 * just as cdev_add does. It then adds @dev to the system using device_add().
 * The dev_t for the char device will be taken from the struct device, which
 * needs to be initialized first. This helper function correctly takes a
 * reference to the parent device so the parent will not get released until
 * all references to the cdev are released.
 *
 * This helper uses dev->devt for the device number. If it is not set
 * it will not add the cdev and it will be equivalent to device_add.
 *
 * This function should be used whenever the struct cdev and the
 * struct device are members of the same structure whose lifetime is
 * managed by the struct device.
 *
 * NOTE: Callers must assume that userspace was able to open the cdev and
 * can call cdev fops callbacks at any time, even if this function fails.
 */
int cdev_device_add(struct cdev *cdev, struct device *dev)
{
	int rc = 0;

	if (dev->devt) {
		cdev_set_parent(cdev, &dev->kobj);

		rc = cdev_add(cdev, dev->devt, 1);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc)
		cdev_del(cdev);

	return rc;
}
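/*
 * Example (illustrative sketch only, not part of this file): a driver that
 * embeds both the cdev and the struct device in one structure can use this
 * helper so the device's lifetime also pins the cdev.  Everything named
 * below is hypothetical.
 *
 *	struct hypo_device {
 *		struct device dev;
 *		struct cdev cdev;
 *	};
 *
 *	static int hypo_register(struct hypo_device *hd, dev_t devt)
 *	{
 *		device_initialize(&hd->dev);
 *		hd->dev.devt = devt;
 *		cdev_init(&hd->cdev, &hypo_fops);
 *		return cdev_device_add(&hd->cdev, &hd->dev);
 *	}
 */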
/**
 * cdev_device_del() - inverse of cdev_device_add
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_del() is a helper function to call cdev_del and device_del.
 * It should be used whenever cdev_device_add is used.
 *
 * If dev->devt is not set it will not remove the cdev and will be equivalent
 * to device_del.
 *
 * NOTE: This guarantees that associated sysfs callbacks are not running
 * or runnable, however any cdevs already open will remain and their fops
 * will still be callable even after this function returns.
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 *
 * NOTE: This guarantees that cdev device will no longer be able to be
 * opened, however any cdevs already open will remain and their fops will
 * still be callable even after cdev_del returns.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);