/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 *   - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
        .capabilities   = (
#ifdef CONFIG_MMU
                /* permit private copies of the data to be taken */
                BDI_CAP_MAP_COPY |
#endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
                BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};

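/*
 * Usage sketch (illustrative, not part of this file): a driver for a
 * directly mappable character device points the file's mapping at this
 * BDI from its open method, much as drivers/char/mem.c does for /dev/mem.
 * The function name my_cdev_open below is hypothetical.
 *
 *      static int my_cdev_open(struct inode *inode, struct file *filp)
 *      {
 *              filp->f_mapping->backing_dev_info =
 *                      &directly_mappable_cdev_bdi;
 *              return 0;
 *      }
 */
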
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct file_operations *fops;
        struct cdev *cdev;              /* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        if (offset < CHRDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&chrdevs_lock);
                for (cd = chrdevs[offset]; cd; cd = cd->next)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
                mutex_unlock(&chrdevs_lock);
        }
}

#endif /* CONFIG_PROC_FS */

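/*
 * chrdev_show() prints the entries under the "Character devices:" heading
 * of /proc/devices; the "%3d %s" format above yields output along these
 * lines (the majors and names are just an example system, not fixed values):
 *
 *      Character devices:
 *        1 mem
 *        4 tty
 *        5 /dev/tty
 *       10 misc
 */
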
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                         int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        /* temporary */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strncpy(cd->name, name, 64);

        i = major_to_index(major);

        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
                        break;
        if (*cp && (*cp)->major == major &&
            (*cp)->baseminor < baseminor + minorct) {
                ret = -EBUSY;
                goto out;
        }
        cd->next = *cp;
        *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}

int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}

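/*
 * Usage sketch (hypothetical driver, not part of this file): reserving a
 * fixed range of device numbers before cdev_add().  MY_MAJOR, MY_COUNT and
 * "my_driver" are made-up; a real driver would pick a documented or locally
 * reserved major (see Documentation/devices.txt).
 *
 *      #define MY_MAJOR        240     (in the local/experimental range)
 *      #define MY_COUNT        4
 *
 *      dev_t devno = MKDEV(MY_MAJOR, 0);
 *      int err = register_chrdev_region(devno, MY_COUNT, "my_driver");
 *      if (err)
 *              return err;
 *      ...
 *      unregister_chrdev_region(devno, MY_COUNT);
 */
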
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}

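/*
 * Usage sketch (hypothetical, not part of this file): the usual dynamic
 * pattern pairs alloc_chrdev_region() with cdev_init()/cdev_add() below.
 * my_dev, my_cdev and my_fops are made-up names.
 *
 *      static dev_t my_dev;
 *      static struct cdev my_cdev;
 *
 *      err = alloc_chrdev_region(&my_dev, 0, 4, "my_driver");
 *      if (err)
 *              return err;
 *      cdev_init(&my_cdev, &my_fops);
 *      my_cdev.owner = THIS_MODULE;
 *      err = cdev_add(&my_cdev, my_dev, 4);
 *      if (err)
 *              unregister_chrdev_region(my_dev, 4);
 */
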
/**
 * register_chrdev() - Register a major number for character devices.
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module implements only one type of device, it's OK to use e.g. the
 * name of the module here.
 *
 * This function registers a range of 256 minor numbers. The first minor number
 * is 0.
 */
int register_chrdev(unsigned int major, const char *name,
                    const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        char *s;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, 0, 256, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);
        for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
                *s = '!';

        err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, 0, 256));
        return err;
}

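/*
 * Usage sketch (hypothetical): the older all-in-one interface.  One call
 * claims minors 0-255 of a major and wires up @fops, so the caller never
 * touches a cdev directly.  my_fops is a made-up name.
 *
 *      int major = register_chrdev(0, "my_driver", &my_fops);
 *      if (major < 0)
 *              return major;
 *      (major now holds the dynamically allocated number, since 0 was passed)
 *      ...
 *      unregister_chrdev(major, "my_driver");
 */
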
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

int unregister_chrdev(unsigned int major, const char *name)
{
        struct char_device_struct *cd;
        cd = __unregister_chrdev_region(major, 0, 256);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
        return 0;
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode *inode, struct file *filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        inode->i_cindex = idx;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op) {
                cdev_put(p);
                return -ENXIO;
        }
        if (filp->f_op->open) {
                lock_kernel();
                ret = filp->f_op->open(inode, filp);
                unlock_kernel();
        }
        if (ret)
                cdev_put(p);
        return ret;
}

void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        p->dev = dev;
        p->count = count;
        return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}

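/*
 * Usage sketch (hypothetical): typical module_exit() teardown for a driver
 * that used alloc_chrdev_region() and cdev_add(); the cdev is removed first
 * so no new opens can reach it, then the numbers are returned.  my_cdev and
 * my_dev are the made-up names from the earlier sketch.
 *
 *      cdev_del(&my_cdev);
 *      unregister_chrdev_region(my_dev, 4);
 */
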
static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
        kfree(p);
}

static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};

struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                p->kobj.ktype = &ktype_cdev_dynamic;
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj);
        }
        return p;
}

void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        cdev->kobj.ktype = &ktype_cdev_default;
        kobject_init(&cdev->kobj);
        cdev->ops = fops;
}

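/*
 * Note the split between the two initializers: cdev_alloc() returns a
 * kmalloc'ed cdev whose kobject release also frees it (ktype_cdev_dynamic),
 * while cdev_init() is meant for a cdev embedded in a larger driver
 * structure and only purges it on release (ktype_cdev_default).  A
 * hypothetical embedded use (my_device, my_fops, devno are made up):
 *
 *      struct my_device {
 *              struct cdev cdev;
 *              ...
 *      };
 *
 *      cdev_init(&dev->cdev, &my_fops);
 *      dev->cdev.owner = THIS_MODULE;
 *      err = cdev_add(&dev->cdev, devno, 1);
 */
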
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}

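/*
 * base_probe() lets an open() of a device node whose driver is not yet
 * loaded pull that driver in via kmod.  For this to work the module has to
 * advertise a matching alias, e.g. via MODULE_ALIAS_CHARDEV_MAJOR(nnn) in
 * the module source, or an illustrative modprobe configuration entry
 * ("my_driver" and major 240 are made up):
 *
 *      alias char-major-240-* my_driver
 */
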
void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);