/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>

#include <linux/kmod.h>

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
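
/*
 * Worked example (assuming CHRDEV_MAJOR_HASH_SIZE is 255, its value in
 * <linux/fs.h> of this era): major 300 hashes to 300 % 255 = 45, so its
 * entry shares a chain with major 45; each chain is kept sorted by major,
 * then baseminor, by __register_chrdev_region() below.
 */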

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}
		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strncpy(cd->name, name, 64);

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
			break;

	/* Check for overlapping minor ranges. */
	if (*cp && (*cp)->major == major &&
	    (*cp)->baseminor < baseminor + minorct) {
		ret = -EBUSY;
		goto out;
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
					      next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
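
/*
 * Usage sketch (illustrative, not part of this file; the "mydev" name,
 * MYDEV_MAJOR and MYDEV_MINORS below are hypothetical): a driver that owns
 * a fixed major reserves its minor range up front and releases it on exit.
 *
 *	#define MYDEV_MAJOR	240
 *	#define MYDEV_MINORS	4
 *
 *	static int __init mydev_init(void)
 *	{
 *		return register_chrdev_region(MKDEV(MYDEV_MAJOR, 0),
 *					      MYDEV_MINORS, "mydev");
 *	}
 *
 *	static void __exit mydev_exit(void)
 *	{
 *		unregister_chrdev_region(MKDEV(MYDEV_MAJOR, 0), MYDEV_MINORS);
 *	}
 */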

int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
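
/*
 * Usage sketch (illustrative; "mydrv" and the local variables are
 * hypothetical): dynamic allocation is the usual path when a driver has no
 * assigned major; the chosen major can be read back with MAJOR().
 *
 *	dev_t devt;
 *	int err;
 *
 *	err = alloc_chrdev_region(&devt, 0, 4, "mydrv");
 *	if (err)
 *		return err;
 *	printk(KERN_INFO "mydrv: using major %d\n", MAJOR(devt));
 *
 * and, on teardown:
 *
 *	unregister_chrdev_region(devt, 4);
 */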

int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
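
/*
 * Usage sketch (illustrative; mydrv_fops and its callbacks are
 * hypothetical): the old-style interface registers a name, a major (0 asks
 * for dynamic allocation) and a file_operations table in one call, and as
 * the code above shows it claims all 256 minors of that major.
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner	= THIS_MODULE,
 *		.open	= mydrv_open,
 *		.read	= mydrv_read,
 *	};
 *
 *	int major = register_chrdev(0, "mydrv", &mydrv_fops);
 *	if (major < 0)
 *		return major;
 *
 * and on module unload:
 *
 *	unregister_chrdev(major, "mydrv");
 */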

void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

int unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
	return 0;
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		lock_kernel();
		ret = filp->f_op->open(inode, filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};
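
/*
 * For reference: init_special_inode() in fs/inode.c points every
 * character-special inode's ->i_fop at def_chr_fops; a sketch of the
 * relevant branch, assuming the fs/inode.c of this era:
 *
 *	if (S_ISCHR(mode)) {
 *		inode->i_fop = &def_chr_fops;
 *		inode->i_rdev = rdev;
 *	}
 *
 * so the first open() of such a file always enters chrdev_open(), which
 * then installs the driver's own file_operations via fops_get(p->ops).
 */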

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
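
/*
 * Usage sketch (illustrative; the mydrv_* names are hypothetical): the
 * common pattern is to reserve a dev_t range, initialize an embedded cdev
 * with the driver's fops, and only then call cdev_add().  The device can
 * be opened the moment cdev_add() returns, so everything it needs must be
 * ready first.
 *
 *	static struct cdev mydrv_cdev;
 *	dev_t devt;
 *	int err;
 *
 *	err = alloc_chrdev_region(&devt, 0, 1, "mydrv");
 *	if (err)
 *		return err;
 *	cdev_init(&mydrv_cdev, &mydrv_fops);
 *	mydrv_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&mydrv_cdev, devt, 1);
 *	if (err)
 *		unregister_chrdev_region(devt, 1);
 *	return err;
 */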

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}

static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		p->kobj.ktype = &ktype_cdev_dynamic;
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj);
	}
	return p;
}
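
/*
 * Usage note (a sketch; mydrv_fops is hypothetical): cdev_alloc() suits
 * the case where the cdev is not embedded in a larger driver structure.
 * Because of ktype_cdev_dynamic above, the final kobject_put() frees the
 * cdev, so the caller must not kfree() it once cdev_add() has succeeded;
 * on an add failure the reference is dropped instead, as register_chrdev()
 * does above.
 *
 *	struct cdev *cdev = cdev_alloc();
 *	if (!cdev)
 *		return -ENOMEM;
 *	cdev->owner = THIS_MODULE;
 *	cdev->ops = &mydrv_fops;
 *	err = cdev_add(cdev, devt, 1);
 *	if (err)
 *		kobject_put(&cdev->kobj);
 */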

void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
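
/*
 * A modular driver makes these request_module() calls useful by declaring
 * a matching alias; a minimal sketch (the major value 240 is hypothetical):
 *
 *	MODULE_ALIAS_CHARDEV_MAJOR(240);
 *
 * which resolves to a "char-major-240-*" alias, so opening an unbound
 * device node with that major loads the module on demand.
 */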

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);