/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		/* Dynamic major: take the highest hash slot that is still
		 * empty and use its index as the major number. */
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strncpy(cd->name, name, 64);

	i = major_to_index(major);

	/* Keep each hash chain sorted by major, then baseminor. */
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
			break;
	if (*cp && (*cp)->major == major &&
	    (*cp)->baseminor < baseminor + minorct) {
		ret = -EBUSY;
		goto out;
	}
	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
					      next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
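
/*
 * Editorial usage sketch (not part of the original file): how a driver would
 * claim a fixed major/minor range with register_chrdev_region() and release
 * it again.  The "foo" name and the major/minor values are hypothetical.
 */
static int foo_example_claim_region(void)
{
	/* Reserve minors 0..3 of major 240 under the name "foo". */
	return register_chrdev_region(MKDEV(240, 0), 4, "foo");
}

static void foo_example_release_region(void)
{
	/* Release exactly the range registered above. */
	unregister_chrdev_region(MKDEV(240, 0), 4);
}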
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
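
/*
 * Editorial usage sketch (not part of the original file): letting the kernel
 * pick a free major with alloc_chrdev_region().  The "bar" name, the dev_t
 * variable and the minor count are hypothetical.
 */
static dev_t bar_example_devt;

static int bar_example_alloc_region(void)
{
	/* Ask for one minor starting at 0; the chosen numbers land in
	 * bar_example_devt on success. */
	int err = alloc_chrdev_region(&bar_example_devt, 0, 1, "bar");
	if (err)
		return err;
	printk(KERN_INFO "bar: allocated major %d\n", MAJOR(bar_example_devt));
	return 0;
}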
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
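
/*
 * Editorial usage sketch (not part of the original file): the older
 * register_chrdev() interface, which grabs all 256 minors of a major and
 * sets up the cdev internally.  The "baz" name and file operations are
 * hypothetical.
 */
static int baz_example_open(struct inode *inode, struct file *filp)
{
	return 0;	/* a real driver would set up per-open state here */
}

static const struct file_operations baz_example_fops = {
	.owner	= THIS_MODULE,
	.open	= baz_example_open,
};

static int baz_example_register(void)
{
	/* major == 0 requests dynamic allocation; the major is returned. */
	int major = register_chrdev(0, "baz", &baz_example_fops);
	if (major < 0)
		return major;
	return 0;
}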
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
int unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
	return 0;
}
static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}
/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/* Drop the lock while looking up the cdev in the map. */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Re-check: another opener may have cached a cdev meanwhile. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);	/* drop the unused lookup reference, if any */
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		lock_kernel();
		ret = filp->f_op->open(inode, filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};
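
/*
 * Editorial note (not part of the original file): filesystems install this
 * table on character special inodes via init_special_inode() in fs/inode.c,
 * so the first open of such a file enters chrdev_open() above, which then
 * swaps in the driver's real file_operations.  A condensed sketch of the
 * S_ISCHR branch of that helper, as assumed here:
 */
static void example_init_chr_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	inode->i_fop = &def_chr_fops;	/* route the first open to chrdev_open() */
	inode->i_rdev = rdev;
}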
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		p->kobj.ktype = &ktype_cdev_dynamic;
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj);
	}
	return p;
}
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
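
/*
 * Editorial usage sketch (not part of the original file): the usual
 * cdev_init()/cdev_add() pairing in a driver, using hypothetical qux_* names.
 * The dev_t is assumed to come from alloc_chrdev_region() beforehand.
 */
static struct cdev qux_example_cdev;

static int qux_example_open(struct inode *inode, struct file *filp)
{
	return 0;	/* nothing to set up in this sketch */
}

static const struct file_operations qux_example_fops = {
	.owner	= THIS_MODULE,
	.open	= qux_example_open,
};

static int qux_example_setup(dev_t devt)
{
	/* The cdev is embedded in static storage, so cdev_init() (and with it
	 * ktype_cdev_default, whose release does not kfree) is the right
	 * choice rather than cdev_alloc(). */
	cdev_init(&qux_example_cdev, &qux_example_fops);
	qux_example_cdev.owner = THIS_MODULE;

	/* Make one minor live; opens of devt will reach qux_example_open(). */
	return cdev_add(&qux_example_cdev, devt, 1);
}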
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
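
/*
 * Editorial note (not part of the original file): for the request_module()
 * calls above to find a driver automatically, the driver module declares a
 * matching alias.  A sketch, assuming the MODULE_ALIAS_CHARDEV_MAJOR() helper
 * from <linux/fs.h> and a hypothetical major of 241:
 */
MODULE_ALIAS_CHARDEV_MAJOR(241);	/* matches the "char-major-241-*" requests */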
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);