x86: PAT avoid aliasing in /dev/mem read/write
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
		&& addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the
	 * MTRRs control the actual caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or
	 * through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
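/*
 * unxlate_dev_mem_ptr() is a weak symbol: an architecture whose
 * xlate_dev_mem_ptr() sets up a temporary mapping (to dodge the
 * cached/uncached attribute aliases described above for ia64) can pair
 * it with a real teardown, while everyone else gets this no-op.
 */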
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned:
		 * -p & (PAGE_SIZE - 1) is the distance from p to the
		 * next page boundary.
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
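/*
 * Illustrative userspace counterpart to read_mem(), not part of this
 * driver (the physical offset below is an example value):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char page[4096];
 *	int fd = open("/dev/mem", O_RDONLY);
 *	ssize_t n = pread(fd, page, sizeof(page), 0x9f000);
 *	close(fd);
 *
 * Opening requires CAP_SYS_RAWIO (see open_port() below), and with
 * CONFIG_NONPROMISC_DEVMEM the read fails with -EPERM on any page
 * that devmem_is_allowed() rejects.
 */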
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
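/*
 * Illustrative userspace counterpart to mmap_mem(), not part of this
 * driver (the physical address is an example value):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, 0xfee00000);
 *
 * The mmap() offset is the physical address. O_SYNC makes the generic
 * phys_mem_access_prot()/uncached_access() path above map the range
 * uncached, which is usually what device memory wants.
 */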
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
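/*
 * read_kmem() is really two readers in one: addresses below
 * high_memory are copied straight out of the kernel's linear mapping
 * (with the same per-page alignment chopping as read_mem()), while
 * higher addresses go through vread(), which takes vmlist_lock and
 * bounces each chunk through a temporary page because vmalloc space
 * may contain unmapped holes.
 */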
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
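/*
 * /dev/port drives the x86-style I/O port space one byte at a time:
 * the file offset is the port number, so only ports 0-65535 are
 * reachable and every byte is a separate inb()/outb(). Wider or
 * faster port access from userspace needs ioperm()/iopl() instead.
 */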
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem
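/*
 * The aliases above encode behavioural symmetry: seeking /dev/zero or
 * /dev/full is as meaningless as seeking /dev/null, writes to
 * /dev/zero are discarded just like writes to /dev/null, reads from
 * /dev/full return zeroes exactly like /dev/zero (only its writes
 * differ, failing with -ENOSPC), and /dev/mem, /dev/kmem and
 * /dev/oldmem all reuse the CAP_SYS_RAWIO check from open_port().
 */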
static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif
static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};
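/*
 * kmsg_write() makes e.g. "echo message > /dev/kmsg" inject the string
 * into the kernel log via printk(); the return value is capped at
 * count because printk() may prepend a log-level prefix of its own.
 */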
static int memory_open(struct inode *inode, struct file *filp)
{
	switch (iminor(inode)) {
	case 1:
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
	case 2:
		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
	case 3:
		filp->f_op = &null_fops;
		break;
#ifdef CONFIG_DEVPORT
	case 4:
		filp->f_op = &port_fops;
		break;
#endif
	case 5:
		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;
		break;
	case 7:
		filp->f_op = &full_fops;
		break;
	case 8:
		filp->f_op = &random_fops;
		break;
	case 9:
		filp->f_op = &urandom_fops;
		break;
	case 11:
		filp->f_op = &kmsg_fops;
		break;
#ifdef CONFIG_CRASH_DUMP
	case 12:
		filp->f_op = &oldmem_fops;
		break;
#endif
	default:
		return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}
static const struct file_operations memory_fops = {
	.open	= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int			minor;
	char				*name;
	umode_t				mode;
	const struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);
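/*
 * chr_dev_init() runs at fs_initcall time: it claims major MEM_MAJOR
 * for the memory devices and registers one struct device per
 * devlist[] entry in the "mem" class, from which udev (or a static
 * /dev) creates the familiar /dev/mem, /dev/null, /dev/zero, ...
 * nodes.
 */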