/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* same behaviour as i386. PAT always set to cached and MTRRs control the
           caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_PPC64)
        /* On PPC64, we always do non-cacheable access to the IO hole and
         * cacheable elsewhere. Cache paradox can checkstop the CPU and
         * the high_memory heuristic below is wrong on machines with memory
         * above the IO hole... Ah, and of course, XFree86 doesn't pass
         * O_SYNC when mapping us to tap IO space. Surprised ?
         */
        return !page_is_ram(addr >> PAGE_SHIFT);
#else
        /*
         * Accessing memory above the top the kernel knows about or through a file pointer
         * that was marked O_SYNC will be done non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

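/*
 * Generic bounds check for physical-memory accesses: reject offsets at or
 * past __pa(high_memory) and clamp *count so the access stays below it.
 * An architecture can provide its own version by defining
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE.
 */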
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (addr >= end_mem)
                return 0;

        if (*count > end_mem - addr)
                *count = end_mem - addr;

        return 1;
}
#endif

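/*
 * Common write helper for /dev/mem and the low part of /dev/kmem: copy user
 * data to the kernel mapping at 'p' (physical address 'realp'), advance
 * *ppos and return the number of bytes written or -EFAULT.
 */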
static ssize_t do_write_mem(void *p, unsigned long realp,
                            const char __user * buf, size_t count, loff_t *ppos)
{
        ssize_t written;
        unsigned long copied;

        written = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif
        copied = copy_from_user(p, buf, count);
        if (copied) {
                ssize_t ret = written + (count - copied);

                if (ret)
                        return ret;
                return -EFAULT;
        }
        written += count;
        *ppos += written;
        return written;
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        read = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE-p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}

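/*
 * This function writes to the *physical* memory. As in read_mem(), the
 * f_pos is used directly as the physical address.
 */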
static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        return do_write_mem(__va(p), p, buf, count, ppos);
}

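/*
 * Map physical memory into the caller's address space: vm_pgoff is taken
 * as the physical page frame number, and mappings that uncached_access()
 * flags (addresses outside RAM or files opened with O_SYNC) are made
 * non-cacheable and excluded from core dumps.
 */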
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        int uncached;

        uncached = uncached_access(file, offset);
#ifdef pgprot_noncached
        if (uncached)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        if (uncached)
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma, vma->vm_start, offset, vma->vm_end-vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read)
                                tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_mem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        ssize_t ret;

                                        free_page((unsigned long)kbuf);
                                        ret = wrote + virtr + (len - written);
                                        return ret ? ret : -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + wrote;
}

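/*
 * /dev/port: byte-at-a-time access to the x86-style I/O port space
 * (ports 0-65535) via inb()/outb(). Not built on m68k unless CONFIG_ISA
 * is set, since there is no ISA port space to poke at.
 */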
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (verify_area(VERIFY_WRITE,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i),tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (verify_area(VERIFY_READ,buf,count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c,i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

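/*
 * read_zero(): small reads are satisfied with clear_user(). Large reads
 * zero up to the next page boundary, hand the page-aligned middle to
 * read_zero_pagealigned() (which remaps private writable VMAs onto zero
 * pages instead of touching every byte), then clear_user() the tail.
 */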
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        down(&file->f_dentry->d_inode->i_sem);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        up(&file->f_dentry->d_inode->i_sem);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

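/*
 * Several of the devices share implementations: /dev/kmem mmaps like
 * /dev/mem, /dev/zero and /dev/full seek like /dev/null, writes to
 * /dev/zero are discarded like /dev/null, reads from /dev/full return
 * zeroes like /dev/zero, and /dev/mem and /dev/kmem reuse open_port()'s
 * CAP_SYS_RAWIO check.  The aliases below express that.
 */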
#define mmap_kmem       mmap_mem
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem

static struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};

static struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};

static struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

static struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

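/*
 * Writes to /dev/kmsg are injected into the kernel log: the user buffer is
 * copied, NUL-terminated and handed to printk().
 */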
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        int ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
        }
        kfree(tmp);
        return ret;
}

static struct file_operations kmsg_fops = {
        .write =        kmsg_write,
};

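/*
 * memory_open() is only a selector: based on the minor number it installs
 * the real file_operations for the device being opened and then chains to
 * that device's own open(), if it has one.
 */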
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode,filp);
        return 0;
}

static struct file_operations memory_fops = {
        .open           = memory_open, /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        struct file_operations  *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
};

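/*
 * Note: the minor numbers listed here must stay in sync with the switch in
 * memory_open() above; chr_dev_init() below walks this table to create the
 * sysfs class devices and devfs nodes.
 */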
static struct class_simple *mem_class;

static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_simple_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
                class_simple_device_add(mem_class,
                                        MKDEV(MEM_MAJOR, devlist[i].minor),
                                        NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                                S_IFCHR | devlist[i].mode, devlist[i].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);