/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
11 #include <linux/config.h>
13 #include <linux/miscdevice.h>
14 #include <linux/tpqic02.h>
15 #include <linux/ftape.h>
16 #include <linux/malloc.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mman.h>
19 #include <linux/random.h>
20 #include <linux/init.h>
21 #include <linux/raw.h>
22 #include <linux/capability.h>
24 #include <asm/uaccess.h>
26 #include <asm/pgalloc.h>
29 extern int i2c_init_all(void);
32 void soundcore_init(void);
33 #ifdef CONFIG_SOUND_OSS
34 void soundcard_init(void);
37 #ifdef CONFIG_SPARCAUDIO
38 extern int sparcaudio_init(void);
43 #ifdef CONFIG_VIDEO_DEV
44 extern int videodev_init(void);
47 extern void fbmem_init(void);
49 #ifdef CONFIG_PROM_CONSOLE
50 extern void prom_con_init(void);
52 #ifdef CONFIG_MDA_CONSOLE
53 extern void mda_console_init(void);
55 #if defined(CONFIG_ADB)
56 extern void adbdev_init(void);
59 extern void telephony_init(void);
/*
 * do_write_mem(): shared back end for write_mem() and write_kmem().
 * Copies `count` bytes from the user buffer `buf` to the kernel address
 * `p` (whose physical address is `realp`) and advances *ppos.
 *
 * NOTE(review): this region is a damaged extraction -- statements are
 * split across lines, braces and the tail of the function are missing,
 * and the leading decimal tokens are original line numbers fused into
 * the text.  Only comments were added here.
 */
62 static ssize_t
do_write_mem(struct file
* file
, void *p
, unsigned long realp
,
63 const char * buf
, size_t count
, loff_t
*ppos
)
68 #if defined(__sparc__) || defined(__mc68000__)
69 /* we don't have page 0 mapped on sparc and m68k.. */
70 if (realp
< PAGE_SIZE
) {
71 unsigned long sz
= PAGE_SIZE
-realp
;
72 if (sz
> count
) sz
= count
;
73 /* Hmm. Do something? */
/* the write over the unmapped low page is silently skipped */
80 if (copy_from_user(p
, buf
, count
))
/* presumably returns -EFAULT on a faulting copy and the number of
   bytes written otherwise -- the function tail was lost; TODO confirm
   against the pristine source */
/*
 * read_mem(): read handler for /dev/mem.
 * NOTE(review): damaged extraction -- lines split, braces/returns
 * missing, fused line-number tokens.  Comments only added.
 */
89 * This function reads the *physical* memory. The f_pos points directly to the
92 static ssize_t
read_mem(struct file
* file
, char * buf
,
93 size_t count
, loff_t
*ppos
)
95 unsigned long p
= *ppos
;
96 unsigned long end_mem
;
/* clamp the read so it never runs past the top of physical RAM */
99 end_mem
= __pa(high_memory
);
102 if (count
> end_mem
- p
)
105 #if defined(__sparc__) || defined(__mc68000__)
106 /* we don't have page 0 mapped on sparc and m68k.. */
108 unsigned long sz
= PAGE_SIZE
-p
;
/* the unmapped low page reads back as zeroes via clear_user() */
112 if (clear_user(buf
, sz
))
121 if (copy_to_user(buf
, __va(p
), count
))
/*
 * write_mem(): write handler for /dev/mem.  Clamps the request to the
 * top of physical memory, then delegates to do_write_mem() with both
 * the kernel-virtual (__va(p)) and physical (p) addresses.
 * NOTE(review): damaged extraction -- lines split/missing; comments
 * only added.
 */
128 static ssize_t
write_mem(struct file
* file
, const char * buf
,
129 size_t count
, loff_t
*ppos
)
131 unsigned long p
= *ppos
;
132 unsigned long end_mem
;
134 end_mem
= __pa(high_memory
);
137 if (count
> end_mem
- p
)
139 return do_write_mem(file
, __va(p
), p
, buf
, count
, ppos
);
/*
 * Fallback pgprot_noncached(): returns `_prot` with the architecture's
 * cache-inhibit page-protection bits set, used only when the arch does
 * not provide its own macro.
 * NOTE(review): damaged extraction -- e.g. the m68k branch below has
 * lost the leading `if (...)` guard before SUN3_PAGE_NOCACHE and the
 * closing #endif lines are missing; comments only added.
 */
142 #ifndef pgprot_noncached
145 * This should probably be per-architecture in <asm/pgtable.h>
147 static inline pgprot_t
pgprot_noncached(pgprot_t _prot
)
149 unsigned long prot
= pgprot_val(_prot
);
151 #if defined(__i386__)
152 /* On PPro and successors, PCD alone doesn't always mean
153 uncached because of interactions with the MTRRs. PCD | PWT
154 means definitely uncached. */
155 if (boot_cpu_data
.x86
> 3)
156 prot
|= _PAGE_PCD
| _PAGE_PWT
;
157 #elif defined(__powerpc__)
158 prot
|= _PAGE_NO_CACHE
| _PAGE_GUARDED
;
159 #elif defined(__mc68000__)
161 prot
|= SUN3_PAGE_NOCACHE
;
162 else if (MMU_IS_851
|| MMU_IS_030
)
163 prot
|= _PAGE_NOCACHE030
;
164 /* Use no-cache mode, serialized */
165 else if (MMU_IS_040
|| MMU_IS_060
)
166 prot
= (prot
& _CACHEMASK040
) | _PAGE_NOCACHE_S
;
167 #elif defined(__mips__)
168 prot
= (prot
& ~_CACHE_MASK
) | _CACHE_UNCACHED
;
169 #elif defined(__arm__) && defined(CONFIG_CPU_32)
170 /* Turn off caching for all I/O areas */
171 prot
&= ~(L_PTE_CACHEABLE
| L_PTE_BUFFERABLE
);
174 return __pgprot(prot
);
177 #endif /* !pgprot_noncached */
/*
 * noncached_address(): decide whether a physical address should be
 * mapped uncached.  On i386 with MTRRs the MTRRs govern caching, so
 * only force uncached when MTRRs are absent; other arches simply treat
 * everything above high_memory as uncached.
 * NOTE(review): damaged extraction -- lines split/missing; comments
 * only added.
 */
180 * Architectures vary in how they handle caching for addresses
181 * outside of main memory.
183 static inline int noncached_address(unsigned long addr
)
185 #if defined(__i386__)
187 * On the PPro and successors, the MTRRs are used to set
188 * memory types for physical addresses outside main memory,
189 * so blindly setting PCD or PWT on those pages is wrong.
190 * For Pentiums and earlier, the surround logic should disable
191 * caching for the high addresses through the KEN pin, but
192 * we maintain the tradition of paranoia in this code.
194 return !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
)
195 && addr
>= __pa(high_memory
);
197 return addr
>= __pa(high_memory
);
/*
 * mmap_mem(): mmap handler for /dev/mem.  Marks out-of-RAM or O_SYNC
 * mappings uncached and VM_IO, then remaps the physical range into the
 * caller's vma with remap_page_range().
 * NOTE(review): damaged extraction -- the remap_page_range() call is
 * truncated and the return paths are missing; comments only added.
 */
201 static int mmap_mem(struct file
* file
, struct vm_area_struct
* vma
)
203 unsigned long offset
= vma
->vm_pgoff
<< PAGE_SHIFT
;
206 * Accessing memory above the top the kernel knows about or
207 * through a file pointer that was marked O_SYNC will be
210 if (noncached_address(offset
) || (file
->f_flags
& O_SYNC
))
211 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
214 * Don't dump addresses that are not real memory to a core file.
216 if (offset
>= __pa(high_memory
) || (file
->f_flags
& O_SYNC
))
217 vma
->vm_flags
|= VM_IO
;
219 if (remap_page_range(vma
->vm_start
, offset
, vma
->vm_end
-vma
->vm_start
,
/*
 * read_kmem(): read handler for /dev/kmem.  First copies out directly
 * from the kernel linear mapping (below high_memory), then uses a
 * bounce page plus vread() for vmalloc space.
 * NOTE(review): damaged extraction -- lines split, braces/returns and
 * loop structure missing; comments only added.
 */
226 * This function reads the *virtual* memory as seen by the kernel.
228 static ssize_t
read_kmem(struct file
*file
, char *buf
,
229 size_t count
, loff_t
*ppos
)
231 unsigned long p
= *ppos
;
234 char * kbuf
; /* k-addr because vread() takes vmlist_lock rwlock */
236 if (p
< (unsigned long) high_memory
) {
238 if (count
> (unsigned long) high_memory
- p
)
239 read
= (unsigned long) high_memory
- p
;
241 #if defined(__sparc__) || defined(__mc68000__)
242 /* we don't have page 0 mapped on sparc and m68k.. */
243 if (p
< PAGE_SIZE
&& read
> 0) {
244 size_t tmp
= PAGE_SIZE
- p
;
245 if (tmp
> read
) tmp
= read
;
246 if (clear_user(buf
, tmp
))
254 if (copy_to_user(buf
, (char *)p
, read
))
/* vmalloc range: bounce through a freshly allocated kernel page,
   because vread() must run under vmlist_lock */
261 kbuf
= (char *)__get_free_page(GFP_KERNEL
);
269 len
= vread(kbuf
, (char *)p
, len
);
270 if (len
&& copy_to_user(buf
, kbuf
, len
)) {
271 free_page((unsigned long)kbuf
);
279 free_page((unsigned long)kbuf
);
/*
 * write_kmem(): write handler for /dev/kmem.  Rejects offsets at or
 * above high_memory, clamps the count, and delegates to do_write_mem()
 * with the kernel-virtual address.
 * NOTE(review): damaged extraction -- lines split/missing; comments
 * only added.
 */
285 * This function writes to the *virtual* memory as seen by the kernel.
287 static ssize_t
write_kmem(struct file
* file
, const char * buf
,
288 size_t count
, loff_t
*ppos
)
290 unsigned long p
= *ppos
;
292 if (p
>= (unsigned long) high_memory
)
294 if (count
> (unsigned long) high_memory
- p
)
295 count
= (unsigned long) high_memory
- p
;
296 return do_write_mem(file
, (void*)p
, p
, buf
, count
, ppos
);
/*
 * read_port(): read handler for /dev/port -- reads successive I/O
 * ports with inb(), one byte per port, starting at *ppos and stopping
 * at port 65535.
 * NOTE(review): damaged extraction -- the `tmp` declaration, loop body
 * tail and return are missing; comments only added.
 */
300 static ssize_t
read_port(struct file
* file
, char * buf
,
301 size_t count
, loff_t
*ppos
)
303 unsigned long i
= *ppos
;
/* validate the whole user buffer once so __put_user() is safe below */
306 if (verify_area(VERIFY_WRITE
,buf
,count
))
308 while (count
-- > 0 && i
< 65536) {
309 if (__put_user(inb(i
),tmp
) < 0)
/*
 * write_port(): write handler for /dev/port -- writes successive
 * bytes from the user buffer to consecutive I/O ports starting at
 * *ppos, stopping at port 65535.
 * NOTE(review): damaged extraction -- the outb() call, loop tail and
 * return are missing; comments only added.
 */
318 static ssize_t
write_port(struct file
* file
, const char * buf
,
319 size_t count
, loff_t
*ppos
)
321 unsigned long i
= *ppos
;
322 const char * tmp
= buf
;
/* validate the whole user buffer once so __get_user() is safe below */
324 if (verify_area(VERIFY_READ
,buf
,count
))
326 while (count
-- > 0 && i
< 65536) {
328 if (__get_user(c
, tmp
))
/*
 * read_null(): read handler for /dev/null -- always reports EOF.
 * NOTE(review): damaged extraction -- body (`return 0;`) lost;
 * comments only added.
 */
339 static ssize_t
read_null(struct file
* file
, char * buf
,
340 size_t count
, loff_t
*ppos
)
/*
 * write_null(): write handler for /dev/null -- discards the data and
 * presumably reports the full count as written (body lost in
 * extraction; TODO confirm).
 */
345 static ssize_t
write_null(struct file
* file
, const char * buf
,
346 size_t count
, loff_t
*ppos
)
/*
 * read_zero_pagealigned(): fast path for large /dev/zero reads into a
 * page-aligned user range.  For private writable VMAs it avoids
 * copying: it zaps the existing pages and maps the zero page in their
 * place; shared VMAs fall back to conventional clear_user() zeroing.
 * Returns the number of bytes NOT handled (0 on full success).
 * NOTE(review): damaged extraction -- mm setup, loop control, locking
 * and several branches are missing; comments only added.
 */
352 * For fun, we are using the MMU for this.
354 static inline size_t read_zero_pagealigned(char * buf
, size_t size
)
356 struct mm_struct
*mm
;
357 struct vm_area_struct
* vma
;
358 unsigned long addr
=(unsigned long)buf
;
361 /* Oops, this was forgotten before. -ben */
364 /* For private mappings, just map in zero pages. */
365 for (vma
= find_vma(mm
, addr
); vma
; vma
= vma
->vm_next
) {
/* bail out to the slow path on holes, read-only or shared VMAs */
368 if (vma
->vm_start
> addr
|| (vma
->vm_flags
& VM_WRITE
) == 0)
370 if (vma
->vm_flags
& VM_SHARED
)
372 count
= vma
->vm_end
- addr
;
/* replace the existing pages with zero-page mappings */
376 flush_cache_range(mm
, addr
, addr
+ count
);
377 zap_page_range(mm
, addr
, count
);
378 zeromap_page_range(addr
, count
, PAGE_COPY
);
379 flush_tlb_range(mm
, addr
, addr
+ count
);
390 /* The shared case is hard. Let's do the conventional zeroing. */
392 unsigned long unwritten
= clear_user(buf
, PAGE_SIZE
);
394 return size
+ unwritten
- PAGE_SIZE
;
/* be nice: yield between pages if a reschedule is pending */
395 if (current
->need_resched
)
/*
 * read_zero(): read handler for /dev/zero.  Small reads are handled
 * with clear_user(); reads of 4+ pages zero the unaligned head, hand
 * the page-aligned middle to read_zero_pagealigned(), then zero the
 * tail.  Returns bytes written, or -EFAULT if nothing could be
 * written.
 * NOTE(review): damaged extraction -- early-exit branches, `left`
 * bookkeeping and intermediate returns are missing; comments only
 * added.
 */
407 static ssize_t
read_zero(struct file
* file
, char * buf
,
408 size_t count
, loff_t
*ppos
)
410 unsigned long left
, unwritten
, written
= 0;
415 if (!access_ok(VERIFY_WRITE
, buf
, count
))
420 /* do we want to be clever? Arbitrary cut-off */
421 if (count
>= PAGE_SIZE
*4) {
422 unsigned long partial
;
424 /* How much left of the page? */
425 partial
= (PAGE_SIZE
-1) & -(unsigned long) buf
;
426 unwritten
= clear_user(buf
, partial
);
427 written
= partial
- unwritten
;
/* page-aligned bulk of the request goes through the MMU fast path */
432 unwritten
= read_zero_pagealigned(buf
, left
& PAGE_MASK
);
433 written
+= (left
& PAGE_MASK
) - unwritten
;
436 buf
+= left
& PAGE_MASK
;
/* zero whatever unaligned tail remains */
439 unwritten
= clear_user(buf
, left
);
440 written
+= left
- unwritten
;
442 return written
? written
: -EFAULT
;
/*
 * mmap_zero(): mmap handler for /dev/zero.  Shared mappings become
 * shmem-backed via map_zero_setup(); private mappings get the zero
 * page mapped over the whole range.
 * NOTE(review): damaged extraction -- the failure return and final
 * `return 0;` are missing; comments only added.
 */
445 static int mmap_zero(struct file
* file
, struct vm_area_struct
* vma
)
447 if (vma
->vm_flags
& VM_SHARED
)
448 return map_zero_setup(vma
);
449 if (zeromap_page_range(vma
->vm_start
, vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
))
/*
 * write_full(): write handler for /dev/full -- presumably always
 * returns -ENOSPC (body lost in extraction; TODO confirm).
 */
454 static ssize_t
write_full(struct file
* file
, const char * buf
,
455 size_t count
, loff_t
*ppos
)
/*
 * null_lseek(): lseek for /dev/null and /dev/zero -- pins f_pos at 0
 * regardless of the requested offset, which is what lets these devices
 * be fopen()ed in append mode.
 * NOTE(review): damaged extraction -- enclosing braces lost; comments
 * only added.
 */
461 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
462 * can fopen() both devices with "a" now. This was previously impossible.
466 static loff_t
null_lseek(struct file
* file
, loff_t offset
, int orig
)
468 return file
->f_pos
= 0;
/*
 * memory_lseek(): lseek for the memory devices.  SEEK_SET assigns the
 * offset, SEEK_CUR adds it; SEEK_END is unsupported (-EINVAL).
 * Negative positions are allowed because the full 32/64-bit offset is
 * meaningful here.
 * NOTE(review): damaged extraction -- the switch statement, SEEK_END
 * branch and return are missing; comments only added.
 */
472 * The memory devices use the full 32/64 bits of the offset, and so we cannot
473 * check against negative addresses: they are ok. The return value is weird,
474 * though, in that case (0).
476 * also note that seeking relative to the "end of file" isn't supported:
477 * it has no meaning, so it returns -EINVAL.
479 static loff_t
memory_lseek(struct file
* file
, loff_t offset
, int orig
)
483 file
->f_pos
= offset
;
486 file
->f_pos
+= offset
;
/*
 * open_port(): open check shared by /dev/port (and, via the #define
 * aliases below this function in the original file, /dev/mem and
 * /dev/kmem) -- requires CAP_SYS_RAWIO.
 * NOTE(review): damaged extraction -- braces lost; comments only
 * added.
 */
493 static int open_port(struct inode
* inode
, struct file
* filp
)
495 return capable(CAP_SYS_RAWIO
) ? 0 : -EPERM
;
/* Alias handlers that are identical across devices, so one
 * implementation serves several minors. */
498 #define mmap_kmem mmap_mem
499 #define zero_lseek null_lseek
500 #define full_lseek null_lseek
501 #define write_zero write_null
502 #define read_full read_zero
503 #define open_mem open_port
504 #define open_kmem open_mem
/*
 * Per-minor file_operations tables (GNU designated-initializer style).
 * NOTE(review): damaged extraction -- most initializer members and the
 * closing braces were lost; only the llseek entries survive.  Comments
 * only added.
 */
506 static struct file_operations mem_fops
= {
507 llseek
: memory_lseek
,
514 static struct file_operations kmem_fops
= {
515 llseek
: memory_lseek
,
522 static struct file_operations null_fops
= {
529 static struct file_operations port_fops
= {
530 llseek
: memory_lseek
,
537 static struct file_operations zero_fops
= {
544 static struct file_operations full_fops
= {
/*
 * memory_open(): open entry point for major MEM_MAJOR.  Dispatches on
 * the minor number, installing the matching per-device file_operations
 * and then chaining to that device's own open() if it has one.
 * NOTE(review): damaged extraction -- the `case N:` labels, default
 * branch and returns are missing; comments only added.
 */
550 static int memory_open(struct inode
* inode
, struct file
* filp
)
552 switch (MINOR(inode
->i_rdev
)) {
554 filp
->f_op
= &mem_fops
;
557 filp
->f_op
= &kmem_fops
;
560 filp
->f_op
= &null_fops
;
564 filp
->f_op
= &port_fops
;
568 filp
->f_op
= &zero_fops
;
571 filp
->f_op
= &full_fops
;
574 filp
->f_op
= &random_fops
;
577 filp
->f_op
= &urandom_fops
;
/* forward to the selected device's own open(), if any */
582 if (filp
->f_op
&& filp
->f_op
->open
)
583 return filp
->f_op
->open(inode
,filp
);
/*
 * memory_devfs_register(): register every memory-class minor with
 * devfs from a static table of {minor, name, mode, fops} entries.
 * Entries are never unregistered.
 * NOTE(review): damaged extraction -- the table's `name`/`mode` member
 * declarations, the loop variable declaration and the tail of the
 * devfs_register() call are missing; comments only added.
 */
587 void __init
memory_devfs_register (void)
589 /* These are never unregistered */
590 static const struct {
591 unsigned short minor
;
594 struct file_operations
*fops
;
595 } list
[] = { /* list of minor devices */
596 {1, "mem", S_IRUSR
| S_IWUSR
| S_IRGRP
, &mem_fops
},
597 {2, "kmem", S_IRUSR
| S_IWUSR
| S_IRGRP
, &kmem_fops
},
598 {3, "null", S_IRUGO
| S_IWUGO
, &null_fops
},
600 {4, "port", S_IRUSR
| S_IWUSR
| S_IRGRP
, &port_fops
},
602 {5, "zero", S_IRUGO
| S_IWUGO
, &zero_fops
},
603 {7, "full", S_IRUGO
| S_IWUGO
, &full_fops
},
604 {8, "random", S_IRUGO
| S_IWUSR
, &random_fops
},
605 {9, "urandom", S_IRUGO
| S_IWUSR
, &urandom_fops
}
/* register each table entry as a character device node */
609 for (i
=0; i
<(sizeof(list
)/sizeof(*list
)); i
++)
610 devfs_register (NULL
, list
[i
].name
, DEVFS_FL_NONE
,
611 MEM_MAJOR
, list
[i
].minor
,
612 list
[i
].mode
| S_IFCHR
,
/* Top-level fops for major MEM_MAJOR: memory_open() swaps in the real
 * per-minor fops.  NOTE(review): closing brace lost in extraction. */
616 static struct file_operations memory_fops
= {
617 open
: memory_open
, /* just a selector for the real open */
620 int __init
chr_dev_init(void)
622 if (devfs_register_chrdev(MEM_MAJOR
,"mem",&memory_fops
))
623 printk("unable to get major %d for memory devs\n", MEM_MAJOR
);
624 memory_devfs_register();
630 #if defined (CONFIG_FB)
633 #if defined (CONFIG_PROM_CONSOLE)
636 #if defined (CONFIG_MDA_CONSOLE)
640 #ifdef CONFIG_PRINTER
643 #ifdef CONFIG_M68K_PRINTER
649 #ifdef CONFIG_SOUND_OSS
653 #ifdef CONFIG_SPARCAUDIO
656 #if CONFIG_QIC02_TAPE
665 #if defined(CONFIG_ADB)
668 #ifdef CONFIG_VIDEO_DEV