/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/signal2.h>

#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_kqfilter_t	mmkqfilter;
static int memuksmap(cdev_t dev, vm_page_t fake);

static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_kqfilter =	mmkqfilter,
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_kqfilter =	mmkqfilter,
	.d_uksmap =	memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_kqfilter =	mmkqfilter,
	.d_uksmap =	memuksmap
};
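
/*
 * Minor device layout, as created in mem_drvinit() below:
 *
 *	0	/dev/mem	physical memory
 *	1	/dev/kmem	kernel virtual memory
 *	2	/dev/null	EOF on read, rathole on write
 *	3	/dev/random	entropy on read, seeding on write
 *	4	/dev/urandom	unlimited pseudo-random data, read-only
 *	5	/dev/upmap	per-process shared page, mmap only
 *	6	/dev/kpmap	global kernel shared page, mmap only, read-only
 *	12	/dev/zero	zero-filled data on read, writes disallowed
 *	14	/dev/io		I/O privilege (iopl) control
 */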

static int rand_bolt;
static cdev_t	zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");
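
/*
 * Illustrative usage (not part of the original file): the knob above is
 * exported as a writable sysctl, e.g.
 *
 *	sysctl kern.seedenable=1
 *
 * and presumably gates whether userland writes to /dev/random (the seeding
 * path in mmrw() below) are accepted.
 */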

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error = 0;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		break;
	case 6:
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
			return (EPERM);
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error = 0;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	}
	return (error);
}

static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			break;
		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				break;
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			break;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0)
					error = add_buffer_randomness_src(buf, c,
							RAND_SRC_SEEDING);
			} else if (error == 0) {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			break;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			break;
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
						M_WAITOK | M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			break;
		}
		iov->iov_base = (char *)iov->iov_base + c;
		uio->uio_offset += c;
	}
	return (error);
}
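
/*
 * Illustrative userland usage (not part of the original file): reading
 * pseudo-random bytes from /dev/urandom; mmrw() above services the request
 * through a PAGE_SIZE scratch buffer per loop iteration.
 *
 *	char buf[64];
 *	int fd = open("/dev/urandom", O_RDONLY);
 *	ssize_t n = (fd >= 0) ? read(fd, buf, sizeof(buf)) : -1;
 *	if (fd >= 0)
 *		close(fd);
 */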

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
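
/*
 * Illustrative userland usage (not part of the original file): a minimal
 * sketch of mapping the read-only kernel shared page exported via
 * /dev/kpmap.  The layout of the mapped page is described by sys/upmap.h;
 * the snippet assumes only that the first page is mappable PROT_READ and
 * needs <fcntl.h>, <unistd.h> and <sys/mman.h>.
 *
 *	int fd = open("/dev/kpmap", O_RDONLY);
 *	void *base = NULL;
 *
 *	if (fd >= 0) {
 *		base = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
 *			    fd, 0);
 *		close(fd);			(mapping survives the close)
 *	}
 */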

static int user_kernel_mapping(int num, vm_ooffset_t offset,
			       vm_ooffset_t *resultp);

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	vm_ooffset_t result;
	int error = 0;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = atop(ap->a_offset);
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = atop(vtophys(ap->a_offset));
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		error = user_kernel_mapping(minor(dev), ap->a_offset, &result);
		ap->a_result = atop(result);
		break;
	}
	return (error);
}

static int
memuksmap(cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error = 0;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		error = user_kernel_mapping(minor(dev),
					    ptoa(fake->pindex), &result);
		fake->phys_addr = result;
		break;
	}
	return (error);
}

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	lockmgr(&mem_lock, LK_RELEASE);

	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (EOPNOTSUPP);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (error == 0)
				error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
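
/*
 * Illustrative userland usage (not part of the original file): a rough
 * sketch of querying memory range descriptors via MEMRANGE_GET on /dev/mem,
 * using only the fields referenced above (mo_arg[0] as the descriptor count,
 * mo_desc as the destination buffer).
 *
 *	struct mem_range_op mo;
 *	struct mem_range_desc md[32];
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	memset(&mo, 0, sizeof(mo));
 *	mo.mo_arg[0] = 32;			(room for 32 descriptors)
 *	mo.mo_desc = md;
 *	if (fd >= 0 && ioctl(fd, MEMRANGE_GET, &mo) == 0) {
 *		(md[] now holds up to 32 range descriptors)
 *	}
 */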

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	}
	return (error);
}

static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, mm_filter_write };

static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		switch (minor(dev)) {
		case 3:
			kn->kn_fop = &random_read_filtops;
			break;
		default:
			kn->kn_fop = &mm_read_filtops;
			break;
		}
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mm_write_filtops;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		break;
	}
	return (0);
}
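
/*
 * Illustrative userland usage (not part of the original file): waiting for
 * /dev/random to become readable via kqueue, which reaches mmkqfilter()
 * above and attaches random_read_filtops.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the filter)
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(block until readable)
 */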

int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}

/*
 * /dev/upmap and /dev/kpmap.
 */
static int
user_kernel_mapping(int num, vm_ooffset_t offset, vm_ooffset_t *resultp)
{
	struct proc *p;
	int error;
	int invfork;

	if ((p = curproc) == NULL)
		return (EINVAL);

	/*
	 * If this is a child currently in vfork the pmap is shared with
	 * the parent!  We need to actually set-up the parent's p_upmap,
	 * not the child's, and we need to set the invfork flag.  Userland
	 * will probably adjust its static state so it must be consistent
	 * with the parent or userland will be really badly confused.
	 *
	 * (this situation can happen when user code in vfork() calls
	 *  libc's getpid() or some other function which then decides
	 *  it wants the upmap).
	 */
	if (p->p_flags & P_PPWAIT) {
		p = p->p_pptr;
		invfork = 1;
	} else {
		invfork = 0;
	}

	error = EINVAL;

	switch (num) {
	case 5:
		/*
		 * /dev/upmap - maps RW per-process shared user-kernel area.
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, invfork);
		p->p_upmap->invfork = invfork;

		if (p->p_upmap &&
		    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
						 offset);
			error = 0;
		}
		break;
	case 6:
		/*
		 * /dev/kpmap - maps RO shared kernel global page
		 */
		if (kpmap &&
		    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
			*resultp = pmap_kextract((vm_offset_t)kpmap +
						 offset);
			error = 0;
		}
		break;
	}
	return (error);
}

static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,
	NULL);