/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>
static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_kqfilter_t	mmkqfilter;
static int memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake);
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_kqfilter = mmkqfilter,
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_kqfilter = mmkqfilter,
	.d_uksmap = memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_kqfilter = mmkqfilter,
	.d_uksmap = memuksmap
};
static int rand_bolt;
static cdev_t zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");
static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
		/*
		 * /dev/mem and /dev/kmem
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (securelevel > 0 || kernel_mem_readonly) {
		error = cpu_set_iopl();
static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
		error = cpu_clr_iopl();
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	while (uio->uio_resid > 0 && error == 0) {
		if (iov->iov_len == 0) {
			if (uio->uio_iovcnt < 0)
		switch (minor(dev)) {
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write (a userland usage sketch
			 * follows mmwrite() below)
			 */
			buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				error = add_buffer_randomness_src(buf, c,
						RAND_SRC_SEEDING);
			} else if (error == 0) {
				poolsize = read_random(buf, c, 0);
				if ((flags & IO_NDELAY) != 0)
					return (EWOULDBLOCK);
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
			buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random(buf, c, 1);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */

			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
			zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
						M_WAITOK | M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
		iov->iov_base = (char *)iov->iov_base + c;
		uio->uio_offset += c;
static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
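/*
 * Illustrative userland sketch (not part of this driver): reading from the
 * two random devices created below.  /dev/random may fail with EWOULDBLOCK
 * when opened O_NONBLOCK and the pool is empty (see the IO_NDELAY check in
 * mmrw() above), while the /dev/urandom path never returns EWOULDBLOCK.
 * The helper name is illustrative only; compiled out.
 */
#if 0
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
read_random_devs(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Non-blocking read; may legitimately return EWOULDBLOCK. */
	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf));
		if (n < 0 && errno == EWOULDBLOCK)
			printf("/dev/random: pool empty right now\n");
		else
			printf("/dev/random: got %zd bytes\n", n);
		close(fd);
	}

	/* /dev/urandom always returns data. */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf));
		printf("/dev/urandom: got %zd bytes\n", n);
		close(fd);
	}
}
#endif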
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int user_kernel_mapping(vm_map_backing_t ba, int num,
			vm_ooffset_t offset, vm_ooffset_t *resultp);
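/*
 * Illustrative userland sketch (not part of this driver): mapping the shared
 * pages exported by /dev/upmap (per-process, read/write) and /dev/kpmap
 * (kernel-global, read-only) instead of going through read/write.  These
 * minors are resolved by memuksmap()/user_kernel_mapping() below; the
 * structure layouts come from <sys/upmap.h> and are not assumed here.  The
 * helper names are illustrative only; compiled out.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

/* Map one page of the given device with the given protection. */
static void *
map_dev_page(const char *path, int oflags, int prot)
{
	void *base;
	int fd;

	fd = open(path, oflags);
	if (fd < 0)
		return (NULL);
	base = mmap(NULL, getpagesize(), prot, MAP_SHARED, fd, 0);
	close(fd);		/* the mapping remains valid after close */
	return (base == MAP_FAILED ? NULL : base);
}

static void
map_user_kernel_areas(void)
{
	void *upmap, *kpmap;

	upmap = map_dev_page("/dev/upmap", O_RDWR, PROT_READ | PROT_WRITE);
	kpmap = map_dev_page("/dev/kpmap", O_RDONLY, PROT_READ);
	/* ... interpret the areas using the layouts in <sys/upmap.h> ... */
	(void)upmap;
	(void)kpmap;
}
#endif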
static int
memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake)
{
		/*
		 * We only need to track mappings for /dev/lpmap, all process
		 * mappings will be deleted when the process exits and we
		 * do not need to track kernel mappings.
		 */
		if (minor(dev) == 7) {
			spin_lock(&lp->lwp_spin);
			TAILQ_INSERT_TAIL(&lp->lwp_lpmap_backing_list,
					  ba, entry);
			spin_unlock(&lp->lwp_spin);
		}

		/*
		 * We only need to track mappings for /dev/lpmap, all process
		 * mappings will be deleted when the process exits and we
		 * do not need to track kernel mappings.
		 */
		if (minor(dev) == 7) {
			spin_lock(&lp->lwp_spin);
			TAILQ_REMOVE(&lp->lwp_lpmap_backing_list, ba, entry);
			spin_unlock(&lp->lwp_spin);
		}

		switch (minor(dev)) {
			/*
			 * minor device 0 is physical memory
			 */
			fake->phys_addr = ptoa(fake->pindex);

			/*
			 * minor device 1 is kernel memory
			 */
			fake->phys_addr = vtophys(ptoa(fake->pindex));

			/*
			 * minor device 5 is /dev/upmap (see sys/upmap.h)
			 * minor device 6 is /dev/kpmap (see sys/upmap.h)
			 * minor device 7 is /dev/lpmap (see sys/upmap.h)
			 */
			error = user_kernel_mapping(ba,
			fake->phys_addr = result;
static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);

	lockmgr(&mem_lock, LK_RELEASE);
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.  (A userland usage sketch follows this
 * function.)
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)

	nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
	md = (struct mem_range_desc *)
	    kmalloc(nd * sizeof(struct mem_range_desc),
		    M_MEMDESC, M_WAITOK);
	error = mem_range_attr_get(md, &nd);
	error = copyout(md, mo->mo_desc,
			nd * sizeof(struct mem_range_desc));
	kfree(md, M_MEMDESC);
	nd = mem_range_softc.mr_ndesc;

	md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
					      M_MEMDESC, M_WAITOK);
	error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
	/* clamp description string */
	md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
	error = mem_range_attr_set(md, &mo->mo_arg[0]);
	kfree(md, M_MEMDESC);
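/*
 * Illustrative userland sketch (not part of this driver): querying the
 * memory range descriptors through the MEMRANGE_GET shim above, much as
 * memcontrol(8) does.  The count-query convention (mo_arg[0] == 0 returns
 * the descriptor count) follows the imin()/mr_ndesc logic visible in
 * mem_ioctl(); consult <sys/memrange.h> for the authoritative structure
 * layout.  The helper name is illustrative only; compiled out.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
show_mem_ranges(void)
{
	struct mem_range_op mo;
	struct mem_range_desc *md;
	int fd, i, nd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd < 0)
		return;

	/* First pass: ask how many descriptors the kernel has. */
	mo.mo_desc = NULL;
	mo.mo_arg[0] = 0;
	if (ioctl(fd, MEMRANGE_GET, &mo) < 0)
		goto done;
	nd = mo.mo_arg[0];

	/* Second pass: fetch the descriptors themselves. */
	md = calloc(nd, sizeof(*md));
	if (md == NULL)
		goto done;
	mo.mo_desc = md;
	mo.mo_arg[0] = nd;
	if (ioctl(fd, MEMRANGE_GET, &mo) == 0) {
		for (i = 0; i < nd; i++)
			printf("range %d owned by \"%s\"\n", i, md[i].mr_owner);
	}
	free(md);
done:
	close(fd);
}
#endif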
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)

	*arg = mem_range_softc.mr_ndesc;
	bcopy(mem_range_softc.mr_desc, mrd,
	      (*arg) * sizeof(struct mem_range_desc));

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}
static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags,
	     struct ucred *cred)
{
	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */

		/* Really handled in upper layer */

		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		register_randintr(intr);

		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		unregister_randintr(intr);

		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
		*(u_int16_t *)data = intr;
static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write };
static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	switch (kn->kn_filter) {
		switch (minor(dev)) {
			kn->kn_fop = &random_read_filtops;
			kn->kn_fop = &mm_read_filtops;
		kn->kn_fop = &mm_write_filtops;
		ap->a_result = EOPNOTSUPP;
int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}
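/*
 * Illustrative userland sketch (not part of this driver): waiting for
 * /dev/random to become readable via kqueue, which lands in mmkqfilter()
 * above (minor 3 is wired to random_read_filtops).  The helper name is
 * illustrative only; compiled out.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>

static int
wait_for_entropy(void)
{
	struct kevent kev;
	int fd, kq, n;

	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
	kq = kqueue();
	if (fd < 0 || kq < 0)
		return (-1);

	/* Register interest in readability of the random device. */
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	n = kevent(kq, &kev, 1, &kev, 1, NULL);	/* blocks until ready */

	close(kq);
	close(fd);
	return (n > 0 ? 0 : -1);
}
#endif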
/*
 * /dev/lpmap, /dev/upmap, /dev/kpmap.
 */
static int
user_kernel_mapping(vm_map_backing_t ba, int num, vm_ooffset_t offset,
		    vm_ooffset_t *resultp)
{
		/*
		 * /dev/upmap - maps RW per-process shared user-kernel area.
		 */

		/*
		 * If this is a child currently in vfork the pmap is shared
		 * with the parent!  We need to actually set-up the parent's
		 * p_upmap, not the child's, and we need to set the invfork
		 * flag.  Userland will probably adjust its static state so
		 * it must be consistent with the parent or userland will be
		 * really badly confused.
		 *
		 * (this situation can happen when user code in vfork() calls
		 * libc's getpid() or some other function which then decides
		 * it wants the upmap).
		 */
		if (p->p_flags & P_PPWAIT) {

		/*
		 * Create the kernel structure as required, set the invfork
		 * flag if we are faulting in on a vfork().
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, invfork);
		if (p->p_upmap && invfork)
			p->p_upmap->invfork = invfork;

		/*
		 * Extract address for pmap
		 */
		    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
						 offset);

		/*
		 * /dev/kpmap - maps RO shared kernel global page
		 *
		 * Extract address for pmap
		 */
		    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
			*resultp = pmap_kextract((vm_offset_t)kpmap + offset);

		/*
		 * /dev/lpmap - maps RW per-thread shared user-kernel area.
		 */

		/*
		 * Create the kernel structure as required
		 */
		if (lp->lwp_lpmap == NULL)
			lwp_usermap(lp, -1);	/* second arg not yet XXX */

		/*
		 * Extract address for pmap
		 */
		    offset < roundup2(sizeof(*lp->lwp_lpmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)lp->lwp_lpmap +
						 offset);
static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);
	make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops_mem, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops_mem, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	make_dev(&mem_ops_mem, 7, UID_ROOT, GID_WHEEL, 0666, "lpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}
SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,
	NULL);