Remove an unused include file.
[dragonfly.git] / sys/kern/kern_memio.c
blob 4d6a70fe7d68f3ca57c75d4d5bb8b4509b80bf6b

/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t mmopen;
static d_close_t mmclose;
static d_read_t mmread;
static d_write_t mmwrite;
static d_ioctl_t mmioctl;
#if 0
static d_mmap_t memmmap;
#endif
static d_kqfilter_t mmkqfilter;
static int memuksmap(cdev_t dev, vm_page_t fake);

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static int rand_bolt;
static caddr_t zbuf;
static cdev_t zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 6:
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
			return (EPERM);
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
							M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}
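
/*
 * Illustrative sketch, kept under #if 0 in the spirit of the memmmap()
 * block below: a minimal userland program exercising the mmrw() read
 * path via /dev/urandom (minor 4).  It assumes nothing beyond standard
 * POSIX I/O and is not part of the driver.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char rb[16];
	ssize_t n;
	int i;
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0)
		return (1);
	n = read(fd, rb, sizeof(rb));	/* serviced by mmrw(), case 4 */
	close(fd);
	for (i = 0; i < n; ++i)
		printf("%02x", rb[i]);
	printf("\n");
	return (0);
}
#endif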

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections     *
* instead of going through read/write                   *
\*******************************************************/

static int user_kernel_mapping(int num, vm_ooffset_t offset,
			       vm_ooffset_t *resultp);

#if 0

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = atop(ap->a_offset);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = atop(vtophys(ap->a_offset));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev), ap->a_offset, &result);
		ap->a_result = atop(result);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

#endif

static int
memuksmap(cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);
		error = 0;
		break;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));
		error = 0;
		break;
	case 5:
	case 6:
		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 */
		result = 0;
		error = user_kernel_mapping(minor(dev),
					    ptoa(fake->pindex), &result);
		fake->phys_addr = result;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
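
/*
 * Illustrative sketch, kept under #if 0 like memmmap() above: how a
 * userland process maps the read-only kernel page behind /dev/kpmap
 * (minor 6).  The page layout is defined in <sys/upmap.h>; this sketch
 * deliberately maps a single page without assuming its fields.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	void *p;
	int fd = open("/dev/kpmap", O_RDONLY);

	if (fd < 0)
		return (1);
	/* the fault path resolves this mapping through memuksmap() */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);
	if (p == MAP_FAILED)
		return (1);
	munmap(p, 4096);
	return (0);
}
#endif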

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	lockmgr(&mem_lock, LK_RELEASE);

	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
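
/*
 * Illustrative sketch of the MEMRANGE_GET handshake from userland,
 * kept under #if 0: a first call with mo_arg[0] == 0 only reports the
 * descriptor count, a second call copies the descriptors out.  This
 * mirrors what memcontrol(8) does, but treat it as a sketch rather
 * than that utility's actual code.
 */
#if 0
#include <sys/memrange.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	struct mem_range_op mo;
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0)
		return (1);
	mo.mo_arg[0] = 0;		/* ask only for the descriptor count */
	if (ioctl(fd, MEMRANGE_GET, &mo) == 0 && mo.mo_arg[0] > 0) {
		mo.mo_desc = calloc(mo.mo_arg[0], sizeof(*mo.mo_desc));
		if (mo.mo_desc != NULL) {
			ioctl(fd, MEMRANGE_GET, &mo);	/* fetch them */
			free(mo.mo_desc);
		}
	}
	close(fd);
	return (0);
}
#endif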

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}

static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write };

static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		switch (minor(dev)) {
		case 3:
			kn->kn_fop = &random_read_filtops;
			break;
		default:
			kn->kn_fop = &mm_read_filtops;
			break;
		}
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mm_write_filtops;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	return (0);
}
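
/*
 * Illustrative sketch, kept under #if 0: registering an EVFILT_READ
 * knote on /dev/random from userland, which mmkqfilter() above routes
 * to random_read_filtops.
 */
#if 0
#include <sys/event.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev, ev;
	int kq = kqueue();
	int fd = open("/dev/random", O_RDONLY);

	if (kq < 0 || fd < 0)
		return (1);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* attach the filter */
	kevent(kq, NULL, 0, &ev, 1, NULL);	/* block until readable */
	close(fd);
	close(kq);
	return (0);
}
#endif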

int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}

/*
 * /dev/upmap and /dev/kpmap.
 */
static int
user_kernel_mapping(int num, vm_ooffset_t offset, vm_ooffset_t *resultp)
{
	struct proc *p;
	int error;
	int invfork;

	if ((p = curproc) == NULL)
		return (EINVAL);

	/*
	 * If this is a child currently in vfork the pmap is shared with
	 * the parent!  We need to actually set up the parent's p_upmap,
	 * not the child's, and we need to set the invfork flag.  Userland
	 * will probably adjust its static state so it must be consistent
	 * with the parent or userland will be really badly confused.
	 *
	 * (this situation can happen when user code in vfork() calls
	 *  libc's getpid() or some other function which then decides
	 *  it wants the upmap).
	 */
	if (p->p_flags & P_PPWAIT) {
		p = p->p_pptr;
		if (p == NULL)
			return (EINVAL);
		invfork = 1;
	} else {
		invfork = 0;
	}

	error = EINVAL;

	switch(num) {
	case 5:
		/*
		 * /dev/upmap - maps RW per-process shared user-kernel area.
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, invfork);
		else if (invfork)
			p->p_upmap->invfork = invfork;

		if (p->p_upmap &&
		    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
			/* only good for current process */
			*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
						 offset);
			error = 0;
		}
		break;
	case 6:
		/*
		 * /dev/kpmap - maps RO shared kernel global page
		 */
		if (kpmap &&
		    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
			*resultp = pmap_kextract((vm_offset_t)kpmap +
						 offset);
			error = 0;
		}
		break;
	default:
		break;
	}
	return error;
}
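
/*
 * Illustrative sketch, kept under #if 0: mapping the per-process RW
 * user-kernel area exposed by /dev/upmap (minor 5).  Per the vfork
 * note above, a vfork()ed child performing this mapping actually gets
 * the parent's p_upmap, with invfork set so userland can compensate.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static void *
map_upmap(void)
{
	void *p = MAP_FAILED;
	int fd = open("/dev/upmap", O_RDWR);

	if (fd >= 0) {
		/* faults resolve via memuksmap() -> user_kernel_mapping() */
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, 0);
		close(fd);
	}
	return (p == MAP_FAILED ? NULL : p);
}
#endif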

static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,
	NULL);