/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
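
/*
 * physio() - carry out raw I/O for a character device: data moves between
 * the device and the memory described by the uio in pbuf-sized chunks,
 * bypassing the buffer cache.  Userspace requests are staged through the
 * pbuf's kernel memory; kernel-space requests are issued on the caller's
 * buffer directly.
 */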
static int
physio(cdev_t dev, struct uio *uio, int ioflag)
{
        int i;
        int error;
        int saflags;
        int iolen;
        int bcount;
        caddr_t ubase;
        struct buf *bp;

        if (uio->uio_segflg == UIO_USERSPACE)
                bp = getpbuf_mem(NULL);
        else
                bp = getpbuf_kva(NULL);
        saflags = bp->b_flags;
        error = 0;

        /* XXX: sanity check */
        if (dev->si_iosize_max < PAGE_SIZE) {
                kprintf("WARNING: %s si_iosize_max=%d, using MAXPHYS.\n",
                        devtoname(dev), dev->si_iosize_max);
                dev->si_iosize_max = MAXPHYS;
        }

        /* Must be a real uio */
        KKASSERT(uio->uio_segflg != UIO_NOCOPY);
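
        /*
         * Walk each iovec and push it to the device in pieces no larger
         * than the pbuf and the device's transfer limit allow.
         */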
        for (i = 0; i < uio->uio_iovcnt; i++) {
                while (uio->uio_iov[i].iov_len) {
                        if (uio->uio_rw == UIO_READ)
                                bp->b_cmd = BUF_CMD_READ;
                        else
                                bp->b_cmd = BUF_CMD_WRITE;
                        bp->b_flags = saflags;
                        bcount = uio->uio_iov[i].iov_len;

                        reinitbufbio(bp);       /* clear translation cache */
                        bp->b_bio1.bio_offset = uio->uio_offset;
                        bp->b_bio1.bio_done = biodone_sync;
                        bp->b_bio1.bio_flags |= BIO_SYNC;

                        /*
                         * Setup for mapping the request into kernel memory.
                         *
                         * We can only write as much as fits in a pbuf,
                         * which is MAXPHYS, and no larger than the device's
                         * ability.
                         *
                         * If not using bounce pages the base address of the
                         * user mapping into the pbuf may be offset, further
                         * reducing how much will actually fit in the pbuf.
                         */
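                        /*
                         * For example, a 1 MB request against a device whose
                         * si_iosize_max is 128 KB is issued by this loop as
                         * a series of 128 KB (or smaller) chunks.
                         */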
                        if (bcount > dev->si_iosize_max)
                                bcount = dev->si_iosize_max;

                        ubase = uio->uio_iov[i].iov_base;
                        iolen = ((vm_offset_t)ubase) & PAGE_MASK;
                        if (bcount > bp->b_kvasize)
                                bcount = bp->b_kvasize;

                        /*
                         * If we have to use a bounce buffer allocate kernel
                         * memory and copyin/copyout.  Otherwise map the
                         * user buffer directly into kernel memory without
                         * copying.
                         */
                        if (uio->uio_segflg == UIO_USERSPACE) {
                                bp->b_bcount = bcount;
                                if (uio->uio_rw == UIO_WRITE) {
                                        error = copyin(ubase, bp->b_data, bcount);
                                        if (error)
                                                goto doerror;
                                }
                        } else {
                                bp->b_data = uio->uio_iov[i].iov_base;
                                bp->b_bcount = bcount;
                        }
                        dev_dstrategy(dev, &bp->b_bio1);
                        biowait(&bp->b_bio1, "physstr");
                        iolen = bp->b_bcount - bp->b_resid;
                        if (uio->uio_segflg == UIO_USERSPACE) {
                                if (uio->uio_rw == UIO_READ && iolen) {
                                        error = copyout(bp->b_data, ubase, iolen);
                                        if (error) {
                                                bp->b_flags |= B_ERROR;
                                                goto doerror;
                                        }
                                }
                        }
                        if (iolen == 0 && !(bp->b_flags & B_ERROR))
                                goto doerror;   /* EOF */
                        uio->uio_iov[i].iov_len -= iolen;
                        uio->uio_iov[i].iov_base = (char *)uio->uio_iov[i].iov_base + iolen;
                        uio->uio_resid -= iolen;
                        uio->uio_offset += iolen;
                        if (bp->b_flags & B_ERROR) {
                                error = bp->b_error;
                                goto doerror;
                        }
                }
        }
doerror:
        relpbuf(bp, NULL);
        return (error);
}

int
physread(struct dev_read_args *ap)
{
        return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

int
physwrite(struct dev_write_args *ap)
{
        return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
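
/*
 * Usage sketch: a character-device driver typically exposes this raw I/O
 * path by pointing its read/write entry points at physread()/physwrite().
 * The dev_ops layout below is only illustrative; "mydev" and its handlers
 * are hypothetical.
 *
 *      static struct dev_ops mydev_ops = {
 *              { "mydev", 0, D_DISK },
 *              .d_open  = mydev_open,
 *              .d_close = mydev_close,
 *              .d_read  = physread,
 *              .d_write = physwrite,
 *      };
 */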