kernel - Many fixes for vkernel support, plus a few main kernel fixes
[dragonfly.git] / sys / platform / vkernel64 / platform / copyio.c
blob6c3bf1ed84bca95e832ba1a6293aa6e541a20cd1
1 /*
2 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <cpu/lwbuf.h>
38 #include <vm/vm_page.h>
39 #include <vm/vm_extern.h>
40 #include <assert.h>
42 #include <sys/stat.h>
43 #include <sys/mman.h>
45 uint64_t
46 casu64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
48 struct vmspace *vm = curproc->p_vmspace;
49 vm_offset_t kva;
50 vm_page_t m;
51 volatile uint64_t *dest;
52 uint64_t res;
53 int error;
54 int busy;
56 /* XXX No idea how to handle this case in a simple way, just abort */
57 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
58 return -1;
60 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
61 VM_PROT_READ|VM_PROT_WRITE,
62 VM_FAULT_NORMAL,
63 &error, &busy);
64 if (error)
65 return -1;
66 KKASSERT(m->busy == 0);
68 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
69 dest = (uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK));
70 res = oldval;
71 __asm __volatile(MPLOCKED "cmpxchgq %2,%1; " \
72 : "+a" (res), "=m" (*dest) \
73 : "r" (newval), "m" (*dest) \
74 : "memory");
76 if (busy)
77 vm_page_wakeup(m);
78 else
79 vm_page_unhold(m);
81 return res;
84 u_int
85 casu32(volatile u_int *p, u_int oldval, u_int newval)
87 struct vmspace *vm = curproc->p_vmspace;
88 vm_offset_t kva;
89 vm_page_t m;
90 volatile u_int *dest;
91 u_int res;
92 int error;
93 int busy;
95 /* XXX No idea how to handle this case in a simple way, just abort */
96 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(u_int))
97 return -1;
99 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
100 VM_PROT_READ|VM_PROT_WRITE,
101 VM_FAULT_NORMAL,
102 &error, &busy);
103 if (error)
104 return -1;
105 KKASSERT(m->busy == 0);
107 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
108 dest = (u_int *)(kva + ((vm_offset_t)p & PAGE_MASK));
109 res = oldval;
110 __asm __volatile(MPLOCKED "cmpxchgl %2,%1; " \
111 : "+a" (res), "=m" (*dest) \
112 : "r" (newval), "m" (*dest) \
113 : "memory");
115 if (busy)
116 vm_page_wakeup(m);
117 else
118 vm_page_unhold(m);
120 return res;
123 uint64_t
124 swapu64(volatile uint64_t *p, uint64_t val)
126 struct vmspace *vm = curproc->p_vmspace;
127 vm_offset_t kva;
128 vm_page_t m;
129 uint64_t res;
130 int error;
131 int busy;
133 /* XXX No idea how to handle this case in a simple way, just abort */
134 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
135 return -1;
137 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
138 VM_PROT_READ|VM_PROT_WRITE,
139 VM_FAULT_NORMAL,
140 &error, &busy);
141 if (error)
142 return -1;
143 KKASSERT(m->busy == 0);
145 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
146 res = atomic_swap_long((uint64_t *)(kva + ((vm_offset_t)p & PAGE_MASK)),
147 val);
148 if (busy)
149 vm_page_wakeup(m);
150 else
151 vm_page_unhold(m);
153 return res;
156 uint32_t
157 swapu32(volatile uint32_t *p, uint32_t val)
159 struct vmspace *vm = curproc->p_vmspace;
160 vm_offset_t kva;
161 vm_page_t m;
162 u_int res;
163 int error;
164 int busy;
166 /* XXX No idea how to handle this case in a simple way, just abort */
167 if (PAGE_SIZE - ((vm_offset_t)p & PAGE_MASK) < sizeof(uint64_t))
168 return -1;
170 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)p),
171 VM_PROT_READ|VM_PROT_WRITE,
172 VM_FAULT_NORMAL,
173 &error, &busy);
174 if (error)
175 return -1;
176 KKASSERT(m->busy == 0);
178 kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
179 res = atomic_swap_int((u_int *)(kva + ((vm_offset_t)p & PAGE_MASK)),
180 val);
181 if (busy)
182 vm_page_wakeup(m);
183 else
184 vm_page_unhold(m);
186 return res;
/*
 * copystr - Copy a NUL-terminated string within kernel space.
 *
 * Copies at most len bytes from kfaddr to kdaddr, stopping after the
 * terminating NUL.  On success returns 0 and, if lencopied is non-NULL,
 * stores the number of bytes copied including the terminator.  Returns
 * ENAMETOOLONG (leaving *lencopied untouched) when no NUL is found
 * within len bytes.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)
{
	const char *src = kfaddr;
	char *dst = kdaddr;
	size_t i;

	for (i = 0; i < len; ++i) {
		dst[i] = src[i];
		if (dst[i] == 0) {
			if (lencopied)
				*lencopied = i + 1;
			return(0);
		}
	}
	return (ENAMETOOLONG);
}
205 * Copies a NUL-terminated string from user space to kernel space.
206 * The number of bytes copied, including the terminator, is returned in
207 * (*res).
209 * Returns 0 on success, EFAULT or ENAMETOOLONG on failure.
212 copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *res)
214 int error;
215 size_t n;
216 const char *uptr = udaddr;
217 char *kptr = kaddr;
219 if (res)
220 *res = 0;
221 while (len) {
222 n = PAGE_SIZE - ((vm_offset_t)uptr & PAGE_MASK);
223 if (n > 32)
224 n = 32;
225 if (n > len)
226 n = len;
227 if ((error = copyin(uptr, kptr, n)) != 0)
228 return(error);
229 while (n) {
230 if (res)
231 ++*res;
232 if (*kptr == 0)
233 return(0);
234 ++kptr;
235 ++uptr;
236 --n;
237 --len;
241 return(ENAMETOOLONG);
245 * Copy a binary buffer from user space to kernel space.
247 * Returns 0 on success, EFAULT on failure.
250 copyin(const void *udaddr, void *kaddr, size_t len)
252 struct vmspace *vm = curproc->p_vmspace;
253 struct lwbuf *lwb;
254 struct lwbuf lwb_cache;
255 vm_page_t m;
256 int error;
257 size_t n;
259 error = 0;
260 while (len) {
261 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
262 VM_PROT_READ,
263 VM_FAULT_NORMAL,
264 &error, NULL);
265 if (error)
266 break;
267 n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
268 if (n > len)
269 n = len;
270 lwb = lwbuf_alloc(m, &lwb_cache);
271 bcopy((char *)lwbuf_kva(lwb)+((vm_offset_t)udaddr & PAGE_MASK),
272 kaddr, n);
273 len -= n;
274 udaddr = (const char *)udaddr + n;
275 kaddr = (char *)kaddr + n;
276 lwbuf_free(lwb);
277 vm_page_unhold(m);
279 if (error)
280 error = EFAULT;
281 return (error);
285 * Copy a binary buffer from kernel space to user space.
287 * Returns 0 on success, EFAULT on failure.
290 copyout(const void *kaddr, void *udaddr, size_t len)
292 struct vmspace *vm = curproc->p_vmspace;
293 struct lwbuf *lwb;
294 struct lwbuf lwb_cache;
295 vm_page_t m;
296 int error;
297 int busy;
298 size_t n;
300 error = 0;
301 while (len) {
302 m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
303 VM_PROT_READ|VM_PROT_WRITE,
304 VM_FAULT_NORMAL,
305 &error, &busy);
306 if (error)
307 break;
308 KKASSERT(m->busy == 0);
309 n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
310 if (n > len)
311 n = len;
312 lwb = lwbuf_alloc(m, &lwb_cache);
313 bcopy(kaddr, (char *)lwbuf_kva(lwb) +
314 ((vm_offset_t)udaddr & PAGE_MASK), n);
315 len -= n;
316 udaddr = (char *)udaddr + n;
317 kaddr = (const char *)kaddr + n;
318 lwbuf_free(lwb);
319 if (busy)
320 vm_page_wakeup(m);
321 else
322 vm_page_unhold(m);
324 if (error)
325 error = EFAULT;
326 return (error);
330 * Fetch the byte at the specified user address. Returns -1 on failure.
333 fubyte(const uint8_t *base)
335 uint8_t c;
337 if (copyin(base, &c, 1) == 0)
338 return((int)c);
339 return(-1);
343 * Store a byte at the specified user address. Returns -1 on failure.
346 subyte(uint8_t *base, uint8_t byte)
348 uint8_t c = byte;
350 if (copyout(&c, base, 1) == 0)
351 return(0);
352 return(-1);
356 * Fetch a word (integer, 32 bits) from user space
358 int32_t
359 fuword32(const uint32_t *base)
361 uint32_t v;
363 if (copyin(base, &v, sizeof(v)) == 0)
364 return(v);
365 return(-1);
369 * Fetch a word (integer, 32 bits) from user space
371 int64_t
372 fuword64(const uint64_t *base)
374 uint64_t v;
376 if (copyin(base, &v, sizeof(v)) == 0)
377 return(v);
378 return(-1);
382 * Store a word (integer, 32 bits) to user space
385 suword64(uint64_t *base, uint64_t word)
387 if (copyout(&word, base, sizeof(word)) == 0)
388 return(0);
389 return(-1);
393 suword32(uint32_t *base, int word)
395 if (copyout(&word, base, sizeof(word)) == 0)
396 return(0);
397 return(-1);