/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>

/* common code for old and new mmaps */
static inline long do_mmap2(
        unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        /* do_mmap_pgoff() requires mmap_sem to be held for writing. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

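/*
 * Note the unit difference between the two entry points: sys_mmap2()
 * takes its final argument in units of pages, while old_mmap() below
 * takes a byte offset and converts it with ">> PAGE_SHIFT" after
 * checking that it is page aligned.
 */
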
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which originally could not
 * handle more than 4 system call parameters, so these system calls
 * use a memory block for parameter passing.
 */

struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
        return error;
}

#if 0
struct mmap_arg_struct64 {
        __u32 addr;
        __u32 len;
        __u32 prot;
        __u32 flags;
        __u64 offset; /* 64 bits */
        __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
        int error = -EFAULT;
        struct file * file = NULL;
        struct mmap_arg_struct64 a;
        unsigned long pgoff;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if ((long)a.offset & ~PAGE_MASK)
                return -EINVAL;

        /* Reject a 64-bit offset whose page number doesn't fit in an
           unsigned long. */
        pgoff = a.offset >> PAGE_SHIFT;
        if ((a.offset >> PAGE_SHIFT) != pgoff)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                error = -EBADF;
                file = fget(a.fd);
                if (!file)
                        goto out;
        }
        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
out:
        return error;
}
#endif

struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void __user *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void __user *__user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -ENOSYS;
                }
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, ptr, second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp, ptr, sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first, ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }
        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, ptr, second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong __user *) third);
                                }
                        }
                case SHMDT:
                        return sys_shmdt (ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }

        return -EINVAL;
}

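/*
 * A sketch of how a caller selects a calling convention (hypothetical
 * user-side wrapper, not part of this file): the operation goes in the
 * low 16 bits of "call" and the version in the high 16 bits, so the
 * "new" (version 1) MSGRCV would be invoked as
 *
 *      ipc((1 << 16) | MSGRCV, msqid, msgsz, msgflg, msgp, msgtyp);
 *
 * whereas version 0 passes msgp and msgtyp through a struct ipc_kludge
 * pointed to by "ptr".
 */
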
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
        unsigned long _mmusr, _paddr;                                   \
                                                                        \
        __asm__ __volatile__ (".chip 68040\n\t"                         \
                              "ptestr (%1)\n\t"                         \
                              "movec %%mmusr,%0\n\t"                    \
                              ".chip 68k"                               \
                              : "=r" (_mmusr)                           \
                              : "a" (vaddr));                           \
        _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;       \
        _paddr;                                                         \
})

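/*
 * The three flush scopes below map onto the three 68040 cpush variants:
 * "cpusha" pushes/invalidates the entire selected cache, "cpushl" a
 * single 16-byte cache line, and "cpushp" a whole page.  %dc, %ic and
 * %bc select the data cache, the instruction cache, or both.
 */
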
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
        unsigned long paddr, i;

        switch (scope)
        {
        case FLUSH_SCOPE_ALL:
                switch (cache)
                {
                case FLUSH_CACHE_DATA:
                        /* This nop is needed for some broken versions of the 68040. */
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %dc\n\t"
                                              ".chip 68k");
                        break;
                case FLUSH_CACHE_INSN:
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %ic\n\t"
                                              ".chip 68k");
                        break;
                default:
                case FLUSH_CACHE_BOTH:
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %bc\n\t"
                                              ".chip 68k");
                        break;
                }
                break;

        case FLUSH_SCOPE_LINE:
                /* Find the physical address of the first mapped page in the
                   address range.  */
                if ((paddr = virt_to_phys_040(addr))) {
                        paddr += addr & ~(PAGE_MASK | 15);
                        len = (len + (addr & 15) + 15) >> 4;
                } else {
                        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

                        if (len <= tmp)
                                return 0;
                        addr += tmp;
                        len -= tmp;
                        tmp = PAGE_SIZE;
                        for (;;)
                        {
                                if ((paddr = virt_to_phys_040(addr)))
                                        break;
                                if (len <= tmp)
                                        return 0;
                                addr += tmp;
                                len -= tmp;
                        }
                        len = (len + 15) >> 4;
                }
                i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
                while (len--)
                {
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                        if (!--i && len)
                        {
                                /*
                                 * No need to page align here since it is done by
                                 * virt_to_phys_040().
                                 */
                                addr += PAGE_SIZE;
                                i = PAGE_SIZE / 16;
                                /* Recompute physical address when crossing a page
                                   boundary.  */
                                for (;;)
                                {
                                        if ((paddr = virt_to_phys_040(addr)))
                                                break;
                                        if (len <= i)
                                                return 0;
                                        len -= i;
                                        addr += PAGE_SIZE;
                                }
                        }
                        else
                                paddr += 16;
                }
                break;

        default:
        case FLUSH_SCOPE_PAGE:
                len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
                for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
                {
                        if (!(paddr = virt_to_phys_040(addr)))
                                continue;
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                }
                break;
        }
        return 0;
}

#define virt_to_phys_060(vaddr)                         \
({                                                      \
        unsigned long paddr;                            \
        __asm__ __volatile__ (".chip 68060\n\t"         \
                              "plpar (%0)\n\t"          \
                              ".chip 68k"               \
                              : "=a" (paddr)            \
                              : "0" (vaddr));           \
        (paddr); /* XXX */                              \
})

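/*
 * plpar translates the logical address in the register to a physical
 * address in place.  Note the XXX above: unlike the ptestr-based 040
 * macro, this gives us no resident bit to test, so the !paddr checks in
 * the loops below rely on an unmapped address coming back as 0
 * (presumably via access-error fixup; the original author flagged this
 * rather than verifying it here).
 */
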
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
        unsigned long paddr, i;

        /*
         * 68060 manual says:
         *  cpush %dc : flush DC, remains valid (with our %cacr setup)
         *  cpush %ic : invalidate IC
         *  cpush %bc : flush DC + invalidate IC
         */
        switch (scope)
        {
        case FLUSH_SCOPE_ALL:
                switch (cache)
                {
                case FLUSH_CACHE_DATA:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %dc\n\t"
                                              ".chip 68k");
                        break;
                case FLUSH_CACHE_INSN:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %ic\n\t"
                                              ".chip 68k");
                        break;
                default:
                case FLUSH_CACHE_BOTH:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %bc\n\t"
                                              ".chip 68k");
                        break;
                }
                break;

        case FLUSH_SCOPE_LINE:
                /* Find the physical address of the first mapped page in the
                   address range.  */
                len += addr & 15;
                addr &= -16;
                if (!(paddr = virt_to_phys_060(addr))) {
                        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

                        if (len <= tmp)
                                return 0;
                        addr += tmp;
                        len -= tmp;
                        tmp = PAGE_SIZE;
                        for (;;)
                        {
                                if ((paddr = virt_to_phys_060(addr)))
                                        break;
                                if (len <= tmp)
                                        return 0;
                                addr += tmp;
                                len -= tmp;
                        }
                }
                len = (len + 15) >> 4;
                i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
                while (len--)
                {
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                        if (!--i && len)
                        {
                                /*
                                 * We just want to jump to the first cache line
                                 * in the next page.
                                 */
                                addr += PAGE_SIZE;
                                addr &= PAGE_MASK;

                                i = PAGE_SIZE / 16;
                                /* Recompute physical address when crossing a page
                                   boundary.  */
                                for (;;)
                                {
                                        if ((paddr = virt_to_phys_060(addr)))
                                                break;
                                        if (len <= i)
                                                return 0;
                                        len -= i;
                                        addr += PAGE_SIZE;
                                }
                        }
                        else
                                paddr += 16;
                }
                break;

        default:
        case FLUSH_SCOPE_PAGE:
                len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
                addr &= PAGE_MASK;      /* Workaround for bug in some
                                           revisions of the 68060 */
                for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
                {
                        if (!(paddr = virt_to_phys_060(addr)))
                                continue;
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                }
                break;
        }
        return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */
                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}

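/*
 * A minimal user-side sketch (hypothetical wrapper, not part of this
 * file): a JIT that has just written code at "buf" would push the data
 * cache and invalidate the instruction cache for that range with
 *
 *      cacheflush((unsigned long) buf, FLUSH_SCOPE_LINE,
 *                 FLUSH_CACHE_BOTH, buflen);
 *
 * FLUSH_SCOPE_ALL additionally requires CAP_SYS_ADMIN, as checked above.
 */
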
asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        register long __res asm ("%d0") = __NR_execve;
        register long __a asm ("%d1") = (long)(filename);
        register long __b asm ("%d2") = (long)(argv);
        register long __c asm ("%d3") = (long)(envp);
        asm volatile ("trap #0" : "+d" (__res)
                        : "d" (__a), "d" (__b), "d" (__c));
        return __res;
}
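
/*
 * The asm above follows the m68k system call convention: the syscall
 * number goes in %d0 and the arguments in %d1-%d3, "trap #0" enters the
 * kernel, and the result comes back in %d0 (hence the "+d" read-write
 * constraint on __res).
 */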