Import 2.1.116pre2
[davej-history.git] / arch / m68k / kernel / sys_m68k.c
blob75da525418b3c28101301bfd55f8a2e03bfb5e13
1 /*
2 * linux/arch/m68k/kernel/sys_m68k.c
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/m68k
6 * platform.
7 */
9 #include <linux/errno.h>
10 #include <linux/sched.h>
11 #include <linux/mm.h>
12 #include <linux/smp.h>
13 #include <linux/smp_lock.h>
14 #include <linux/sem.h>
15 #include <linux/msg.h>
16 #include <linux/shm.h>
17 #include <linux/stat.h>
18 #include <linux/mman.h>
19 #include <linux/file.h>
20 #include <linux/utsname.h>
22 #include <asm/setup.h>
23 #include <asm/uaccess.h>
24 #include <asm/cachectl.h>
25 #include <asm/traps.h>
26 #include <asm/ipc.h>
29 * sys_pipe() is the normal C calling standard for creating
30 * a pipe. It's not the way unix traditionally does this, though.
32 asmlinkage int sys_pipe(unsigned long * fildes)
34 int fd[2];
35 int error;
37 lock_kernel();
38 error = do_pipe(fd);
39 if (!error) {
40 if (copy_to_user(fildes, fd, 2*sizeof(int)))
41 error = -EFAULT;
43 unlock_kernel();
44 return error;
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

/* User-space argument block consumed by old_mmap() below. */
struct mmap_arg_struct {
	unsigned long addr;	/* requested mapping address */
	unsigned long len;	/* length of the mapping */
	unsigned long prot;	/* PROT_* protection bits */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor (ignored for MAP_ANONYMOUS) */
	unsigned long offset;	/* offset into the file */
};
/*
 * old_mmap() -- mmap() entry point taking a single user pointer to a
 * mmap_arg_struct (see above).  Returns the mapped address on success
 * or a negative error code.
 */
asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
	int error;
	struct file * file = NULL;
	struct mmap_arg_struct a;

	/* Pull the whole argument block in before touching anything. */
	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	/* NOTE(review): mmap_sem is taken before the kernel lock here;
	   presumably that ordering is the file-wide convention -- keep it. */
	down(&current->mm->mmap_sem);
	lock_kernel();
	if (!(a.flags & MAP_ANONYMOUS)) {
		/* File-backed mapping: the descriptor must be valid. */
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	/* These two flags are kernel-internal; never trust them from user space. */
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
	if (file)
		fput(file);	/* drop the reference taken by fget() */
out:
	unlock_kernel();
	up(&current->mm->mmap_sem);
	return error;
}
extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);

/* User-space argument block consumed by old_select() below; the fields
   correspond one-to-one to the five sys_select() parameters. */
struct sel_arg_struct {
	unsigned long n;		/* first select() argument */
	fd_set *inp, *outp, *exp;	/* read/write/exception descriptor sets */
	struct timeval *tvp;		/* timeout pointer */
};
100 asmlinkage int old_select(struct sel_arg_struct *arg)
102 struct sel_arg_struct a;
104 if (copy_from_user(&a, arg, sizeof(a)))
105 return -EFAULT;
106 /* sys_select() does the appropriate kernel locking */
107 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
111 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
113 * This is really horribly ugly.
115 asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
117 int version, ret;
119 version = call >> 16; /* hack for backward compatibility */
120 call &= 0xffff;
122 if (call <= SEMCTL)
123 switch (call) {
124 case SEMOP:
125 ret = sys_semop (first, (struct sembuf *)ptr, second);
126 goto out;
127 case SEMGET:
128 ret = sys_semget (first, second, third);
129 goto out;
130 case SEMCTL: {
131 union semun fourth;
132 ret = -EINVAL;
133 if (!ptr)
134 goto out;
135 if ((ret = get_user(fourth.__pad, (void **) ptr)))
136 goto out;
137 ret = sys_semctl (first, second, third, fourth);
138 goto out;
140 default:
141 ret = -EINVAL;
142 goto out;
144 if (call <= MSGCTL)
145 switch (call) {
146 case MSGSND:
147 ret = sys_msgsnd (first, (struct msgbuf *) ptr,
148 second, third);
149 goto out;
150 case MSGRCV:
151 switch (version) {
152 case 0: {
153 struct ipc_kludge tmp;
154 ret = -EINVAL;
155 if (!ptr)
156 goto out;
157 ret = -EFAULT;
158 if (copy_from_user (&tmp, ptr, sizeof (tmp)))
159 goto out;
160 ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
161 goto out;
163 case 1: default:
164 ret = sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
165 goto out;
167 case MSGGET:
168 ret = sys_msgget ((key_t) first, second);
169 goto out;
170 case MSGCTL:
171 ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
172 goto out;
173 default:
174 ret = -EINVAL;
175 goto out;
177 if (call <= SHMCTL)
178 switch (call) {
179 case SHMAT:
180 switch (version) {
181 case 0: default: {
182 ulong raddr;
183 ret = sys_shmat (first, (char *) ptr, second, &raddr);
184 if (ret)
185 goto out;
186 ret = put_user (raddr, (ulong *) third);
187 goto out;
190 case SHMDT:
191 ret = sys_shmdt ((char *)ptr);
192 goto out;
193 case SHMGET:
194 ret = sys_shmget (first, second, third);
195 goto out;
196 case SHMCTL:
197 ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
198 goto out;
199 default:
200 ret = -EINVAL;
201 goto out;
203 ret = -EINVAL;
204 out:
205 unlock_kernel();
206 return ret;
209 asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
211 return -ENOSYS;
/* Convert virtual address VADDR to physical address PADDR.
 *
 * Uses the 68040 PTESTR instruction to walk the MMU tables for a read
 * access to VADDR, then reads the MMUSR result register.  If the
 * resident bit (MMU_R_040) is set, the physical page frame address is
 * returned; otherwise the page is unmapped and 0 is returned.  Note
 * the result is page-aligned -- the offset within the page is dropped. */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
/*
 * cache_flush_040 -- push 68040 cache entries for sys_cacheflush().
 *
 * `scope' selects granularity: FLUSH_SCOPE_ALL pushes the whole
 * selected cache (CPUSHA), FLUSH_SCOPE_LINE works in 16-byte cache
 * lines (CPUSHL), anything else is treated as FLUSH_SCOPE_PAGE
 * (CPUSHP).  `cache' selects data, insn or both caches.  `addr'/`len'
 * give the virtual range for line/page scope; unmapped pages are
 * skipped.  Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			/* First page is mapped: re-add the sub-page offset
			   (rounded down to a cache line) and convert len
			   to a count of 16-byte lines. */
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			/* First page unmapped: skip whole pages until a
			   mapped one is found or the range is exhausted. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		/* i counts cache lines left in the current page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, push each mapped one. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;	/* unmapped page: nothing cached */
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* Convert virtual address VADDR to a physical address on the 68060,
 * using the PLPAR (load physical address, read) instruction which
 * translates the address in place in the register.  Unlike the '040
 * variant this keeps the sub-page offset.  The XXX marks that no
 * failure (unmapped page) indication is extracted here -- presumably
 * relying on PLPAR's behavior; confirm against the 68060 manual. */
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
/*
 * cache_flush_060 -- push and invalidate 68060 cache entries for
 * sys_cacheflush().
 *
 * Same structure as cache_flush_040, but every push (CPUSHA/CPUSHL/
 * CPUSHP) is followed by the matching invalidate (CINVA/CINVL/CINVP).
 * `scope' selects all/line/page granularity, `cache' selects data,
 * insn or both caches, `addr'/`len' give the virtual range for
 * line/page scope.  Always returns 0.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      "cinva %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      "cinva %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      "cinva %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		/* Align the start down to a cache line up front (the '040
		   variant compensates later instead). */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			/* First page unmapped: skip whole pages until a
			   mapped one is found or the range is exhausted. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		/* Convert the byte count to 16-byte cache lines. */
		len = (len + 15) >> 4;
		/* i counts cache lines left in the current page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      "cinvl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      "cinvl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      "cinvl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, push+invalidate each
		   mapped one. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;	/* unmapped page: nothing cached */
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      "cinvp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      "cinvp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      "cinvp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.
 *
 * `scope' must be FLUSH_SCOPE_LINE/PAGE/ALL and `cache' a subset of
 * FLUSH_CACHE_BOTH, else -EINVAL.  Whole-cache flushes need
 * CAP_SYS_ADMIN; ranged flushes must lie entirely inside one vma of
 * the calling process.  Dispatches to the CPU-specific flusher. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/* Verify that the specified address region actually belongs to
		 * this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			/* Invalidate line by line via the CAAR/CACR registers.
			   (020/030 caches are invalidated, not pushed --
			   they have no copyback data cache.) */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;	/* 4 bytes per line entry */
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	} else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	}
out:
	unlock_kernel();
	return ret;
}
/*
 * Old cruft
 */
/* pause(2): sleep until a signal arrives.  The state must be set
 * before calling schedule() so a signal cannot be missed between the
 * two statements. */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	/* Only reached after a signal; never restarted automatically. */
	return -ERESTARTNOHAND;
}