/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
	int fd[2];
	int error;

	lock_kernel();
	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	unlock_kernel();
	return error;
}
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};
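/*
 * Illustrative only (userspace sketch, not part of this file): the
 * libc stub packs all six mmap arguments into this struct and passes
 * a single pointer as the system call argument, roughly:
 *
 *	struct mmap_arg_struct args = { addr, len, prot, flags, fd, off };
 *	ret = syscall(__NR_mmap, &args);
 */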
asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	down(&current->mm->mmap_sem);

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
	if (file)
		fput(file);
out:
	up(&current->mm->mmap_sem);
	return error;
}
extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
struct sel_arg_struct {
	unsigned long n;
	fd_set *inp, *outp, *exp;
	struct timeval *tvp;
};
asmlinkage int old_select(struct sel_arg_struct *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
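/*
 * Illustrative only: a semop(2) call arrives here as
 * sys_ipc(SEMOP, semid, nsops, 0, sops, 0) -- the low 16 bits of
 * `call' select the operation, while the high 16 bits carry the ABI
 * version (see the shift below).
 */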
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	ret = -EINVAL;
	lock_kernel();
	switch (call) {
	case SEMOP:
		ret = sys_semop (first, (struct sembuf *)ptr, second);
		break;
	case SEMGET:
		ret = sys_semget (first, second, third);
		break;
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			goto out;
		if ((ret = get_user(fourth.__pad, (void **) ptr)))
			goto out;
		ret = sys_semctl (first, second, third, fourth);
		break;
		}
	case MSGSND:
		ret = sys_msgsnd (first, (struct msgbuf *) ptr,
				  second, third);
		break;
	case MSGRCV:
		switch (version) {
		case 0: {
			struct ipc_kludge tmp;
			ret = -EINVAL;
			if (!ptr)
				goto out;
			ret = -EFAULT;
			if (copy_from_user (&tmp, ptr, sizeof (tmp)))
				goto out;
			ret = sys_msgrcv (first, tmp.msgp, second,
					  tmp.msgtyp, third);
			break;
			}
		case 1:
		default:
			ret = sys_msgrcv (first, (struct msgbuf *) ptr,
					  second, fifth, third);
			break;
		}
		break;
	case MSGGET:
		ret = sys_msgget ((key_t) first, second);
		break;
	case MSGCTL:
		ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
		break;
	case SHMAT: {
		ulong raddr;
		ret = sys_shmat (first, (char *) ptr, second, &raddr);
		if (ret)
			goto out;
		ret = put_user (raddr, (ulong *) third);
		break;
		}
	case SHMDT:
		ret = sys_shmdt ((char *)ptr);
		break;
	case SHMGET:
		ret = sys_shmget (first, second, third);
		break;
	case SHMCTL:
		ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
		break;
	}
out:
	unlock_kernel();
	return ret;
}
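/*
 * Note: unlike the i386, the m68k has no separate I/O address space,
 * so there are no port access rights to grant; ioperm() is simply
 * unimplemented here.
 */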
asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{
	return -ENOSYS;
}
/* Convert virtual address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})
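/*
 * "ptestr" makes the '040 MMU walk the translation tables for the
 * given user address and latch the result in the MMU status register;
 * MMU_R_040 is the "resident" bit, so the macro yields the physical
 * page address for a mapped page and 0 for an unmapped one. That is
 * why the flush loops below treat paddr == 0 as "skip this page".
 */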
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* Convert virtual address VADDR to physical address PADDR */
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
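/*
 * "plpar" (load physical address for reads) has the '060 translate
 * the virtual address in place, leaving the physical address in the
 * same address register. The XXX above presumably flags that 0 is
 * also a legal physical address, so a page mapped at physical 0
 * would be misreported as unmapped.
 */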
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      "cinva %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      "cinva %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      "cinva %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      "cinvl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      "cinvl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      "cinvl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      "cinvp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      "cinvp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      "cinvp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/* Verify that the specified address region actually belongs to
		   this process.  */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;	/* CEI: clear selected i-cache entry */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;	/* CED: clear selected d-cache entry */
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;	/* CI: clear entire i-cache */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;	/* CD: clear entire d-cache */
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	} else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	}
out:
	unlock_kernel();
	return ret;
}
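/*
 * Illustrative only (userspace sketch): a program that generates code
 * at runtime would typically follow its writes with something like
 *
 *	cacheflush((unsigned long) code, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, code_len);
 *
 * so that stale instruction-cache lines cannot be executed.
 */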
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}