/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
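/*
 * Note that sys_mmap2() takes its file offset pre-scaled to pages
 * (pgoff) rather than in bytes, so file offsets beyond 32 bits of
 * bytes remain representable in a single 32-bit argument.
 */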
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which originally could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};
asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd,
			 a.offset >> PAGE_SHIFT);
out:
	return error;
}
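/*
 * For illustration (hypothetical user code, not part of this file):
 * a caller of the old mmap() packs all six arguments into one struct
 * and passes only its address through the system call:
 *
 *	struct mmap_arg_struct args = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1UL, .offset = 0,
 *	};
 *	void *p = (void *)syscall(__NR_mmap, &args);
 */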
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset;	/* 64 bits */
	__u32 fd;
};
asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file *file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	/* The shift truncates to unsigned long; reject offsets whose
	   page number does not survive the round trip. */
	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};
asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old binaries pass msgp and msgtyp
				   packed in a struct ipc_kludge. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT: {
			ulong raddr;
			ret = do_shmat (first, ptr, second, &raddr);
			if (ret)
				return ret;
			return put_user (raddr, (ulong __user *) third);
		}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
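/*
 * The 'call' decoding above: the low 16 bits select the operation
 * (SEMOP ... SHMCTL), the high 16 bits carry an ABI version, and
 * version 0 marks the historical calling convention (e.g. the
 * struct ipc_kludge form of MSGRCV).
 */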
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
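/*
 * ptestr makes the 68040 MMU walk the translation tables for the
 * given address and latch the result in %mmusr; if the resident bit
 * (MMU_R_040) is clear the page is unmapped and the macro yields 0,
 * which the flush loops below treat as "skip this page".
 */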
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;
	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		/* Number of 16-byte cache lines left on this page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;
	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr);					\
})
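/*
 * plpar is the read variant of the 68060 PLPA instruction: it has the
 * MMU translate the logical address in the register to a physical
 * address in place.
 */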
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;
	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;
	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
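/*
 * Illustrative user-space use (hypothetical code, not part of this
 * file): a JIT that has just written instructions into a buffer must
 * make the data and instruction caches coherent before jumping there:
 *
 *	cacheflush((unsigned long) code, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, code_len);
 */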
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
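/*
 * The register asm variables pin the syscall number and arguments to
 * %d0-%d3, matching the m68k syscall ABI, and "trap #0" enters the
 * kernel through the same exception gate user space uses, which is
 * what builds the proper pt_regs frame mentioned above.
 */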