arch/mipsn32/syscall_arch.h
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)

#define SYSCALL_RLIM_INFINITY (-1UL/2)

#if __mips_isa_rev >= 6
#define SYSCALL_CLOBBERLIST \
	"$1", "$3", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "memory"
#else
#define SYSCALL_CLOBBERLIST \
	"$1", "$3", "$10", "$11", "$12", "$13", \
	"$14", "$15", "$24", "$25", "hi", "lo", "memory"
#endif

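/* Linux syscall convention for the MIPS n32 ABI: the syscall number is
 * placed in $2 (v0), arguments in $4..$9 (a0..a5), and the kernel
 * returns the result in $2, setting $7 (a3) nonzero when that result
 * is an error code.  The wrappers below negate positive error results
 * so callers see the usual -errno convention.  The hi/lo clobbers are
 * dropped on MIPS r6, where those registers no longer exist. */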
static inline long __syscall0(long n)
{
	register long r7 __asm__("$7");
	register long r2 __asm__("$2");
	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7)
		: "ir"(n), "0"(r2)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

static inline long __syscall1(long n, long a)
{
	register long r4 __asm__("$4") = a;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2");
	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7)
		: "ir"(n), "0"(r2), "r"(r4)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

static inline long __syscall2(long n, long a, long b)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2");

	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7)
		: "ir"(n), "0"(r2), "r"(r4), "r"(r5)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

static inline long __syscall3(long n, long a, long b, long c)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2");

	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7)
		: "ir"(n), "0"(r2), "r"(r4), "r"(r5), "r"(r6)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

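/* From four arguments up, $7 carries both the fourth argument on entry
 * and the error flag on return, hence the read-write "+r"(r7)
 * constraint in the wrappers below instead of the plain "=r"(r7)
 * output used above. */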
static inline long __syscall4(long n, long a, long b, long c, long d)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r2 __asm__("$2");

	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "+r"(r7)
		: "ir"(n), "0"(r2), "r"(r4), "r"(r5), "r"(r6)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r2 __asm__("$2");

	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "+r"(r7)
		: "ir"(n), "0"(r2), "r"(r4), "r"(r5), "r"(r6), "r"(r8)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r9 __asm__("$9") = f;
	register long r2 __asm__("$2");

	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "+r"(r7)
		: "ir"(n), "0"(r2), "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
		: SYSCALL_CLOBBERLIST);
	return r7 && r2>0 ? -r2 : r2;
}

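/* vdso-accelerated clock_gettime: VDSO_CGT_SYM names the 64-bit-time
 * entry point preferred by musl's clock_gettime, and VDSO_CGT32_SYM
 * the legacy 32-bit-time entry point used as a fallback on older
 * kernels. */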
#define VDSO_USEFUL
#define VDSO_CGT32_SYM "__vdso_clock_gettime"
#define VDSO_CGT32_VER "LINUX_2.6"
#define VDSO_CGT_SYM "__vdso_clock_gettime64"
#define VDSO_CGT_VER "LINUX_2.6"

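/* MIPS uses nonstandard values for the pre-time64 socket timeout
 * options, so spell them out here for the paths that still convert
 * the old 32-bit timeval layout. */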
#define SO_SNDTIMEO_OLD 0x1005
#define SO_RCVTIMEO_OLD 0x1006