/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>

#include <asm/uaccess.h>
int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
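/*
 * Restart handler for compat_sys_nanosleep(): the remaining sleep time in
 * jiffies is kept in restart->arg0 and the user's rmtp pointer in
 * restart->arg1, so an interrupted sleep can be resumed transparently.
 */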
static long compat_nanosleep_restart(struct restart_block *restart)
{
	unsigned long expire = restart->arg0, now = jiffies;
	struct compat_timespec __user *rmtp;

	/* Did it expire while we handled signals? */
	if (!time_after(expire, now))
		return 0;

	expire = schedule_timeout_interruptible(expire - now);
	if (expire == 0)
		return 0;

	rmtp = (struct compat_timespec __user *)restart->arg1;
	if (rmtp) {
		struct compat_timespec ct;
		struct timespec t;

		jiffies_to_timespec(expire, &t);
		ct.tv_sec = t.tv_sec;
		ct.tv_nsec = t.tv_nsec;
		if (copy_to_user(rmtp, &ct, sizeof(ct)))
			return -EFAULT;
	}
	/* The 'restart' block is already filled in */
	return -ERESTART_RESTARTBLOCK;
}
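/*
 * Note the "+ (t.tv_sec || t.tv_nsec)" below: any non-zero request is
 * rounded up by one jiffy so the sleep never finishes early.  If a signal
 * interrupts the sleep, the restart block is set up for
 * compat_nanosleep_restart() above.
 */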
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
		struct compat_timespec __user *rmtp)
{
	struct timespec t;
	struct restart_block *restart;
	unsigned long expire;

	if (get_compat_timespec(&t, rqtp))
		return -EFAULT;

	if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
		return -EINVAL;

	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	expire = schedule_timeout_interruptible(expire);
	if (expire == 0)
		return 0;

	if (rmtp) {
		jiffies_to_timespec(expire, &t);
		if (put_compat_timespec(&t, rmtp))
			return -EFAULT;
	}
	restart = &current_thread_info()->restart_block;
	restart->fn = compat_nanosleep_restart;
	restart->arg0 = jiffies + expire;
	restart->arg1 = (unsigned long) rmtp;
	return -ERESTART_RESTARTBLOCK;
}
static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}
static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}
asmlinkage long compat_sys_getitimer(int which,
		struct compat_itimerval __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}
asmlinkage long compat_sys_setitimer(int which,
		struct compat_itimerval __user *in,
		struct compat_itimerval __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually it's
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct compat_tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		read_lock(&tasklist_lock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time. Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);

		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	return compat_jiffies_to_clock_t(jiffies);
}
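/*
 * Many of the wrappers below call the native syscall on a kernel buffer
 * under set_fs(KERNEL_DS) and then translate the result into the 32-bit
 * layout expected by compat userspace.
 */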
/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
		compat_old_sigset_t __user *oset)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs;

	if (set && get_user(s, set))
		return -EFAULT;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_sigprocmask(how,
			      set ? (old_sigset_t __user *) &s : NULL,
			      oset ? (old_sigset_t __user *) &s : NULL);
	set_fs(old_fs);
	if (ret == 0 && oset)
		ret = put_user(s, oset);
	return ret;
}
asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	set_fs(KERNEL_DS);
	ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
	set_fs(old_fs);
	return ret;
}
#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif
asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
	set_fs(old_fs);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			ret = -EFAULT;
	}
	return ret;
}
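/*
 * put_compat_rusage() copies a kernel struct rusage into the compat layout
 * field by field; if any single __put_user() fails the whole call returns
 * -EFAULT.
 */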
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
	struct rusage r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrusage(who, (struct rusage __user *) &r);
	set_fs(old_fs);

	if (ret)
		return ret;

	if (put_compat_rusage(&r, ru))
		return -EFAULT;

	return 0;
}
asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs(old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}
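/*
 * A user-supplied affinity mask shorter than the kernel cpumask_t is
 * zero-padded and a longer one is truncated before the bits are pulled in
 * with compat_get_bitmap().
 */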
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, cpumask_t *new_mask)
{
	unsigned long *k;

	if (len < sizeof(cpumask_t))
		memset(new_mask, 0, sizeof(cpumask_t));
	else if (len > sizeof(cpumask_t))
		len = sizeof(cpumask_t);

	k = cpus_addr(*new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_t new_mask;
	int retval;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
	if (retval)
		return retval;

	return sched_setaffinity(pid, new_mask);
}
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_t mask;
	unsigned long *k;
	unsigned int min_length = sizeof(cpumask_t);

	if (NR_CPUS <= BITS_PER_COMPAT_LONG)
		min_length = sizeof(compat_ulong_t);

	if (len < min_length)
		return -EINVAL;

	ret = sched_getaffinity(pid, &mask);
	if (ret < 0)
		return ret;

	k = cpus_addr(mask);
	ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
	if (ret)
		return ret;

	return min_length;
}
static int get_compat_itimerspec(struct itimerspec *dst,
				 struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}
static int put_compat_itimerspec(struct compat_itimerspec __user *dst,
				 struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}
long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}
long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}
long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}
long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}
long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}
static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1);

	restart->arg1 = (unsigned long) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->arg1 = (unsigned long) rmtp;
	}
	return err;
}
long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->arg1 = (unsigned long) rmtp;
	}
	return err;
}
/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sigev_notify and (sometimes
 * sigev_notify_thread_id). The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			&u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			&u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}
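/*
 * compat_get_bitmap()/compat_put_bitmap() convert between a userspace
 * bitmap made of compat_ulong_t words and a kernel bitmap of native longs,
 * moving BITS_PER_COMPAT_LONG bits at a time.
 */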
long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We don't want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			um = m;

			/*
			 * We don't want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}
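/*
 * Note the intentional switch fall-through below: each 64-bit sigset word
 * is assembled from two 32-bit compat words, starting from the highest
 * word that _NSIG_WORDS requires.
 */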
void
sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
	}
}
asmlinkage long
compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	int sig;
	struct timespec t;
	siginfo_t info;
	long ret, timeout = 0;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);
	sigdelsetmask(&s, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&s);

	if (uts) {
		if (get_compat_timespec(&t, uts))
			return -EFAULT;
		if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0
				|| t.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &s, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = timespec_to_jiffies(&t)
				+ (t.tv_sec || t.tv_nsec);
		if (timeout) {
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &s);

			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &s, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = timeout ? -EINTR : -EAGAIN;
	}
	return ret;
}
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user *tloc)
{
	compat_time_t i;
	struct timeval tv;

	do_gettimeofday(&tv);
	i = tv.tv_sec;

	if (tloc) {
		if (put_user(i, tloc))
			i = -EFAULT;
	}
	return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int ret;

	memset(&txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc.modes, &utp->modes) ||
			__get_user(txc.offset, &utp->offset) ||
			__get_user(txc.freq, &utp->freq) ||
			__get_user(txc.maxerror, &utp->maxerror) ||
			__get_user(txc.esterror, &utp->esterror) ||
			__get_user(txc.status, &utp->status) ||
			__get_user(txc.constant, &utp->constant) ||
			__get_user(txc.precision, &utp->precision) ||
			__get_user(txc.tolerance, &utp->tolerance) ||
			__get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc.tick, &utp->tick) ||
			__get_user(txc.ppsfreq, &utp->ppsfreq) ||
			__get_user(txc.jitter, &utp->jitter) ||
			__get_user(txc.shift, &utp->shift) ||
			__get_user(txc.stabil, &utp->stabil) ||
			__get_user(txc.jitcnt, &utp->jitcnt) ||
			__get_user(txc.calcnt, &utp->calcnt) ||
			__get_user(txc.errcnt, &utp->errcnt) ||
			__get_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	ret = do_adjtimex(&txc);

	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc.modes, &utp->modes) ||
			__put_user(txc.offset, &utp->offset) ||
			__put_user(txc.freq, &utp->freq) ||
			__put_user(txc.maxerror, &utp->maxerror) ||
			__put_user(txc.esterror, &utp->esterror) ||
			__put_user(txc.status, &utp->status) ||
			__put_user(txc.constant, &utp->constant) ||
			__put_user(txc.precision, &utp->precision) ||
			__put_user(txc.tolerance, &utp->tolerance) ||
			__put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc.tick, &utp->tick) ||
			__put_user(txc.ppsfreq, &utp->ppsfreq) ||
			__put_user(txc.jitter, &utp->jitter) ||
			__put_user(txc.shift, &utp->shift) ||
			__put_user(txc.stabil, &utp->stabil) ||
			__put_user(txc.jitcnt, &utp->jitcnt) ||
			__put_user(txc.calcnt, &utp->calcnt) ||
			__put_user(txc.errcnt, &utp->errcnt) ||
			__put_user(txc.stbcnt, &utp->stbcnt))
		return -EFAULT;

	return ret;
}
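/*
 * compat_sys_move_pages() widens the 32-bit page pointers into an array of
 * native pointers in compat_alloc_user_space() scratch space before
 * handing the request on to sys_move_pages().
 */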
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
		compat_uptr_t __user *pages32,
		const int __user *nodes,
		int __user *status,
		int flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
			put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}