/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>

#include <asm/uaccess.h>
/*
 * Note that the native side is already converted to a timespec, because
 * that's what we want anyway.
 */
33 static int compat_get_timeval(struct timespec
*o
,
34 struct compat_timeval __user
*i
)
38 if (get_user(o
->tv_sec
, &i
->tv_sec
) ||
39 get_user(usec
, &i
->tv_usec
))
41 o
->tv_nsec
= usec
* 1000;
45 static int compat_put_timeval(struct compat_timeval __user
*o
,
48 return (put_user(i
->tv_sec
, &o
->tv_sec
) ||
49 put_user(i
->tv_usec
, &o
->tv_usec
)) ? -EFAULT
: 0;
52 asmlinkage
long compat_sys_gettimeofday(struct compat_timeval __user
*tv
,
53 struct timezone __user
*tz
)
57 do_gettimeofday(&ktv
);
58 if (compat_put_timeval(tv
, &ktv
))
62 if (copy_to_user(tz
, &sys_tz
, sizeof(sys_tz
)))
69 asmlinkage
long compat_sys_settimeofday(struct compat_timeval __user
*tv
,
70 struct timezone __user
*tz
)
76 if (compat_get_timeval(&kts
, tv
))
80 if (copy_from_user(&ktz
, tz
, sizeof(ktz
)))
84 return do_sys_settimeofday(tv
? &kts
: NULL
, tz
? &ktz
: NULL
);
87 int get_compat_timespec(struct timespec
*ts
, const struct compat_timespec __user
*cts
)
89 return (!access_ok(VERIFY_READ
, cts
, sizeof(*cts
)) ||
90 __get_user(ts
->tv_sec
, &cts
->tv_sec
) ||
91 __get_user(ts
->tv_nsec
, &cts
->tv_nsec
)) ? -EFAULT
: 0;
94 int put_compat_timespec(const struct timespec
*ts
, struct compat_timespec __user
*cts
)
96 return (!access_ok(VERIFY_WRITE
, cts
, sizeof(*cts
)) ||
97 __put_user(ts
->tv_sec
, &cts
->tv_sec
) ||
98 __put_user(ts
->tv_nsec
, &cts
->tv_nsec
)) ? -EFAULT
: 0;
101 static long compat_nanosleep_restart(struct restart_block
*restart
)
103 struct compat_timespec __user
*rmtp
;
108 restart
->nanosleep
.rmtp
= (struct timespec __user
*) &rmt
;
111 ret
= hrtimer_nanosleep_restart(restart
);
115 rmtp
= restart
->nanosleep
.compat_rmtp
;
117 if (rmtp
&& put_compat_timespec(&rmt
, rmtp
))
124 asmlinkage
long compat_sys_nanosleep(struct compat_timespec __user
*rqtp
,
125 struct compat_timespec __user
*rmtp
)
127 struct timespec tu
, rmt
;
131 if (get_compat_timespec(&tu
, rqtp
))
134 if (!timespec_valid(&tu
))
139 ret
= hrtimer_nanosleep(&tu
,
140 rmtp
? (struct timespec __user
*)&rmt
: NULL
,
141 HRTIMER_MODE_REL
, CLOCK_MONOTONIC
);
145 struct restart_block
*restart
146 = ¤t_thread_info()->restart_block
;
148 restart
->fn
= compat_nanosleep_restart
;
149 restart
->nanosleep
.compat_rmtp
= rmtp
;
151 if (rmtp
&& put_compat_timespec(&rmt
, rmtp
))
158 static inline long get_compat_itimerval(struct itimerval
*o
,
159 struct compat_itimerval __user
*i
)
161 return (!access_ok(VERIFY_READ
, i
, sizeof(*i
)) ||
162 (__get_user(o
->it_interval
.tv_sec
, &i
->it_interval
.tv_sec
) |
163 __get_user(o
->it_interval
.tv_usec
, &i
->it_interval
.tv_usec
) |
164 __get_user(o
->it_value
.tv_sec
, &i
->it_value
.tv_sec
) |
165 __get_user(o
->it_value
.tv_usec
, &i
->it_value
.tv_usec
)));
168 static inline long put_compat_itimerval(struct compat_itimerval __user
*o
,
171 return (!access_ok(VERIFY_WRITE
, o
, sizeof(*o
)) ||
172 (__put_user(i
->it_interval
.tv_sec
, &o
->it_interval
.tv_sec
) |
173 __put_user(i
->it_interval
.tv_usec
, &o
->it_interval
.tv_usec
) |
174 __put_user(i
->it_value
.tv_sec
, &o
->it_value
.tv_sec
) |
175 __put_user(i
->it_value
.tv_usec
, &o
->it_value
.tv_usec
)));
178 asmlinkage
long compat_sys_getitimer(int which
,
179 struct compat_itimerval __user
*it
)
181 struct itimerval kit
;
184 error
= do_getitimer(which
, &kit
);
185 if (!error
&& put_compat_itimerval(it
, &kit
))
190 asmlinkage
long compat_sys_setitimer(int which
,
191 struct compat_itimerval __user
*in
,
192 struct compat_itimerval __user
*out
)
194 struct itimerval kin
, kout
;
198 if (get_compat_itimerval(&kin
, in
))
201 memset(&kin
, 0, sizeof(kin
));
203 error
= do_setitimer(which
, &kin
, out
? &kout
: NULL
);
206 if (put_compat_itimerval(out
, &kout
))
211 asmlinkage
long compat_sys_times(struct compat_tms __user
*tbuf
)
214 * In the SMP world we might just be unlucky and have one of
215 * the times increment as we use it. Since the value is an
216 * atomically safe type this is just fine. Conceptually its
217 * as if the syscall took an instant longer to occur.
220 struct compat_tms tmp
;
221 struct task_struct
*tsk
= current
;
222 struct task_struct
*t
;
223 cputime_t utime
, stime
, cutime
, cstime
;
225 read_lock(&tasklist_lock
);
226 utime
= tsk
->signal
->utime
;
227 stime
= tsk
->signal
->stime
;
230 utime
= cputime_add(utime
, t
->utime
);
231 stime
= cputime_add(stime
, t
->stime
);
236 * While we have tasklist_lock read-locked, no dying thread
237 * can be updating current->signal->[us]time. Instead,
238 * we got their counts included in the live thread loop.
239 * However, another thread can come in right now and
240 * do a wait call that updates current->signal->c[us]time.
241 * To make sure we always see that pair updated atomically,
242 * we take the siglock around fetching them.
244 spin_lock_irq(&tsk
->sighand
->siglock
);
245 cutime
= tsk
->signal
->cutime
;
246 cstime
= tsk
->signal
->cstime
;
247 spin_unlock_irq(&tsk
->sighand
->siglock
);
248 read_unlock(&tasklist_lock
);
250 tmp
.tms_utime
= compat_jiffies_to_clock_t(cputime_to_jiffies(utime
));
251 tmp
.tms_stime
= compat_jiffies_to_clock_t(cputime_to_jiffies(stime
));
252 tmp
.tms_cutime
= compat_jiffies_to_clock_t(cputime_to_jiffies(cutime
));
253 tmp
.tms_cstime
= compat_jiffies_to_clock_t(cputime_to_jiffies(cstime
));
254 if (copy_to_user(tbuf
, &tmp
, sizeof(tmp
)))
257 return compat_jiffies_to_clock_t(jiffies
);
/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */
265 asmlinkage
long compat_sys_sigpending(compat_old_sigset_t __user
*set
)
269 mm_segment_t old_fs
= get_fs();
272 ret
= sys_sigpending((old_sigset_t __user
*) &s
);
275 ret
= put_user(s
, set
);
279 asmlinkage
long compat_sys_sigprocmask(int how
, compat_old_sigset_t __user
*set
,
280 compat_old_sigset_t __user
*oset
)
286 if (set
&& get_user(s
, set
))
290 ret
= sys_sigprocmask(how
,
291 set
? (old_sigset_t __user
*) &s
: NULL
,
292 oset
? (old_sigset_t __user
*) &s
: NULL
);
296 ret
= put_user(s
, oset
);
300 asmlinkage
long compat_sys_setrlimit(unsigned int resource
,
301 struct compat_rlimit __user
*rlim
)
305 mm_segment_t old_fs
= get_fs ();
307 if (resource
>= RLIM_NLIMITS
)
310 if (!access_ok(VERIFY_READ
, rlim
, sizeof(*rlim
)) ||
311 __get_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
312 __get_user(r
.rlim_max
, &rlim
->rlim_max
))
315 if (r
.rlim_cur
== COMPAT_RLIM_INFINITY
)
316 r
.rlim_cur
= RLIM_INFINITY
;
317 if (r
.rlim_max
== COMPAT_RLIM_INFINITY
)
318 r
.rlim_max
= RLIM_INFINITY
;
320 ret
= sys_setrlimit(resource
, (struct rlimit __user
*) &r
);
325 #ifdef COMPAT_RLIM_OLD_INFINITY
327 asmlinkage
long compat_sys_old_getrlimit(unsigned int resource
,
328 struct compat_rlimit __user
*rlim
)
332 mm_segment_t old_fs
= get_fs();
335 ret
= sys_old_getrlimit(resource
, &r
);
339 if (r
.rlim_cur
> COMPAT_RLIM_OLD_INFINITY
)
340 r
.rlim_cur
= COMPAT_RLIM_INFINITY
;
341 if (r
.rlim_max
> COMPAT_RLIM_OLD_INFINITY
)
342 r
.rlim_max
= COMPAT_RLIM_INFINITY
;
344 if (!access_ok(VERIFY_WRITE
, rlim
, sizeof(*rlim
)) ||
345 __put_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
346 __put_user(r
.rlim_max
, &rlim
->rlim_max
))
354 asmlinkage
long compat_sys_getrlimit (unsigned int resource
,
355 struct compat_rlimit __user
*rlim
)
359 mm_segment_t old_fs
= get_fs();
362 ret
= sys_getrlimit(resource
, (struct rlimit __user
*) &r
);
365 if (r
.rlim_cur
> COMPAT_RLIM_INFINITY
)
366 r
.rlim_cur
= COMPAT_RLIM_INFINITY
;
367 if (r
.rlim_max
> COMPAT_RLIM_INFINITY
)
368 r
.rlim_max
= COMPAT_RLIM_INFINITY
;
370 if (!access_ok(VERIFY_WRITE
, rlim
, sizeof(*rlim
)) ||
371 __put_user(r
.rlim_cur
, &rlim
->rlim_cur
) ||
372 __put_user(r
.rlim_max
, &rlim
->rlim_max
))
378 int put_compat_rusage(const struct rusage
*r
, struct compat_rusage __user
*ru
)
380 if (!access_ok(VERIFY_WRITE
, ru
, sizeof(*ru
)) ||
381 __put_user(r
->ru_utime
.tv_sec
, &ru
->ru_utime
.tv_sec
) ||
382 __put_user(r
->ru_utime
.tv_usec
, &ru
->ru_utime
.tv_usec
) ||
383 __put_user(r
->ru_stime
.tv_sec
, &ru
->ru_stime
.tv_sec
) ||
384 __put_user(r
->ru_stime
.tv_usec
, &ru
->ru_stime
.tv_usec
) ||
385 __put_user(r
->ru_maxrss
, &ru
->ru_maxrss
) ||
386 __put_user(r
->ru_ixrss
, &ru
->ru_ixrss
) ||
387 __put_user(r
->ru_idrss
, &ru
->ru_idrss
) ||
388 __put_user(r
->ru_isrss
, &ru
->ru_isrss
) ||
389 __put_user(r
->ru_minflt
, &ru
->ru_minflt
) ||
390 __put_user(r
->ru_majflt
, &ru
->ru_majflt
) ||
391 __put_user(r
->ru_nswap
, &ru
->ru_nswap
) ||
392 __put_user(r
->ru_inblock
, &ru
->ru_inblock
) ||
393 __put_user(r
->ru_oublock
, &ru
->ru_oublock
) ||
394 __put_user(r
->ru_msgsnd
, &ru
->ru_msgsnd
) ||
395 __put_user(r
->ru_msgrcv
, &ru
->ru_msgrcv
) ||
396 __put_user(r
->ru_nsignals
, &ru
->ru_nsignals
) ||
397 __put_user(r
->ru_nvcsw
, &ru
->ru_nvcsw
) ||
398 __put_user(r
->ru_nivcsw
, &ru
->ru_nivcsw
))
403 asmlinkage
long compat_sys_getrusage(int who
, struct compat_rusage __user
*ru
)
407 mm_segment_t old_fs
= get_fs();
410 ret
= sys_getrusage(who
, (struct rusage __user
*) &r
);
416 if (put_compat_rusage(&r
, ru
))
423 compat_sys_wait4(compat_pid_t pid
, compat_uint_t __user
*stat_addr
, int options
,
424 struct compat_rusage __user
*ru
)
427 return sys_wait4(pid
, stat_addr
, options
, NULL
);
432 mm_segment_t old_fs
= get_fs();
437 (unsigned int __user
*) &status
: NULL
),
438 options
, (struct rusage __user
*) &r
);
442 if (put_compat_rusage(&r
, ru
))
444 if (stat_addr
&& put_user(status
, stat_addr
))
451 asmlinkage
long compat_sys_waitid(int which
, compat_pid_t pid
,
452 struct compat_siginfo __user
*uinfo
, int options
,
453 struct compat_rusage __user
*uru
)
458 mm_segment_t old_fs
= get_fs();
460 memset(&info
, 0, sizeof(info
));
463 ret
= sys_waitid(which
, pid
, (siginfo_t __user
*)&info
, options
,
464 uru
? (struct rusage __user
*)&ru
: NULL
);
467 if ((ret
< 0) || (info
.si_signo
== 0))
471 ret
= put_compat_rusage(&ru
, uru
);
476 BUG_ON(info
.si_code
& __SI_MASK
);
477 info
.si_code
|= __SI_CHLD
;
478 return copy_siginfo_to_user32(uinfo
, &info
);
481 static int compat_get_user_cpu_mask(compat_ulong_t __user
*user_mask_ptr
,
482 unsigned len
, cpumask_t
*new_mask
)
486 if (len
< sizeof(cpumask_t
))
487 memset(new_mask
, 0, sizeof(cpumask_t
));
488 else if (len
> sizeof(cpumask_t
))
489 len
= sizeof(cpumask_t
);
491 k
= cpus_addr(*new_mask
);
492 return compat_get_bitmap(k
, user_mask_ptr
, len
* 8);
495 asmlinkage
long compat_sys_sched_setaffinity(compat_pid_t pid
,
497 compat_ulong_t __user
*user_mask_ptr
)
502 retval
= compat_get_user_cpu_mask(user_mask_ptr
, len
, &new_mask
);
506 return sched_setaffinity(pid
, &new_mask
);
509 asmlinkage
long compat_sys_sched_getaffinity(compat_pid_t pid
, unsigned int len
,
510 compat_ulong_t __user
*user_mask_ptr
)
515 unsigned int min_length
= sizeof(cpumask_t
);
517 if (NR_CPUS
<= BITS_PER_COMPAT_LONG
)
518 min_length
= sizeof(compat_ulong_t
);
520 if (len
< min_length
)
523 ret
= sched_getaffinity(pid
, &mask
);
528 ret
= compat_put_bitmap(user_mask_ptr
, k
, min_length
* 8);
535 int get_compat_itimerspec(struct itimerspec
*dst
,
536 const struct compat_itimerspec __user
*src
)
538 if (get_compat_timespec(&dst
->it_interval
, &src
->it_interval
) ||
539 get_compat_timespec(&dst
->it_value
, &src
->it_value
))
544 int put_compat_itimerspec(struct compat_itimerspec __user
*dst
,
545 const struct itimerspec
*src
)
547 if (put_compat_timespec(&src
->it_interval
, &dst
->it_interval
) ||
548 put_compat_timespec(&src
->it_value
, &dst
->it_value
))
553 long compat_sys_timer_create(clockid_t which_clock
,
554 struct compat_sigevent __user
*timer_event_spec
,
555 timer_t __user
*created_timer_id
)
557 struct sigevent __user
*event
= NULL
;
559 if (timer_event_spec
) {
560 struct sigevent kevent
;
562 event
= compat_alloc_user_space(sizeof(*event
));
563 if (get_compat_sigevent(&kevent
, timer_event_spec
) ||
564 copy_to_user(event
, &kevent
, sizeof(*event
)))
568 return sys_timer_create(which_clock
, event
, created_timer_id
);
571 long compat_sys_timer_settime(timer_t timer_id
, int flags
,
572 struct compat_itimerspec __user
*new,
573 struct compat_itimerspec __user
*old
)
577 struct itimerspec newts
, oldts
;
581 if (get_compat_itimerspec(&newts
, new))
585 err
= sys_timer_settime(timer_id
, flags
,
586 (struct itimerspec __user
*) &newts
,
587 (struct itimerspec __user
*) &oldts
);
589 if (!err
&& old
&& put_compat_itimerspec(old
, &oldts
))
594 long compat_sys_timer_gettime(timer_t timer_id
,
595 struct compat_itimerspec __user
*setting
)
599 struct itimerspec ts
;
603 err
= sys_timer_gettime(timer_id
,
604 (struct itimerspec __user
*) &ts
);
606 if (!err
&& put_compat_itimerspec(setting
, &ts
))
611 long compat_sys_clock_settime(clockid_t which_clock
,
612 struct compat_timespec __user
*tp
)
618 if (get_compat_timespec(&ts
, tp
))
622 err
= sys_clock_settime(which_clock
,
623 (struct timespec __user
*) &ts
);
628 long compat_sys_clock_gettime(clockid_t which_clock
,
629 struct compat_timespec __user
*tp
)
637 err
= sys_clock_gettime(which_clock
,
638 (struct timespec __user
*) &ts
);
640 if (!err
&& put_compat_timespec(&ts
, tp
))
645 long compat_sys_clock_getres(clockid_t which_clock
,
646 struct compat_timespec __user
*tp
)
654 err
= sys_clock_getres(which_clock
,
655 (struct timespec __user
*) &ts
);
657 if (!err
&& tp
&& put_compat_timespec(&ts
, tp
))
662 static long compat_clock_nanosleep_restart(struct restart_block
*restart
)
667 struct compat_timespec
*rmtp
= restart
->nanosleep
.compat_rmtp
;
669 restart
->nanosleep
.rmtp
= (struct timespec __user
*) &tu
;
672 err
= clock_nanosleep_restart(restart
);
675 if ((err
== -ERESTART_RESTARTBLOCK
) && rmtp
&&
676 put_compat_timespec(&tu
, rmtp
))
679 if (err
== -ERESTART_RESTARTBLOCK
) {
680 restart
->fn
= compat_clock_nanosleep_restart
;
681 restart
->nanosleep
.compat_rmtp
= rmtp
;
686 long compat_sys_clock_nanosleep(clockid_t which_clock
, int flags
,
687 struct compat_timespec __user
*rqtp
,
688 struct compat_timespec __user
*rmtp
)
692 struct timespec in
, out
;
693 struct restart_block
*restart
;
695 if (get_compat_timespec(&in
, rqtp
))
700 err
= sys_clock_nanosleep(which_clock
, flags
,
701 (struct timespec __user
*) &in
,
702 (struct timespec __user
*) &out
);
705 if ((err
== -ERESTART_RESTARTBLOCK
) && rmtp
&&
706 put_compat_timespec(&out
, rmtp
))
709 if (err
== -ERESTART_RESTARTBLOCK
) {
710 restart
= ¤t_thread_info()->restart_block
;
711 restart
->fn
= compat_clock_nanosleep_restart
;
712 restart
->nanosleep
.compat_rmtp
= rmtp
;
/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sig_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
724 int get_compat_sigevent(struct sigevent
*event
,
725 const struct compat_sigevent __user
*u_event
)
727 memset(event
, 0, sizeof(*event
));
728 return (!access_ok(VERIFY_READ
, u_event
, sizeof(*u_event
)) ||
729 __get_user(event
->sigev_value
.sival_int
,
730 &u_event
->sigev_value
.sival_int
) ||
731 __get_user(event
->sigev_signo
, &u_event
->sigev_signo
) ||
732 __get_user(event
->sigev_notify
, &u_event
->sigev_notify
) ||
733 __get_user(event
->sigev_notify_thread_id
,
734 &u_event
->sigev_notify_thread_id
))
738 long compat_get_bitmap(unsigned long *mask
, const compat_ulong_t __user
*umask
,
739 unsigned long bitmap_size
)
744 unsigned long nr_compat_longs
;
746 /* align bitmap up to nearest compat_long_t boundary */
747 bitmap_size
= ALIGN(bitmap_size
, BITS_PER_COMPAT_LONG
);
749 if (!access_ok(VERIFY_READ
, umask
, bitmap_size
/ 8))
752 nr_compat_longs
= BITS_TO_COMPAT_LONGS(bitmap_size
);
754 for (i
= 0; i
< BITS_TO_LONGS(bitmap_size
); i
++) {
757 for (j
= 0; j
< sizeof(m
)/sizeof(um
); j
++) {
759 * We dont want to read past the end of the userspace
760 * bitmap. We must however ensure the end of the
761 * kernel bitmap is zeroed.
763 if (nr_compat_longs
-- > 0) {
764 if (__get_user(um
, umask
))
771 m
|= (long)um
<< (j
* BITS_PER_COMPAT_LONG
);
779 long compat_put_bitmap(compat_ulong_t __user
*umask
, unsigned long *mask
,
780 unsigned long bitmap_size
)
785 unsigned long nr_compat_longs
;
787 /* align bitmap up to nearest compat_long_t boundary */
788 bitmap_size
= ALIGN(bitmap_size
, BITS_PER_COMPAT_LONG
);
790 if (!access_ok(VERIFY_WRITE
, umask
, bitmap_size
/ 8))
793 nr_compat_longs
= BITS_TO_COMPAT_LONGS(bitmap_size
);
795 for (i
= 0; i
< BITS_TO_LONGS(bitmap_size
); i
++) {
798 for (j
= 0; j
< sizeof(m
)/sizeof(um
); j
++) {
802 * We dont want to write past the end of the userspace
805 if (nr_compat_longs
-- > 0) {
806 if (__put_user(um
, umask
))
820 sigset_from_compat (sigset_t
*set
, compat_sigset_t
*compat
)
822 switch (_NSIG_WORDS
) {
823 case 4: set
->sig
[3] = compat
->sig
[6] | (((long)compat
->sig
[7]) << 32 );
824 case 3: set
->sig
[2] = compat
->sig
[4] | (((long)compat
->sig
[5]) << 32 );
825 case 2: set
->sig
[1] = compat
->sig
[2] | (((long)compat
->sig
[3]) << 32 );
826 case 1: set
->sig
[0] = compat
->sig
[0] | (((long)compat
->sig
[1]) << 32 );
831 compat_sys_rt_sigtimedwait (compat_sigset_t __user
*uthese
,
832 struct compat_siginfo __user
*uinfo
,
833 struct compat_timespec __user
*uts
, compat_size_t sigsetsize
)
840 long ret
, timeout
= 0;
842 if (sigsetsize
!= sizeof(sigset_t
))
845 if (copy_from_user(&s32
, uthese
, sizeof(compat_sigset_t
)))
847 sigset_from_compat(&s
, &s32
);
848 sigdelsetmask(&s
,sigmask(SIGKILL
)|sigmask(SIGSTOP
));
852 if (get_compat_timespec (&t
, uts
))
854 if (t
.tv_nsec
>= 1000000000L || t
.tv_nsec
< 0
859 spin_lock_irq(¤t
->sighand
->siglock
);
860 sig
= dequeue_signal(current
, &s
, &info
);
862 timeout
= MAX_SCHEDULE_TIMEOUT
;
864 timeout
= timespec_to_jiffies(&t
)
865 +(t
.tv_sec
|| t
.tv_nsec
);
867 current
->real_blocked
= current
->blocked
;
868 sigandsets(¤t
->blocked
, ¤t
->blocked
, &s
);
871 spin_unlock_irq(¤t
->sighand
->siglock
);
873 timeout
= schedule_timeout_interruptible(timeout
);
875 spin_lock_irq(¤t
->sighand
->siglock
);
876 sig
= dequeue_signal(current
, &s
, &info
);
877 current
->blocked
= current
->real_blocked
;
878 siginitset(¤t
->real_blocked
, 0);
882 spin_unlock_irq(¤t
->sighand
->siglock
);
887 if (copy_siginfo_to_user32(uinfo
, &info
))
891 ret
= timeout
?-EINTR
:-EAGAIN
;
897 #ifdef __ARCH_WANT_COMPAT_SYS_TIME
899 /* compat_time_t is a 32 bit "long" and needs to get converted. */
901 asmlinkage
long compat_sys_time(compat_time_t __user
* tloc
)
906 do_gettimeofday(&tv
);
910 if (put_user(i
,tloc
))
916 asmlinkage
long compat_sys_stime(compat_time_t __user
*tptr
)
921 if (get_user(tv
.tv_sec
, tptr
))
926 err
= security_settime(&tv
, NULL
);
930 do_settimeofday(&tv
);
934 #endif /* __ARCH_WANT_COMPAT_SYS_TIME */
936 #ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
937 asmlinkage
long compat_sys_rt_sigsuspend(compat_sigset_t __user
*unewset
, compat_size_t sigsetsize
)
940 compat_sigset_t newset32
;
942 /* XXX: Don't preclude handling different sized sigset_t's. */
943 if (sigsetsize
!= sizeof(sigset_t
))
946 if (copy_from_user(&newset32
, unewset
, sizeof(compat_sigset_t
)))
948 sigset_from_compat(&newset
, &newset32
);
949 sigdelsetmask(&newset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
951 spin_lock_irq(¤t
->sighand
->siglock
);
952 current
->saved_sigmask
= current
->blocked
;
953 current
->blocked
= newset
;
955 spin_unlock_irq(¤t
->sighand
->siglock
);
957 current
->state
= TASK_INTERRUPTIBLE
;
959 set_restore_sigmask();
960 return -ERESTARTNOHAND
;
962 #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
964 asmlinkage
long compat_sys_adjtimex(struct compat_timex __user
*utp
)
969 memset(&txc
, 0, sizeof(struct timex
));
971 if (!access_ok(VERIFY_READ
, utp
, sizeof(struct compat_timex
)) ||
972 __get_user(txc
.modes
, &utp
->modes
) ||
973 __get_user(txc
.offset
, &utp
->offset
) ||
974 __get_user(txc
.freq
, &utp
->freq
) ||
975 __get_user(txc
.maxerror
, &utp
->maxerror
) ||
976 __get_user(txc
.esterror
, &utp
->esterror
) ||
977 __get_user(txc
.status
, &utp
->status
) ||
978 __get_user(txc
.constant
, &utp
->constant
) ||
979 __get_user(txc
.precision
, &utp
->precision
) ||
980 __get_user(txc
.tolerance
, &utp
->tolerance
) ||
981 __get_user(txc
.time
.tv_sec
, &utp
->time
.tv_sec
) ||
982 __get_user(txc
.time
.tv_usec
, &utp
->time
.tv_usec
) ||
983 __get_user(txc
.tick
, &utp
->tick
) ||
984 __get_user(txc
.ppsfreq
, &utp
->ppsfreq
) ||
985 __get_user(txc
.jitter
, &utp
->jitter
) ||
986 __get_user(txc
.shift
, &utp
->shift
) ||
987 __get_user(txc
.stabil
, &utp
->stabil
) ||
988 __get_user(txc
.jitcnt
, &utp
->jitcnt
) ||
989 __get_user(txc
.calcnt
, &utp
->calcnt
) ||
990 __get_user(txc
.errcnt
, &utp
->errcnt
) ||
991 __get_user(txc
.stbcnt
, &utp
->stbcnt
))
994 ret
= do_adjtimex(&txc
);
996 if (!access_ok(VERIFY_WRITE
, utp
, sizeof(struct compat_timex
)) ||
997 __put_user(txc
.modes
, &utp
->modes
) ||
998 __put_user(txc
.offset
, &utp
->offset
) ||
999 __put_user(txc
.freq
, &utp
->freq
) ||
1000 __put_user(txc
.maxerror
, &utp
->maxerror
) ||
1001 __put_user(txc
.esterror
, &utp
->esterror
) ||
1002 __put_user(txc
.status
, &utp
->status
) ||
1003 __put_user(txc
.constant
, &utp
->constant
) ||
1004 __put_user(txc
.precision
, &utp
->precision
) ||
1005 __put_user(txc
.tolerance
, &utp
->tolerance
) ||
1006 __put_user(txc
.time
.tv_sec
, &utp
->time
.tv_sec
) ||
1007 __put_user(txc
.time
.tv_usec
, &utp
->time
.tv_usec
) ||
1008 __put_user(txc
.tick
, &utp
->tick
) ||
1009 __put_user(txc
.ppsfreq
, &utp
->ppsfreq
) ||
1010 __put_user(txc
.jitter
, &utp
->jitter
) ||
1011 __put_user(txc
.shift
, &utp
->shift
) ||
1012 __put_user(txc
.stabil
, &utp
->stabil
) ||
1013 __put_user(txc
.jitcnt
, &utp
->jitcnt
) ||
1014 __put_user(txc
.calcnt
, &utp
->calcnt
) ||
1015 __put_user(txc
.errcnt
, &utp
->errcnt
) ||
1016 __put_user(txc
.stbcnt
, &utp
->stbcnt
) ||
1017 __put_user(txc
.tai
, &utp
->tai
))
1024 asmlinkage
long compat_sys_move_pages(pid_t pid
, unsigned long nr_pages
,
1025 compat_uptr_t __user
*pages32
,
1026 const int __user
*nodes
,
1030 const void __user
* __user
*pages
;
1033 pages
= compat_alloc_user_space(nr_pages
* sizeof(void *));
1034 for (i
= 0; i
< nr_pages
; i
++) {
1037 if (get_user(p
, pages32
+ i
) ||
1038 put_user(compat_ptr(p
), pages
+ i
))
1041 return sys_move_pages(pid
, nr_pages
, pages
, nodes
, status
, flags
);
1044 asmlinkage
long compat_sys_migrate_pages(compat_pid_t pid
,
1045 compat_ulong_t maxnode
,
1046 const compat_ulong_t __user
*old_nodes
,
1047 const compat_ulong_t __user
*new_nodes
)
1049 unsigned long __user
*old
= NULL
;
1050 unsigned long __user
*new = NULL
;
1051 nodemask_t tmp_mask
;
1052 unsigned long nr_bits
;
1055 nr_bits
= min_t(unsigned long, maxnode
- 1, MAX_NUMNODES
);
1056 size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1058 if (compat_get_bitmap(nodes_addr(tmp_mask
), old_nodes
, nr_bits
))
1060 old
= compat_alloc_user_space(new_nodes
? size
* 2 : size
);
1062 new = old
+ size
/ sizeof(unsigned long);
1063 if (copy_to_user(old
, nodes_addr(tmp_mask
), size
))
1067 if (compat_get_bitmap(nodes_addr(tmp_mask
), new_nodes
, nr_bits
))
1070 new = compat_alloc_user_space(size
);
1071 if (copy_to_user(new, nodes_addr(tmp_mask
), size
))
1074 return sys_migrate_pages(pid
, nr_bits
+ 1, old
, new);
1078 struct compat_sysinfo
{
1092 char _f
[20-2*sizeof(u32
)-sizeof(int)];
1096 compat_sys_sysinfo(struct compat_sysinfo __user
*info
)
1102 /* Check to see if any memory value is too large for 32-bit and scale
1105 if ((s
.totalram
>> 32) || (s
.totalswap
>> 32)) {
1108 while (s
.mem_unit
< PAGE_SIZE
) {
1113 s
.totalram
>>= bitcount
;
1114 s
.freeram
>>= bitcount
;
1115 s
.sharedram
>>= bitcount
;
1116 s
.bufferram
>>= bitcount
;
1117 s
.totalswap
>>= bitcount
;
1118 s
.freeswap
>>= bitcount
;
1119 s
.totalhigh
>>= bitcount
;
1120 s
.freehigh
>>= bitcount
;
1123 if (!access_ok(VERIFY_WRITE
, info
, sizeof(struct compat_sysinfo
)) ||
1124 __put_user (s
.uptime
, &info
->uptime
) ||
1125 __put_user (s
.loads
[0], &info
->loads
[0]) ||
1126 __put_user (s
.loads
[1], &info
->loads
[1]) ||
1127 __put_user (s
.loads
[2], &info
->loads
[2]) ||
1128 __put_user (s
.totalram
, &info
->totalram
) ||
1129 __put_user (s
.freeram
, &info
->freeram
) ||
1130 __put_user (s
.sharedram
, &info
->sharedram
) ||
1131 __put_user (s
.bufferram
, &info
->bufferram
) ||
1132 __put_user (s
.totalswap
, &info
->totalswap
) ||
1133 __put_user (s
.freeswap
, &info
->freeswap
) ||
1134 __put_user (s
.procs
, &info
->procs
) ||
1135 __put_user (s
.totalhigh
, &info
->totalhigh
) ||
1136 __put_user (s
.freehigh
, &info
->freehigh
) ||
1137 __put_user (s
.mem_unit
, &info
->mem_unit
))