/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>

#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

struct notifier_block *reboot_notifier_list = NULL;

int register_reboot_notifier(struct notifier_block * nb)
{
        return notifier_chain_register(&reboot_notifier_list, nb);
}

int unregister_reboot_notifier(struct notifier_block * nb)
{
        return notifier_chain_unregister(&reboot_notifier_list, nb);
}

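/*
 * For reference, a driver that wants shutdown notification hangs a
 * notifier_block on the chain above.  A minimal sketch, with a
 * hypothetical callback name but the usual <linux/notifier.h> signature:
 *
 *	static int my_reboot_event(struct notifier_block *nb,
 *	                           unsigned long event, void *ptr)
 *	{
 *		if (event == SYS_RESTART || event == SYS_HALT ||
 *		    event == SYS_POWER_OFF)
 *			quiesce_my_hardware();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_reboot_notifier = {
 *		my_reboot_event, NULL, 0
 *	};
 *
 *	register_reboot_notifier(&my_reboot_notifier);    at init time
 *	unregister_reboot_notifier(&my_reboot_notifier);  at cleanup time
 */
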
asmlinkage long sys_ni_syscall(void)
{
        return -ENOSYS;
}

static int proc_sel(struct task_struct *p, int which, int who)
{
        if (p->pid) {
                switch (which) {
                case PRIO_PROCESS:
                        if (!who && p == current)
                                return 1;
                        return (p->pid == who);
                case PRIO_PGRP:
                        if (!who)
                                who = current->pgrp;
                        return (p->pgrp == who);
                case PRIO_USER:
                        if (!who)
                                who = current->uid;
                        return (p->uid == who);
                }
        }
        return 0;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
        struct task_struct *p;
        unsigned int priority;
        int error;

        if (which > 2 || which < 0)
                return -EINVAL;

        /* normalize: avoid signed division (rounding problems) */
        error = ESRCH;
        priority = niceval;
        if (niceval < 0)
                priority = -niceval;
        if (priority > 20)
                priority = 20;
        priority = (priority * DEF_PRIORITY + 10) / 20 + DEF_PRIORITY;

        if (niceval >= 0) {
                priority = 2*DEF_PRIORITY - priority;
                if (!priority)
                        priority = 1;
        }

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (!proc_sel(p, which, who))
                        continue;
                if (p->uid != current->euid &&
                    p->uid != current->uid && !capable(CAP_SYS_NICE)) {
                        error = EPERM;
                        continue;
                }
                if (error == ESRCH)
                        error = 0;
                if (priority > p->priority && !capable(CAP_SYS_NICE))
                        error = EACCES;
                else
                        p->priority = priority;
        }
        read_unlock(&tasklist_lock);

        return -error;
}

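/*
 * Worked example of the normalization above, assuming DEF_PRIORITY is 20
 * scheduler ticks (its value when HZ is 100):
 *
 *	nice  19:  (19*20+10)/20 + 20 = 39, then 2*20 - 39 =  1 tick
 *	nice   0:  ( 0*20+10)/20 + 20 = 20, then 2*20 - 20 = 20 ticks
 *	nice -20:  (20*20+10)/20 + 20 = 40 (negative nice is not flipped)
 *
 * so the nicest task ends up with a single tick of timeslice and the
 * greediest with twice the default.
 */
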
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a value that has been
 * offset by 20 (ie it returns 0..40 instead of -20..20)
 */
asmlinkage long sys_getpriority(int which, int who)
{
        struct task_struct *p;
        long max_prio = -ESRCH;

        if (which > 2 || which < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        for_each_task (p) {
                if (!proc_sel(p, which, who))
                        continue;
                if (p->priority > max_prio)
                        max_prio = p->priority;
        }
        read_unlock(&tasklist_lock);

        /* scale the priority from timeslice to 0..40 */
        if (max_prio > 0)
                max_prio = (max_prio * 20 + DEF_PRIORITY/2) / DEF_PRIORITY;
        return max_prio;
}

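/*
 * Because of that offset, a userspace wrapper has to undo the bias
 * itself before handing the value back to the caller.  A rough sketch of
 * what a libc-style getpriority() does with the raw syscall result:
 *
 *	long res = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (res >= 0)
 *		nice_value = 20 - res;
 *
 * which maps the 0..40 range back onto the usual -20..20 nice range.
 */
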
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, int cmd, void * arg)
{
        char buffer[256];

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /* For safety, we require "magic" arguments. */
        if (magic1 != LINUX_REBOOT_MAGIC1 ||
            (magic2 != LINUX_REBOOT_MAGIC2 && magic2 != LINUX_REBOOT_MAGIC2A &&
             magic2 != LINUX_REBOOT_MAGIC2B))
                return -EINVAL;

        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
                notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
                printk(KERN_EMERG "Restarting system.\n");
                machine_restart(NULL);
                break;

        case LINUX_REBOOT_CMD_CAD_ON:
                C_A_D = 1;
                break;

        case LINUX_REBOOT_CMD_CAD_OFF:
                C_A_D = 0;
                break;

        case LINUX_REBOOT_CMD_HALT:
                notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
                printk(KERN_EMERG "System halted.\n");
                machine_halt();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
                notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
                printk(KERN_EMERG "Power down.\n");
                machine_power_off();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_RESTART2:
                if (strncpy_from_user(&buffer[0], (char *)arg, sizeof(buffer) - 1) < 0) {
                        unlock_kernel();
                        return -EFAULT;
                }
                buffer[sizeof(buffer) - 1] = '\0';
                notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
                printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
                machine_restart(buffer);
                break;

        default:
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();
        return 0;
}

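/*
 * For reference, the magic numbers mean a userspace caller has to spell
 * the request out explicitly.  A minimal sketch of a reboot from
 * userspace via the raw syscall (and remember: this call does not sync
 * for you):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *	        LINUX_REBOOT_CMD_RESTART, NULL);
 */
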
/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
        if (C_A_D) {
                notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
                machine_restart(NULL);
        } else
                kill_proc(1, SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
        int old_rgid = current->gid;
        int old_egid = current->egid;

        if (rgid != (gid_t) -1) {
                if ((old_rgid == rgid) ||
                    (current->egid == rgid) ||
                    capable(CAP_SETGID))
                        current->gid = rgid;
                else
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if ((old_rgid == egid) ||
                    (current->egid == egid) ||
                    (current->sgid == egid) ||
                    capable(CAP_SETGID))
                        current->fsgid = current->egid = egid;
                else {
                        current->gid = old_rgid;
                        return -EPERM;
                }
        }
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && egid != old_rgid))
                current->sgid = current->egid;
        current->fsgid = current->egid;
        if (current->egid != old_egid)
                current->dumpable = 0;
        return 0;
}

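/*
 * The "completely drop its privileges" case described above boils down to
 * one call from the setgid program's side: setting the real gid resets
 * the saved gid too, so the elevated group cannot be regained.  Sketch:
 *
 *	gid_t rgid = getgid();
 *
 *	if (setregid(rgid, rgid) < 0)
 *		abort();
 *
 * After this the real, effective and saved gids are all rgid.
 */
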
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
        int old_egid = current->egid;

        if (capable(CAP_SETGID))
                current->gid = current->egid = current->sgid = current->fsgid = gid;
        else if ((gid == current->gid) || (gid == current->sgid))
                current->egid = current->fsgid = gid;
        else
                return -EPERM;

        if (current->egid != old_egid)
                current->dumpable = 0;
        return 0;
}

/*
 * cap_emulate_setxuid() fixes the effective / permitted capabilities of
 * a process after a call to setuid, setreuid, or setresuid.
 *
 *  1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
 *  {r,e,s}uid != 0, the permitted and effective capabilities are
 *  cleared.
 *
 *  2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
 *  capabilities of the process are cleared.
 *
 *  3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
 *  capabilities are set to the permitted capabilities.
 *
 *  fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid != 0 should
 *  never happen.
 *
 *  -astor
 */
extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
                                       int old_suid)
{
        if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) &&
            (current->uid != 0 && current->euid != 0 && current->suid != 0)) {
                cap_clear(current->cap_permitted);
                cap_clear(current->cap_effective);
        }
        if (old_euid == 0 && current->euid != 0) {
                cap_clear(current->cap_effective);
        }
        if (old_euid != 0 && current->euid == 0) {
                current->cap_effective = current->cap_permitted;
        }
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
        int old_ruid, old_euid, old_suid, new_ruid;

        new_ruid = old_ruid = current->uid;
        old_euid = current->euid;
        old_suid = current->suid;
        if (ruid != (uid_t) -1) {
                if ((old_ruid == ruid) ||
                    (current->euid == ruid) ||
                    capable(CAP_SETUID))
                        new_ruid = ruid;
                else
                        return -EPERM;
        }
        if (euid != (uid_t) -1) {
                if ((old_ruid == euid) ||
                    (current->euid == euid) ||
                    (current->suid == euid) ||
                    capable(CAP_SETUID))
                        current->fsuid = current->euid = euid;
                else
                        return -EPERM;
        }
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old_ruid))
                current->suid = current->euid;
        current->fsuid = current->euid;
        if (current->euid != old_euid)
                current->dumpable = 0;

        if (new_ruid != old_ruid) {
                /* What if a process setreuid()'s and this brings the
                 * new uid over his NPROC rlimit?  We can check this now
                 * cheaply with the new uid cache, so if it matters
                 * we should be checking for it.  -DaveM
                 */
                free_uid(current);
                current->uid = new_ruid;
                alloc_uid(current);
        }

        if (!issecure(SECURE_NO_SETUID_FIXUP)) {
                cap_emulate_setxuid(old_ruid, old_euid, old_suid);
        }

        return 0;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
        int old_euid = current->euid;
        int old_ruid, old_suid, new_ruid;

        old_ruid = new_ruid = current->uid;
        old_suid = current->suid;
        if (capable(CAP_SETUID))
                new_ruid = current->euid = current->suid = current->fsuid = uid;
        else if ((uid == current->uid) || (uid == current->suid))
                current->fsuid = current->euid = uid;
        else
                return -EPERM;

        if (current->euid != old_euid)
                current->dumpable = 0;

        if (new_ruid != old_ruid) {
                /* See comment above about NPROC rlimit issues... */
                free_uid(current);
                current->uid = new_ruid;
                alloc_uid(current);
        }

        if (!issecure(SECURE_NO_SETUID_FIXUP)) {
                cap_emulate_setxuid(old_ruid, old_euid, old_suid);
        }

        return 0;
}

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
        int old_ruid = current->uid;
        int old_euid = current->euid;
        int old_suid = current->suid;

        if (!capable(CAP_SETUID)) {
                if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
                    (ruid != current->euid) && (ruid != current->suid))
                        return -EPERM;
                if ((euid != (uid_t) -1) && (euid != current->uid) &&
                    (euid != current->euid) && (euid != current->suid))
                        return -EPERM;
                if ((suid != (uid_t) -1) && (suid != current->uid) &&
                    (suid != current->euid) && (suid != current->suid))
                        return -EPERM;
        }
        if (ruid != (uid_t) -1) {
                /* See above commentary about NPROC rlimit issues here. */
                free_uid(current);
                current->uid = ruid;
                alloc_uid(current);
        }
        if (euid != (uid_t) -1) {
                if (euid != current->euid)
                        current->dumpable = 0;
                current->euid = euid;
                current->fsuid = euid;
        }
        if (suid != (uid_t) -1)
                current->suid = suid;

        if (!issecure(SECURE_NO_SETUID_FIXUP)) {
                cap_emulate_setxuid(old_ruid, old_euid, old_suid);
        }

        return 0;
}

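/*
 * With all three ids exposed, the seteuid()-style "drop and regain"
 * dance becomes explicit.  A rough sketch for a setuid-root program
 * (illustrative only, error handling trimmed):
 *
 *	uid_t ruid = getuid();
 *
 *	setresuid(-1, ruid, -1);	temporarily run as the real user
 *	...
 *	setresuid(-1, 0, -1);		regain root from the saved uid
 *	...
 *	setresuid(ruid, ruid, ruid);	permanent drop, nothing saved
 */
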
asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid)
{
        int retval;

        if (!(retval = put_user(current->uid, ruid)) &&
            !(retval = put_user(current->euid, euid)))
                retval = put_user(current->suid, suid);

        return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
        if (!capable(CAP_SETGID)) {
                if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
                    (rgid != current->egid) && (rgid != current->sgid))
                        return -EPERM;
                if ((egid != (gid_t) -1) && (egid != current->gid) &&
                    (egid != current->egid) && (egid != current->sgid))
                        return -EPERM;
                if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
                    (sgid != current->egid) && (sgid != current->sgid))
                        return -EPERM;
        }
        if (rgid != (gid_t) -1)
                current->gid = rgid;
        if (egid != (gid_t) -1) {
                if (egid != current->egid)
                        current->dumpable = 0;
                current->egid = egid;
                current->fsgid = egid;
        }
        if (sgid != (gid_t) -1)
                current->sgid = sgid;
        return 0;
}

asmlinkage long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid)
{
        int retval;

        if (!(retval = put_user(current->gid, rgid)) &&
            !(retval = put_user(current->egid, egid)))
                retval = put_user(current->sgid, sgid);

        return retval;
}

562 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
563 * is used for "access()" and for the NFS daemon (letting nfsd stay at
564 * whatever uid it wants to). It normally shadows "euid", except when
565 * explicitly set by setfsuid() or for access..
567 asmlinkage long sys_setfsuid(uid_t uid)
569 int old_fsuid;
571 old_fsuid = current->fsuid;
572 if (uid == current->uid || uid == current->euid ||
573 uid == current->suid || uid == current->fsuid ||
574 capable(CAP_SETUID))
575 current->fsuid = uid;
576 if (current->fsuid != old_fsuid)
577 current->dumpable = 0;
579 /* We emulate fsuid by essentially doing a scaled-down version
580 * of what we did in setresuid and friends. However, we only
581 * operate on the fs-specific bits of the process' effective
582 * capabilities
584 * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
585 * if not, we might be a bit too harsh here.
588 if (!issecure(SECURE_NO_SETUID_FIXUP)) {
589 if (old_fsuid == 0 && current->fsuid != 0) {
590 cap_t(current->cap_effective) &= ~CAP_FS_MASK;
592 if (old_fsuid != 0 && current->fsuid == 0) {
593 cap_t(current->cap_effective) |=
594 (cap_t(current->cap_permitted) & CAP_FS_MASK);
598 return old_fsuid;
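/*
 * The nfsd-style use mentioned above is per-request impersonation:
 * switch only the fsuid around a filesystem access, leaving the euid
 * (and therefore signal and capability semantics) alone.  Sketch,
 * where request_uid is whatever uid arrived with the request:
 *
 *	int prev = setfsuid(request_uid);
 *	... perform the filesystem operation on the user's behalf ...
 *	setfsuid(prev);
 */
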
/*
 * Same as above, but for the filesystem gid.  ("Samma på svenska..")
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
        int old_fsgid;

        old_fsgid = current->fsgid;
        if (gid == current->gid || gid == current->egid ||
            gid == current->sgid || gid == current->fsgid ||
            capable(CAP_SETGID))
                current->fsgid = gid;
        if (current->fsgid != old_fsgid)
                current->dumpable = 0;

        return old_fsgid;
}

asmlinkage long sys_times(struct tms * tbuf)
{
        struct tms temp;

        /*
         *	In the SMP world we might just be unlucky and have one of
         *	the times increment as we use it. Since the value is an
         *	atomically safe type this is just fine. Conceptually it's
         *	as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                temp.tms_utime = HZ_TO_STD(current->times.tms_utime);
                temp.tms_stime = HZ_TO_STD(current->times.tms_stime);
                temp.tms_cutime = HZ_TO_STD(current->times.tms_cutime);
                temp.tms_cstime = HZ_TO_STD(current->times.tms_cstime);
                if (copy_to_user(tbuf, &temp, sizeof(struct tms)))
                        return -EFAULT;
        }
        return HZ_TO_STD(jiffies);
}

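/*
 * Both the tms fields and the return value are in clock ticks, so a
 * portable userspace caller divides by the tick rate.  Rough sketch:
 *
 *	struct tms t;
 *	clock_t now = times(&t);
 *	long ticks_per_sec = sysconf(_SC_CLK_TCK);
 *	double user_secs = (double) t.tms_utime / ticks_per_sec;
 */
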
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
        struct task_struct * p;
        int err = -EINVAL;

        if (!pid)
                pid = current->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
        read_lock(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_pid(pid);
        if (!p)
                goto out;

        if (p->p_pptr == current || p->p_opptr == current) {
                err = -EPERM;
                if (p->session != current->session)
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else if (p != current)
                goto out;
        err = -EPERM;
        if (p->leader)
                goto out;
        if (pgid != pid) {
                struct task_struct * tmp;
                for_each_task (tmp) {
                        if (tmp->pgrp == pgid &&
                            tmp->session == current->session)
                                goto ok_pgid;
                }
                goto out;
        }

ok_pgid:
        p->pgrp = pgid;
        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        read_unlock(&tasklist_lock);
        return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
        if (!pid) {
                return current->pgrp;
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p)
                        retval = p->pgrp;
                read_unlock(&tasklist_lock);
                return retval;
        }
}

asmlinkage long sys_getpgrp(void)
{
        /* SMP - assuming writes are word atomic this is fine */
        return current->pgrp;
}

asmlinkage long sys_getsid(pid_t pid)
{
        if (!pid) {
                return current->session;
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p)
                        retval = p->session;
                read_unlock(&tasklist_lock);
                return retval;
        }
}

asmlinkage long sys_setsid(void)
{
        struct task_struct * p;
        int err = -EPERM;

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (p->pgrp == current->pid)
                        goto out;
        }

        current->leader = 1;
        current->session = current->pgrp = current->pid;
        current->tty = NULL;
        current->tty_old_pgrp = 0;
        err = current->pgrp;
out:
        read_unlock(&tasklist_lock);
        return err;
}

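/*
 * The loop above rejects a caller that is already a process-group
 * leader, which is why the usual daemonizing sequence forks first: the
 * child is never a group leader, so its setsid() succeeds.  Sketch:
 *
 *	if (fork() > 0)
 *		exit(0);
 *	setsid();	child becomes session + group leader, with no tty
 */
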
/*
 * Supplementary group IDs
 */
asmlinkage long sys_getgroups(int gidsetsize, gid_t *grouplist)
{
        int i;

        /*
         *	SMP: Nobody else can change our grouplist. Thus we are
         *	safe.
         */

        if (gidsetsize < 0)
                return -EINVAL;
        i = current->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize)
                        return -EINVAL;
                if (copy_to_user(grouplist, current->groups, sizeof(gid_t)*i))
                        return -EFAULT;
        }
        return i;
}

/*
 *	SMP: Our groups are not shared. We can copy to/from them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t *grouplist)
{
        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned) gidsetsize > NGROUPS)
                return -EINVAL;
        if (copy_from_user(current->groups, grouplist, gidsetsize * sizeof(gid_t)))
                return -EFAULT;
        current->ngroups = gidsetsize;
        return 0;
}

int in_group_p(gid_t grp)
{
        if (grp != current->fsgid) {
                int i = current->ngroups;
                if (i) {
                        gid_t *groups = current->groups;
                        do {
                                if (*groups == grp)
                                        goto out;
                                groups++;
                                i--;
                        } while (i);
                }
                return 0;
        }
out:
        return 1;
}

/*
 * This should really be a blocking read-write lock
 * rather than a semaphore. Anybody want to implement
 * one?
 */
DECLARE_MUTEX(uts_sem);

asmlinkage long sys_newuname(struct new_utsname * name)
{
        int errno = 0;

        down(&uts_sem);
        if (copy_to_user(name, &system_utsname, sizeof *name))
                errno = -EFAULT;
        up(&uts_sem);
        return errno;
}

asmlinkage long sys_sethostname(char *name, int len)
{
        int errno;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        down(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(system_utsname.nodename, name, len)) {
                system_utsname.nodename[len] = 0;
                errno = 0;
        }
        up(&uts_sem);
        return errno;
}

asmlinkage long sys_gethostname(char *name, int len)
{
        int i, errno;

        if (len < 0)
                return -EINVAL;
        down(&uts_sem);
        i = 1 + strlen(system_utsname.nodename);
        if (i > len)
                i = len;
        errno = 0;
        if (copy_to_user(name, system_utsname.nodename, i))
                errno = -EFAULT;
        up(&uts_sem);
        return errno;
}

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char *name, int len)
{
        int errno;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;

        down(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(system_utsname.domainname, name, len)) {
                errno = 0;
                system_utsname.domainname[len] = 0;
        }
        up(&uts_sem);
        return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim)
{
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        else
                return copy_to_user(rlim, current->rlim + resource, sizeof(*rlim))
                        ? -EFAULT : 0;
}

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit *rlim)
{
        struct rlimit x;
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        memcpy(&x, current->rlim + resource, sizeof(*rlim));
        if (x.rlim_cur > 0x7FFFFFFF)
                x.rlim_cur = 0x7FFFFFFF;
        if (x.rlim_max > 0x7FFFFFFF)
                x.rlim_max = 0x7FFFFFFF;
        return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit *rlim)
{
        struct rlimit new_rlim, *old_rlim;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
        if (new_rlim.rlim_cur < 0 || new_rlim.rlim_max < 0)
                return -EINVAL;
        old_rlim = current->rlim + resource;
        if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
             (new_rlim.rlim_max > old_rlim->rlim_max)) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE) {
                if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN)
                        return -EPERM;
        }
        *old_rlim = new_rlim;
        return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This is SMP safe.  Either we are called from sys_getrusage on ourselves
 * below (we know we aren't going to exit/disappear and only we change our
 * rusage counters), or we are called from wait4() on a process which is
 * either stopped or zombied.  In the zombied case the task won't get
 * reaped till shortly after the call to getrusage(), in both cases the
 * task being examined is in a frozen state so the counters won't change.
 *
 * FIXME! Get the fault counts properly!
 */
int getrusage(struct task_struct *p, int who, struct rusage *ru)
{
        struct rusage r;

        memset((char *) &r, 0, sizeof(r));
        switch (who) {
        case RUSAGE_SELF:
                r.ru_utime.tv_sec = CT_TO_SECS(p->times.tms_utime);
                r.ru_utime.tv_usec = CT_TO_USECS(p->times.tms_utime);
                r.ru_stime.tv_sec = CT_TO_SECS(p->times.tms_stime);
                r.ru_stime.tv_usec = CT_TO_USECS(p->times.tms_stime);
                r.ru_minflt = p->min_flt;
                r.ru_majflt = p->maj_flt;
                r.ru_nswap = p->nswap;
                break;
        case RUSAGE_CHILDREN:
                r.ru_utime.tv_sec = CT_TO_SECS(p->times.tms_cutime);
                r.ru_utime.tv_usec = CT_TO_USECS(p->times.tms_cutime);
                r.ru_stime.tv_sec = CT_TO_SECS(p->times.tms_cstime);
                r.ru_stime.tv_usec = CT_TO_USECS(p->times.tms_cstime);
                r.ru_minflt = p->cmin_flt;
                r.ru_majflt = p->cmaj_flt;
                r.ru_nswap = p->cnswap;
                break;
        default:
                r.ru_utime.tv_sec = CT_TO_SECS(p->times.tms_utime + p->times.tms_cutime);
                r.ru_utime.tv_usec = CT_TO_USECS(p->times.tms_utime + p->times.tms_cutime);
                r.ru_stime.tv_sec = CT_TO_SECS(p->times.tms_stime + p->times.tms_cstime);
                r.ru_stime.tv_usec = CT_TO_USECS(p->times.tms_stime + p->times.tms_cstime);
                r.ru_minflt = p->min_flt + p->cmin_flt;
                r.ru_majflt = p->maj_flt + p->cmaj_flt;
                r.ru_nswap = p->nswap + p->cnswap;
                break;
        }
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage *ru)
{
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
        return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
        mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
        return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
{
        int error = 0;
        int sig;

        switch (option) {
        case PR_SET_PDEATHSIG:
                sig = arg2;
                if (sig > _NSIG) {
                        error = -EINVAL;
                        break;
                }
                current->pdeath_signal = sig;
                break;
        case PR_GET_PDEATHSIG:
                error = put_user(current->pdeath_signal, (int *)arg2);
                break;
        case PR_GET_DUMPABLE:
                if (current->dumpable)
                        error = 1;
                break;
        case PR_SET_DUMPABLE:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->dumpable = arg2;
                break;
        default:
                error = -EINVAL;
                break;
        }
        return error;
}

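/*
 * Userspace reaches PR_SET_PDEATHSIG through the ordinary prctl()
 * wrapper; a child that must not outlive its parent would do something
 * like this (illustrative sketch):
 *
 *	#include <sys/prctl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);
 *	if (getppid() == 1)
 *		raise(SIGTERM);	parent already exited before the call
 */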