/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_resource.c,v 1.35 2008/05/27 05:25:34 dillon Exp $
 */
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
static int donice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long	uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);
/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int	low;
	int	who;
};

static int getpriority_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) && p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	rel_mplock();
	return (error);
}
/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}
struct setpriority_info {
	int	prio;
	int	who;
	int	error;
	int	found;
};

static int setpriority_callback(struct proc *p, void *data);
/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	get_mplock();

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (!PRISON_CHECK(curp->p_ucred, p->p_ucred))
			break;
		error = donice(p, uap->prio);
		found++;
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	rel_mplock();
	if (found == 0)
		error = ESRCH;
	return (error);
}
static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}
static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp)
		chgp->p_usched->resetpriority(lp);
	return (0);
}
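/*
 * Illustrative userland sketch (not part of the original file): lowering a
 * process's priority needs no privilege, while raising it (numerically
 * lowering the nice value) requires PRIV_SCHED_SETPRIORITY, and donice()
 * above clamps out-of-range values to PRIO_MIN/PRIO_MAX.  A typical caller
 * might look like:
 *
 *	#include <sys/resource.h>
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");
 *	if (setpriority(PRIO_PROCESS, 0, prio + 4) < 0)
 *		err(1, "setpriority");
 */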
/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	get_mplock();
	if (uap->pid == 0) {
		/* curproc already loaded on p */
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * like sys_rtprio()
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle priority,
			 * there is a potential for system deadlock if an
			 * idleprio process gains a lock on a resource that
			 * other processes need (and the idleprio process can't
			 * run due to a CPU-bound normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

done:
	rel_mplock();
	return (error);
}
/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *curp = curproc;
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curthread->td_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	get_mplock();
	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for reasons
			 * which should be obvious.  However, for idle priority,
			 * there is a potential for system deadlock if an
			 * idleprio process gains a lock on a resource that
			 * other processes need (and the idleprio process can't
			 * run due to a CPU-bound normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return (error);
}
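/*
 * Illustrative userland sketch (not part of the original file): rtprio(2)
 * takes RTP_LOOKUP or RTP_SET, a pid (0 for the calling process), and a
 * struct rtprio.  Per the checks above, an unprivileged process may move
 * itself to the idle class, but not to a realtime class:
 *
 *	#include <sys/rtprio.h>
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (rtprio(RTP_SET, 0, &rtp) < 0)
 *		err(1, "rtprio");
 */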
/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}
/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}
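/*
 * Illustrative userland sketch (not part of the original file): these
 * syscalls back getrlimit(2)/setrlimit(2).  A soft limit can be raised up
 * to the hard limit without privilege, e.g.:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
 *			err(1, "setrlimit");
 *	}
 */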
/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
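/*
 * Worked example (illustrative, not from the original source): td_uticks and
 * td_sticks appear to accumulate in microseconds, hence the divide/modulo by
 * 1000000 above.  An accumulated td_uticks of 2500000 therefore yields
 * up->tv_sec = 2 and up->tv_usec = 500000.
 */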
/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}
/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	get_mplock();

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	rel_mplock();
	return (error);
}
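/*
 * Illustrative userland sketch (not part of the original file): RUSAGE_SELF
 * returns the per-lwp totals aggregated by calcru_proc() above, while
 * RUSAGE_CHILDREN returns p_cru, the totals of reaped children:
 *
 *	#include <sys/resource.h>
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("user %ld.%06ld sys %ld.%06ld\n",
 *		    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
 *		    (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
 */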
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
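/*
 * Usage sketch (illustrative, not from the original file): uifind() returns
 * the uidinfo with a reference held (uicreate() starts ui_ref at 1 and the
 * lookup path takes one via uihold()), so callers pair it with uidrop()
 * once their accounting updates are done:
 *
 *	struct uidinfo *uip;
 *
 *	uip = uifind(uid);
 *	... per-uid accounting, e.g. chgproccnt(uip, 1, proc_limit) ...
 *	uidrop(uip);
 *
 * 'proc_limit' is a hypothetical stand-in for whatever cap the caller
 * enforces.
 */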
/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}
/*
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 1;	/* we're returning a ref */
	uip->ui_posixlocks = 0;
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock_wr(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		varsymset_clean(&uip->ui_varsymset);
		spin_uninit(&uip->ui_lock);
		FREE(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	}
	spin_unlock_wr(&uihash_lock);

	return (uip);
}
/*
 * MPSAFE
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *uip;

	spin_lock_rd(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_rd(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_rd(&uihash_lock);
	}
	return (uip);
}
/*
 * MPSAFE
 */
static __inline void
uifree(struct uidinfo *uip)
{
	spin_lock_wr(&uihash_lock);

	/*
	 * Note that we're taking a read lock even though we
	 * modify the structure because we know nobody can find
	 * it now that we've locked uihash_lock.  If somebody
	 * can get to it through a stored pointer, the reference
	 * count will not be 0 and in that case we don't modify
	 * the struct.
	 */
	spin_lock_rd(&uip->ui_lock);
	if (uip->ui_ref != 0) {
		/*
		 * Someone found the uid and got a ref when we
		 * unlocked.  No need to free any more.
		 */
		spin_unlock_rd(&uip->ui_lock);
		spin_unlock_wr(&uihash_lock);	/* balance the write lock taken above */
		return;
	}
	if (uip->ui_sbsize != 0)
		/* XXX no %qd in kernel.  Truncate. */
		kprintf("freeing uidinfo: uid = %d, sbsize = %ld\n",
		    uip->ui_uid, (long)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	LIST_REMOVE(uip, ui_hash);
	spin_unlock_wr(&uihash_lock);
	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_unlock_rd(&uip->ui_lock);
	spin_uninit(&uip->ui_lock);
	FREE(uip, M_UIDINFO);
}
/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
}
/*
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
	KKASSERT(uip->ui_ref > 0);
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uip);
	}
}
void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}
/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock_wr(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock_wr(&uip->ui_lock);
	return ret;
}
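/*
 * Usage sketch (illustrative, not from the original file): the process
 * creation path typically charges the new process against the parent's uid
 * and fails when the per-uid cap is hit.  'nprocs_limit' is a hypothetical
 * stand-in for whatever RLIMIT_NPROC-derived value the caller uses:
 *
 *	if (!chgproccnt(uip, 1, nprocs_limit))
 *		return (EAGAIN);
 */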
/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock_wr(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hi water mark when we exceed the user's allowed socket buffer
	 * space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock_wr(&uip->ui_lock);
	return (1);
}
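/*
 * Worked example (illustrative, not from the original source): suppose the
 * uid already accounts for ui_sbsize = 262144 bytes, max = 262144, the
 * socket's current *hiwat is 0, and the caller asks for to = 65536.  Then
 * new = 327680 exceeds max, so the request is scaled down to
 * to = 65536 * 262144 / 327680 = 52428 bytes (but never below MCLBYTES),
 * while the full new total is still recorded in ui_sbsize.
 */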