kernel - Fix excessive call stack depth on stuck interrupt
[dragonfly.git] / sys / kern / kern_varsym.c
blob 953dccf0315b14f8e6a48923a585f74c7a2471bb
1 /*
2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
36 * This module implements variable storage and management for variant
37 * symlinks. These variables may also be used for general purposes.
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/ucred.h>
44 #include <sys/resourcevar.h>
45 #include <sys/proc.h>
46 #include <sys/priv.h>
47 #include <sys/jail.h>
48 #include <sys/queue.h>
49 #include <sys/sysctl.h>
50 #include <sys/malloc.h>
51 #include <sys/varsym.h>
52 #include <sys/sysproto.h>
/* malloc type used for all variant-symlink allocations in this module */
MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");

/* system-wide variable set; consulted by varsymfind() for VARSYM_SYS */
struct varsymset varsymset_sys;
/*
 * Initialize the variant symlink subsystem.
 *
 * Sets up the system-wide variable set with no initial contents
 * (the NULL 'copy' argument means nothing is duplicated into it).
 * Registered to run early in boot via SYSINIT.
 */
static void
varsym_sysinit(void *dummy)
{
	varsymset_init(&varsymset_sys, NULL);
}
SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);
69 * varsymreplace() - called from namei
71 * Do variant symlink variable substitution
73 int
74 varsymreplace(char *cp, int linklen, int maxlen)
76 int rlen;
77 int xlen;
78 int nlen;
79 int i;
80 varsym_t var;
82 rlen = linklen;
83 while (linklen > 1) {
84 if (cp[0] == '$' && cp[1] == '{') {
85 for (i = 2; i < linklen; ++i) {
86 if (cp[i] == '}')
87 break;
89 if (i < linklen &&
90 (var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
91 ) {
92 xlen = i + 1; /* bytes to strike */
93 nlen = strlen(var->vs_data); /* bytes to add */
94 if (linklen + nlen - xlen >= maxlen) {
95 varsymdrop(var);
96 return(-1);
98 KKASSERT(linklen >= xlen);
99 if (linklen != xlen)
100 bcopy(cp + xlen, cp + nlen, linklen - xlen);
101 bcopy(var->vs_data, cp, nlen);
102 linklen += nlen - xlen; /* new relative length */
103 rlen += nlen - xlen; /* returned total length */
104 cp += nlen; /* adjust past replacement */
105 linklen -= nlen; /* adjust past replacement */
106 maxlen -= nlen; /* adjust past replacement */
107 } else {
109 * It's ok if i points to the '}', it will simply be
110 * skipped. i could also have hit linklen.
112 cp += i;
113 linklen -= i;
114 maxlen -= i;
116 } else {
117 ++cp;
118 --linklen;
119 --maxlen;
122 return(rlen);
126 * varsym_set() system call
128 * (int level, const char *name, const char *data)
131 sys_varsym_set(struct varsym_set_args *uap)
133 char name[MAXVARSYM_NAME];
134 char *buf;
135 struct thread *td;
136 struct lwp *lp;
137 int error;
139 td = curthread;
140 lp = td->td_lwp;
142 if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
143 goto done2;
144 buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
145 if (uap->data &&
146 (error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0)
148 goto done1;
151 switch(uap->level) {
152 case VARSYM_SYS:
153 if (lp != NULL && td->td_ucred->cr_prison != NULL)
154 uap->level = VARSYM_PRISON;
155 /* fall through */
156 case VARSYM_PRISON:
157 if (lp != NULL &&
158 (error = priv_check_cred(td->td_ucred, PRIV_VARSYM_SYS, 0)) != 0)
159 break;
160 /* fall through */
161 case VARSYM_USER:
162 /* XXX check jail / implement per-jail user */
163 /* fall through */
164 case VARSYM_PROC:
165 if (uap->data) {
166 (void)varsymmake(uap->level, name, NULL);
167 error = varsymmake(uap->level, name, buf);
168 } else {
169 error = varsymmake(uap->level, name, NULL);
171 break;
173 done1:
174 kfree(buf, M_TEMP);
175 done2:
176 return(error);
180 * varsym_get() system call
182 * (int mask, const char *wild, char *buf, int bufsize)
184 * MPALMOSTSAFE
187 sys_varsym_get(struct varsym_get_args *uap)
189 char wild[MAXVARSYM_NAME];
190 varsym_t sym;
191 int error;
192 int dlen;
194 if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
195 goto done;
196 sym = varsymfind(uap->mask, wild, strlen(wild));
197 if (sym == NULL) {
198 error = ENOENT;
199 goto done;
201 dlen = strlen(sym->vs_data);
202 if (dlen < uap->bufsize) {
203 copyout(sym->vs_data, uap->buf, dlen + 1);
204 } else if (uap->bufsize) {
205 copyout("", uap->buf, 1);
207 uap->sysmsg_result = dlen + 1;
208 varsymdrop(sym);
209 done:
211 return(error);
/*
 * varsym_list() system call
 *
 * (int level, char *buf, int maxsize, int *marker)
 *
 * MPALMOSTSAFE
 *
 * Dump the variables of one level into uap->buf as consecutive
 * NUL-terminated (name, data) string pairs, resuming at the index
 * stored in *marker.  On return *marker is updated so userland can
 * iterate; -1 means the list was exhausted.  sysmsg_result holds the
 * number of bytes stored.
 */
int
sys_varsym_list(struct varsym_list_args *uap)
{
	struct varsymset *vss;
	struct varsyment *ve;
	struct thread *td;
	struct proc *p;
	struct lwp *lp;
	int i;
	int error;
	int bytes;
	int earlyterm;
	int marker;

	/*
	 * Get the marker from userspace.
	 */
	if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
		goto done;

	/*
	 * Figure out the varsym set.
	 */
	td = curthread;
	lp = td->td_lwp;
	p = lp ? lp->lwp_proc : NULL;

	vss = NULL;

	switch (uap->level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (lp)
			vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (lp && td->td_ucred->cr_prison)
			vss = &td->td_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Loop through the variables and dump them to uap->buf
	 */
	i = 0;
	bytes = 0;
	earlyterm = 0;

	lockmgr(&vss->vx_lock, LK_SHARED);
	TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
		varsym_t sym = ve->ve_sym;
		int namelen = strlen(sym->vs_name);
		int datalen = strlen(sym->vs_data);
		int totlen = namelen + datalen + 2;	/* both NULs included */

		/*
		 * Skip to our index point
		 */
		if (i < marker) {
			++i;
			continue;
		}

		/*
		 * Stop if there is insufficient space in the user buffer.
		 * If we haven't stored anything yet return EOVERFLOW.
		 * Note that the marker index (i) does not change.
		 */
		if (bytes + totlen > uap->maxsize) {
			if (bytes == 0)
				error = EOVERFLOW;
			earlyterm = 1;
			break;
		}

		/* copy the name, then the data; on a data fault back the
		 * name out again so 'bytes' only counts complete pairs */
		error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
		if (error == 0) {
			bytes += namelen + 1;
			error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
			if (error == 0)
				bytes += datalen + 1;
			else
				bytes -= namelen + 1;	/* revert if error */
		}
		if (error) {
			earlyterm = 1;
			break;
		}
		++i;
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);

	/*
	 * Save the marker back.  If no error occurred and earlyterm is clear
	 * the marker is set to -1 indicating that the variable list has been
	 * exhausted.  If no error occurred the number of bytes loaded into
	 * the buffer will be returned, otherwise the syscall code returns -1.
	 */
	if (error == 0 && earlyterm == 0)
		marker = -1;
	else
		marker = i;
	if (error == 0)
		error = copyout(&marker, uap->marker, sizeof(marker));
	uap->sysmsg_result = bytes;
done:
	return(error);
}
341 * Lookup a variant symlink. XXX use a hash table.
343 static
344 struct varsyment *
345 varsymlookup(struct varsymset *vss, const char *name, int namelen)
347 struct varsyment *ve;
349 KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
350 TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
351 varsym_t var = ve->ve_sym;
352 if (var->vs_namelen == namelen &&
353 bcmp(name, var->vs_name, namelen) == 0
355 return(ve);
358 return(NULL);
361 static
362 void
363 vsslock(struct varsymset **vss, struct varsymset *n)
365 if (*vss) {
366 lockmgr(&(*vss)->vx_lock, LK_RELEASE);
368 lockmgr(&n->vx_lock, LK_SHARED);
369 *vss = n;
/*
 * Find a variable by name.  The sets are searched from most to least
 * specific: per-process, then per-user, then the prison set (for a
 * jailed process) or the system-wide set.  mask selects which levels
 * participate in the search.
 *
 * On success the returned symbol carries an extra reference which the
 * caller must release with varsymdrop().  Returns NULL if not found.
 * Locking is handed off between sets via vsslock(); only the lock on
 * the last-searched set is held at the end and is released here.
 */
varsym_t
varsymfind(int mask, const char *name, int namelen)
{
	struct varsyment *ve = NULL;
	struct varsymset *vss = NULL;
	struct thread *td;
	struct lwp *lp;
	struct proc *p;
	varsym_t sym;

	td = curthread;
	lp = td->td_lwp;
	p = lp ? lp->lwp_proc : NULL;

	/* process and user levels only exist in a process context */
	if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && lp != NULL) {
		if (mask & VARSYM_PROC_MASK) {
			vsslock(&vss, &p->p_varsymset);
			ve = varsymlookup(vss, name, namelen);
		}
		if (ve == NULL && (mask & VARSYM_USER_MASK)) {
			vsslock(&vss, &td->td_ucred->cr_uidinfo->ui_varsymset);
			ve = varsymlookup(vss, name, namelen);
		}
	}
	if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
		/* jailed processes see their prison set instead of the
		 * system-wide one */
		if (lp != NULL && td->td_ucred->cr_prison) {
			vsslock(&vss, &td->td_ucred->cr_prison->pr_varsymset);
			ve = varsymlookup(vss, name, namelen);
		} else {
			vsslock(&vss, &varsymset_sys);
			ve = varsymlookup(vss, name, namelen);
		}
	}
	if (ve) {
		/* take the reference while the set lock is still held */
		sym = ve->ve_sym;
		atomic_add_int(&sym->vs_refs, 1);
	} else {
		sym = NULL;
	}
	if (vss)
		lockmgr(&vss->vx_lock, LK_RELEASE);
	return sym;
}
/*
 * Create (data != NULL) or delete (data == NULL) a variable in the set
 * selected by level.  Returns 0 on success, EINVAL if the level has no
 * set in the current context, E2BIG if the set has reached its size
 * limit, or ENOENT when deleting a nonexistent variable.
 *
 * NOTE: creation does not replace an existing entry of the same name;
 * sys_varsym_set() deletes the old entry before calling here.
 */
int
varsymmake(int level, const char *name, const char *data)
{
	struct varsymset *vss = NULL;
	struct varsyment *ve;
	struct thread *td;
	struct proc *p;
	struct lwp *lp;
	varsym_t sym;
	int namelen = strlen(name);
	int datalen;
	int error;

	td = curthread;
	lp = td->td_lwp;
	p = lp ? lp->lwp_proc : NULL;

	switch(level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (lp)
			vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (lp && td->td_ucred->cr_prison)
			vss = &td->td_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		return EINVAL;
	}
	lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
	if (data && vss->vx_setsize >= MAXVARSYM_SET) {
		error = E2BIG;
	} else if (data) {
		/*
		 * One allocation holds the varsym header followed by the
		 * name and data strings packed back to back (two NULs,
		 * hence the +2).
		 */
		datalen = strlen(data);
		ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
		sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
		ve->ve_sym = sym;
		sym->vs_refs = 1;
		sym->vs_namelen = namelen;
		sym->vs_name = (char *)(sym + 1);
		sym->vs_data = sym->vs_name + namelen + 1;
		strcpy(sym->vs_name, name);
		strcpy(sym->vs_data, data);
		TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
		/* accounting uses a fixed +8 fudge rather than the exact +2 */
		vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
		error = 0;
	} else {
		/* deletion: unlink the entry and drop its symbol reference */
		if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
			TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
			vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
			varsymdrop(ve->ve_sym);
			kfree(ve, M_VARSYM);
			error = 0;
		} else {
			error = ENOENT;
		}
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);
	return(error);
}
485 void
486 varsymdrop(varsym_t sym)
488 KKASSERT(sym->vs_refs > 0);
489 if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
490 kfree(sym, M_VARSYM);
495 * Insert a duplicate of ve in vss. Does not do any locking,
496 * so it is the callers responsibility to make sure nobody
497 * else can mess with the TAILQ in vss at the same time.
499 static void
500 varsymdup(struct varsymset *vss, struct varsyment *ve)
502 struct varsyment *nve;
504 nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
505 nve->ve_sym = ve->ve_sym;
506 ++nve->ve_sym->vs_refs; /* can't be reached, no need for atomic add */
508 * We're only called through varsymset_init() so vss is not yet reachable,
509 * no need to lock.
511 TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
514 void
515 varsymset_init(struct varsymset *vss, struct varsymset *copy)
517 struct varsyment *ve;
519 TAILQ_INIT(&vss->vx_queue);
520 lockinit(&vss->vx_lock, "vx", 0, 0);
521 if (copy) {
522 TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
523 varsymdup(vss, ve);
525 vss->vx_setsize = copy->vx_setsize;
529 void
530 varsymset_clean(struct varsymset *vss)
532 struct varsyment *ve;
534 lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
535 while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
536 TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
537 varsymdrop(ve->ve_sym);
538 kfree(ve, M_VARSYM);
540 vss->vx_setsize = 0;
541 lockmgr(&vss->vx_lock, LK_RELEASE);