lock around accesses to uidinfo and varsymset
sys/kern/kern_varsym.c
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_varsym.c,v 1.9 2007/04/30 07:18:54 dillon Exp $
 */

/*
 * This module implements variable storage and management for variant
 * symlinks.  These variables may also be used for general purposes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ucred.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/varsym.h>
#include <sys/sysproto.h>

MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");

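/*
 * System-wide variable set.  Processes running inside a prison use the
 * prison's own set instead (see varsymfind() and varsymmake()).
 */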
struct varsymset varsymset_sys;

/*
 * Initialize the variant symlink subsystem
 */
static void
varsym_sysinit(void *dummy)
{
	varsymset_init(&varsymset_sys, NULL);
}
SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);

/*
 * varsymreplace() - called from namei
 *
 *	Do variant symlink variable substitution
 */
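/*
 * For example (illustrative only), with a variable ARCH whose value is
 * "x86_64", a link target of "/libexec/${ARCH}/ld-elf.so" is rewritten
 * in place to "/libexec/x86_64/ld-elf.so".  Returns the new total length,
 * or -1 if the expansion does not fit in maxlen.
 */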
int
varsymreplace(char *cp, int linklen, int maxlen)
{
	int rlen;
	int xlen;
	int nlen;
	int i;
	varsym_t var;

	rlen = linklen;
	while (linklen > 1) {
		if (cp[0] == '$' && cp[1] == '{') {
			for (i = 2; i < linklen; ++i) {
				if (cp[i] == '}')
					break;
			}
			if (i < linklen &&
			    (var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
			) {
				xlen = i + 1;			/* bytes to strike */
				nlen = strlen(var->vs_data);	/* bytes to add */
				if (linklen + nlen - xlen >= maxlen) {
					varsymdrop(var);
					return(-1);
				}
				KKASSERT(linklen >= xlen);
				if (linklen != xlen)
					bcopy(cp + xlen, cp + nlen, linklen - xlen);
				bcopy(var->vs_data, cp, nlen);
				linklen += nlen - xlen;	/* new relative length */
				rlen += nlen - xlen;	/* returned total length */
				cp += nlen;		/* adjust past replacement */
				linklen -= nlen;	/* adjust past replacement */
				maxlen -= nlen;		/* adjust past replacement */
			} else {
				/*
				 * It's ok if i points to the '}', it will simply be
				 * skipped.  i could also have hit linklen.
				 */
				cp += i;
				linklen -= i;
				maxlen -= i;
			}
		} else {
			++cp;
			--linklen;
			--maxlen;
		}
	}
	return(rlen);
}

/*
 * varsym_set() system call
 *
 * (int level, const char *name, const char *data)
 */
int
sys_varsym_set(struct varsym_set_args *uap)
{
	char name[MAXVARSYM_NAME];
	char *buf;
	int error;

	if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
		goto done2;
	buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
	if (uap->data &&
	    (error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0)
	{
		goto done1;
	}
	switch(uap->level) {
	case VARSYM_SYS:
		if (curthread->td_proc != NULL && curthread->td_proc->p_ucred->cr_prison != NULL)
			uap->level = VARSYM_PRISON;
		/* fall through */
	case VARSYM_PRISON:
		if (curthread->td_proc != NULL &&
		    (error = priv_check_cred(curthread->td_proc->p_ucred, PRIV_ROOT, PRISON_ROOT)) != 0)
			break;
		/* fall through */
	case VARSYM_USER:
		/* XXX check jail / implement per-jail user */
		/* fall through */
	case VARSYM_PROC:
		if (uap->data) {
			(void)varsymmake(uap->level, name, NULL);
			error = varsymmake(uap->level, name, buf);
		} else {
			error = varsymmake(uap->level, name, NULL);
		}
		break;
	}
done1:
	kfree(buf, M_TEMP);
done2:
	return(error);
}

/*
 * varsym_get() system call
 *
 * (int mask, const char *wild, char *buf, int bufsize)
 */
int
sys_varsym_get(struct varsym_get_args *uap)
{
	char wild[MAXVARSYM_NAME];
	varsym_t sym;
	int error;
	int dlen;

	if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
		goto done;
	sym = varsymfind(uap->mask, wild, strlen(wild));
	if (sym == NULL) {
		error = ENOENT;
		goto done;
	}
	dlen = strlen(sym->vs_data);
	if (dlen < uap->bufsize) {
		copyout(sym->vs_data, uap->buf, dlen + 1);
	} else if (uap->bufsize) {
		copyout("", uap->buf, 1);
	}
	uap->sysmsg_result = dlen + 1;
	varsymdrop(sym);
done:
	return(error);
}

/*
 * varsym_list() system call
 *
 * (int level, char *buf, int maxsize, int *marker)
 */
int
sys_varsym_list(struct varsym_list_args *uap)
{
	struct varsymset *vss;
	struct varsyment *ve;
	struct proc *p;
	int i;
	int error;
	int bytes;
	int earlyterm;
	int marker;

	/*
	 * Get the marker from userspace.
	 */
	if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
		goto done;

	/*
	 * Figure out the varsym set.
	 */
	p = curproc;
	vss = NULL;

	switch (uap->level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (p)
			vss = &p->p_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (p != NULL && p->p_ucred->cr_prison != NULL)
			vss = &p->p_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Loop through the variables and dump them to uap->buf
	 */
	i = 0;
	bytes = 0;
	earlyterm = 0;

	lockmgr(&vss->vx_lock, LK_SHARED);
	TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
		varsym_t sym = ve->ve_sym;
		int namelen = strlen(sym->vs_name);
		int datalen = strlen(sym->vs_data);
		int totlen = namelen + datalen + 2;

		/*
		 * Skip to our index point
		 */
		if (i < marker) {
			++i;
			continue;
		}

		/*
		 * Stop if there is insufficient space in the user buffer.
		 * If we haven't stored anything yet return EOVERFLOW.
		 * Note that the marker index (i) does not change.
		 */
		if (bytes + totlen > uap->maxsize) {
			if (bytes == 0)
				error = EOVERFLOW;
			earlyterm = 1;
			break;
		}

		error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
		if (error == 0) {
			bytes += namelen + 1;
			error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
			if (error == 0)
				bytes += datalen + 1;
			else
				bytes -= namelen + 1;	/* revert if error */
		}
		if (error) {
			earlyterm = 1;
			break;
		}
		++i;
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);

	/*
	 * Save the marker back.  If no error occurred and earlyterm is clear
	 * the marker is set to -1 indicating that the variable list has been
	 * exhausted.  If no error occurred the number of bytes loaded into
	 * the buffer will be returned, otherwise the syscall code returns -1.
	 */
	if (error == 0 && earlyterm == 0)
		marker = -1;
	else
		marker = i;
	if (error == 0)
		error = copyout(&marker, uap->marker, sizeof(marker));
	uap->sysmsg_result = bytes;
done:
	return(error);
}

/*
 * Lookup a variant symlink.  XXX use a hash table.
 */
static
struct varsyment *
varsymlookup(struct varsymset *vss, const char *name, int namelen)
{
	struct varsyment *ve;

	KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
	TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
		varsym_t var = ve->ve_sym;
		if (var->vs_namelen == namelen &&
		    bcmp(name, var->vs_name, namelen) == 0
		) {
			return(ve);
		}
	}
	return(NULL);
}

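/*
 * Lock helper for varsymfind(): release the lock on the previously
 * searched set (if any) and acquire a shared lock on the next set, so
 * the search can move between the proc, user, prison, and system sets
 * hand-over-hand.
 */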
static
void
vsslock(struct varsymset **vss, struct varsymset *n)
{
	if (*vss) {
		lockmgr(&(*vss)->vx_lock, LK_RELEASE);
	}
	lockmgr(&n->vx_lock, LK_SHARED);
	*vss = n;
}

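/*
 * Look up a variable by name in the sets selected by mask, checking the
 * per-process, per-user, and then the prison or system set.  Returns a
 * referenced varsym_t which the caller must release with varsymdrop(),
 * or NULL if the variable was not found.
 */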
varsym_t
varsymfind(int mask, const char *name, int namelen)
{
	struct proc *p = curproc;
	struct varsyment *ve = NULL;
	struct varsymset *vss = NULL;
	varsym_t sym;

	if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && p != NULL) {
		if (mask & VARSYM_PROC_MASK) {
			vsslock(&vss, &p->p_varsymset);
			ve = varsymlookup(vss, name, namelen);
		}
		if (ve == NULL && (mask & VARSYM_USER_MASK)) {
			vsslock(&vss, &p->p_ucred->cr_uidinfo->ui_varsymset);
			ve = varsymlookup(vss, name, namelen);
		}
	}
	if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
		if (p != NULL && p->p_ucred->cr_prison) {
			vsslock(&vss, &p->p_ucred->cr_prison->pr_varsymset);
			ve = varsymlookup(vss, name, namelen);
		} else {
			vsslock(&vss, &varsymset_sys);
			ve = varsymlookup(vss, name, namelen);
		}
	}
	if (ve) {
		sym = ve->ve_sym;
		atomic_add_int(&sym->vs_refs, 1);
	} else {
		sym = NULL;
	}
	if (vss)	/* no set was searched (and locked) if mask selected nothing */
		lockmgr(&vss->vx_lock, LK_RELEASE);
	return sym;
}

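/*
 * Create or delete a variable in the set selected by level.  A non-NULL
 * data creates the variable; a NULL data deletes it.  Returns 0 on
 * success, EINVAL for an unusable level, E2BIG if the set is full, or
 * ENOENT if a variable to be deleted does not exist.
 */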
int
varsymmake(int level, const char *name, const char *data)
{
	struct varsymset *vss = NULL;
	struct varsyment *ve;
	struct proc *p = curproc;
	varsym_t sym;
	int namelen = strlen(name);
	int datalen;
	int error;

	switch(level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (p)
			vss = &p->p_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (p != NULL && p->p_ucred->cr_prison != NULL)
			vss = &p->p_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		return EINVAL;
	}
	lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
	if (data && vss->vx_setsize >= MAXVARSYM_SET) {
		error = E2BIG;
	} else if (data) {
		datalen = strlen(data);
		ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
		sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
		ve->ve_sym = sym;
		sym->vs_refs = 1;
		sym->vs_namelen = namelen;
		sym->vs_name = (char *)(sym + 1);
		sym->vs_data = sym->vs_name + namelen + 1;
		strcpy(sym->vs_name, name);
		strcpy(sym->vs_data, data);
		TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
		vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
		error = 0;
	} else {
		if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
			TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
			vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
			varsymdrop(ve->ve_sym);
			kfree(ve, M_VARSYM);
			error = 0;
		} else {
			error = ENOENT;
		}
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);
	return(error);
}

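/*
 * Drop a reference on a variable, freeing it when the last reference
 * goes away.
 */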
void
varsymdrop(varsym_t sym)
{
	KKASSERT(sym->vs_refs > 0);
	if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
		kfree(sym, M_VARSYM);
	}
}

/*
 * Insert a duplicate of ve in vss.  Does not do any locking,
 * so it is the caller's responsibility to make sure nobody
 * else can mess with the TAILQ in vss at the same time.
 */
static void
varsymdup(struct varsymset *vss, struct varsyment *ve)
{
	struct varsyment *nve;

	nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
	nve->ve_sym = ve->ve_sym;
	++nve->ve_sym->vs_refs;	/* can't be reached, no need for atomic add */
	/*
	 * We're only called through varsymset_init() so vss is not yet reachable,
	 * no need to lock.
	 */
	TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
}

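/*
 * Initialize a variable set.  If copy is non-NULL the new set is
 * populated with references to the entries of the existing set.
 */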
void
varsymset_init(struct varsymset *vss, struct varsymset *copy)
{
	struct varsyment *ve;

	TAILQ_INIT(&vss->vx_queue);
	lockinit(&vss->vx_lock, "vx", 0, 0);
	if (copy) {
		TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
			varsymdup(vss, ve);
		}
		vss->vx_setsize = copy->vx_setsize;
	}
}

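/*
 * Remove all variables from a set and reset its size accounting.
 */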
void
varsymset_clean(struct varsymset *vss)
{
	struct varsyment *ve;

	lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
	while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
		TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
		varsymdrop(ve->ve_sym);
		kfree(ve, M_VARSYM);
	}
	vss->vx_setsize = 0;
	lockmgr(&vss->vx_lock, LK_RELEASE);
}