less(1): Regenerate defines.h and update Makefile
[dragonfly.git] / sys / kern / kern_varsym.c
blob4e2b5cedd32f72326ea1c4294b7ffec96f41fe28
1 /*
2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements variable storage and management for variant
 * symlinks.  These variables may also be used for general purposes.
 */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/ucred.h>
44 #include <sys/resourcevar.h>
45 #include <sys/proc.h>
46 #include <sys/caps.h>
47 #include <sys/jail.h>
48 #include <sys/queue.h>
49 #include <sys/sysctl.h>
50 #include <sys/malloc.h>
51 #include <sys/varsym.h>
52 #include <sys/sysmsg.h>
54 MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");
56 static struct varsymset varsymset_sys;
59 * Initialize the variant symlink subsystem
61 static void
62 varsym_sysinit(void *dummy)
64 varsymset_init(&varsymset_sys, NULL);
66 SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);
69 * varsymreplace() - called from namei
71 * Do variant symlink variable substitution
73 int
74 varsymreplace(char *cp, int linklen, int maxlen)
76 int rlen;
77 int xlen;
78 int nlen;
79 int i;
80 varsym_t var;
82 rlen = linklen;
83 while (linklen > 1) {
84 if (cp[0] == '$' && cp[1] == '{') {
85 for (i = 2; i < linklen; ++i) {
86 if (cp[i] == '}')
87 break;
89 if (i < linklen &&
90 (var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
91 ) {
92 xlen = i + 1; /* bytes to strike */
93 nlen = strlen(var->vs_data); /* bytes to add */
94 if (linklen + nlen - xlen >= maxlen) {
95 varsymdrop(var);
96 return(-1);
98 KKASSERT(linklen >= xlen);
99 if (linklen != xlen)
100 bcopy(cp + xlen, cp + nlen, linklen - xlen);
101 bcopy(var->vs_data, cp, nlen);
102 linklen += nlen - xlen; /* new relative length */
103 rlen += nlen - xlen; /* returned total length */
104 cp += nlen; /* adjust past replacement */
105 linklen -= nlen; /* adjust past replacement */
106 maxlen -= nlen; /* adjust past replacement */
107 } else {
109 * It's ok if i points to the '}', it will simply be
110 * skipped. i could also have hit linklen.
112 cp += i;
113 linklen -= i;
114 maxlen -= i;
116 } else {
117 ++cp;
118 --linklen;
119 --maxlen;
122 return(rlen);
126 * varsym_set() system call
128 * (int level, const char *name, const char *data)
131 sys_varsym_set(struct sysmsg *sysmsg, const struct varsym_set_args *uap)
133 char name[MAXVARSYM_NAME];
134 char *buf;
135 struct thread *td;
136 struct lwp *lp;
137 int error;
138 int level = uap->level;
140 td = curthread;
141 lp = td->td_lwp;
143 if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
144 goto done2;
145 buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
146 if (uap->data &&
147 (error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0)
149 goto done1;
152 switch(level) {
153 case VARSYM_SYS:
154 if (lp != NULL && td->td_ucred->cr_prison != NULL)
155 level = VARSYM_PRISON;
156 /* fall through */
157 case VARSYM_PRISON:
158 if (lp != NULL &&
159 (error = caps_priv_check_td(td, SYSCAP_NOVARSYM_SYS)) != 0)
161 break;
163 /* fall through */
164 case VARSYM_USER:
165 /* XXX check jail / implement per-jail user */
166 /* fall through */
167 case VARSYM_PROC:
168 if (uap->data) {
169 (void)varsymmake(level, name, NULL);
170 error = varsymmake(level, name, buf);
171 } else {
172 error = varsymmake(level, name, NULL);
174 break;
176 done1:
177 kfree(buf, M_TEMP);
178 done2:
179 return(error);
183 * varsym_get() system call
185 * (int mask, const char *wild, char *buf, int bufsize)
187 * MPALMOSTSAFE
190 sys_varsym_get(struct sysmsg *sysmsg, const struct varsym_get_args *uap)
192 char wild[MAXVARSYM_NAME];
193 varsym_t sym;
194 int error;
195 int dlen;
197 if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
198 goto done;
199 sym = varsymfind(uap->mask, wild, strlen(wild));
200 if (sym == NULL) {
201 error = ENOENT;
202 goto done;
204 dlen = strlen(sym->vs_data);
205 if (dlen < uap->bufsize) {
206 error = copyout(sym->vs_data, uap->buf, dlen + 1);
207 } else if (uap->bufsize) {
208 copyout("", uap->buf, 1);
209 error = EOVERFLOW;
211 sysmsg->sysmsg_result = 0;
212 varsymdrop(sym);
213 done:
215 return(error);
219 * varsym_list() system call
221 * (int level, char *buf, int maxsize, int *marker)
223 * MPALMOSTSAFE
226 sys_varsym_list(struct sysmsg *sysmsg, const struct varsym_list_args *uap)
228 struct varsymset *vss;
229 struct varsyment *ve;
230 struct thread *td;
231 struct proc *p;
232 struct lwp *lp;
233 int i;
234 int error;
235 int bytes;
236 int earlyterm;
237 int marker;
240 * Get the marker from userspace.
242 if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
243 goto done;
246 * Figure out the varsym set.
248 td = curthread;
249 lp = td->td_lwp;
250 p = lp ? lp->lwp_proc : NULL;
252 vss = NULL;
254 switch (uap->level) {
255 case VARSYM_PROC:
256 if (p)
257 vss = &p->p_varsymset;
258 break;
259 case VARSYM_USER:
260 if (lp)
261 vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
262 break;
263 case VARSYM_SYS:
264 vss = &varsymset_sys;
265 break;
266 case VARSYM_PRISON:
267 if (lp && td->td_ucred->cr_prison)
268 vss = &td->td_ucred->cr_prison->pr_varsymset;
269 break;
271 if (vss == NULL) {
272 error = EINVAL;
273 goto done;
277 * Loop through the variables and dump them to uap->buf
279 i = 0;
280 bytes = 0;
281 earlyterm = 0;
283 lockmgr(&vss->vx_lock, LK_SHARED);
284 TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
285 varsym_t sym = ve->ve_sym;
286 int namelen = strlen(sym->vs_name);
287 int datalen = strlen(sym->vs_data);
288 int totlen = namelen + datalen + 2;
291 * Skip to our index point
293 if (i < marker) {
294 ++i;
295 continue;
299 * Stop if there is insufficient space in the user buffer.
300 * If we haven't stored anything yet return EOVERFLOW.
301 * Note that the marker index (i) does not change.
303 if (bytes + totlen > uap->maxsize) {
304 if (bytes == 0)
305 error = EOVERFLOW;
306 earlyterm = 1;
307 break;
310 error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
311 if (error == 0) {
312 bytes += namelen + 1;
313 error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
314 if (error == 0)
315 bytes += datalen + 1;
316 else
317 bytes -= namelen + 1; /* revert if error */
319 if (error) {
320 earlyterm = 1;
321 break;
323 ++i;
325 lockmgr(&vss->vx_lock, LK_RELEASE);
328 * Save the marker back. If no error occured and earlyterm is clear
329 * the marker is set to -1 indicating that the variable list has been
330 * exhausted. If no error occured the number of bytes loaded into
331 * the buffer will be returned, otherwise the syscall code returns -1.
333 if (error == 0 && earlyterm == 0)
334 marker = -1;
335 else
336 marker = i;
337 if (error == 0)
338 error = copyout(&marker, uap->marker, sizeof(marker));
339 sysmsg->sysmsg_result = bytes;
340 done:
341 return(error);
345 * Lookup a variant symlink. XXX use a hash table.
347 static
348 struct varsyment *
349 varsymlookup(struct varsymset *vss, const char *name, int namelen)
351 struct varsyment *ve;
353 KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
354 TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
355 varsym_t var = ve->ve_sym;
356 if (var->vs_namelen == namelen &&
357 bcmp(name, var->vs_name, namelen) == 0
359 return(ve);
362 return(NULL);
365 static
366 void
367 vsslock(struct varsymset **vss, struct varsymset *n)
369 if (*vss) {
370 lockmgr(&(*vss)->vx_lock, LK_RELEASE);
372 lockmgr(&n->vx_lock, LK_SHARED);
373 *vss = n;
376 varsym_t
377 varsymfind(int mask, const char *name, int namelen)
379 struct varsyment *ve = NULL;
380 struct varsymset *vss = NULL;
381 struct thread *td;
382 struct lwp *lp;
383 struct proc *p;
384 varsym_t sym;
386 td = curthread;
387 lp = td->td_lwp;
388 p = lp ? lp->lwp_proc : NULL;
390 if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && lp != NULL) {
391 if (mask & VARSYM_PROC_MASK) {
392 vsslock(&vss, &p->p_varsymset);
393 ve = varsymlookup(vss, name, namelen);
395 if (ve == NULL && (mask & VARSYM_USER_MASK)) {
396 vsslock(&vss, &td->td_ucred->cr_uidinfo->ui_varsymset);
397 ve = varsymlookup(vss, name, namelen);
400 if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
401 if (lp != NULL && td->td_ucred->cr_prison) {
402 vsslock(&vss, &td->td_ucred->cr_prison->pr_varsymset);
403 ve = varsymlookup(vss, name, namelen);
404 } else {
405 vsslock(&vss, &varsymset_sys);
406 ve = varsymlookup(vss, name, namelen);
409 if (ve) {
410 sym = ve->ve_sym;
411 atomic_add_int(&sym->vs_refs, 1);
412 } else {
413 sym = NULL;
415 if (vss)
416 lockmgr(&vss->vx_lock, LK_RELEASE);
417 return sym;
421 varsymmake(int level, const char *name, const char *data)
423 struct varsymset *vss = NULL;
424 struct varsyment *ve;
425 struct thread *td;
426 struct proc *p;
427 struct lwp *lp;
428 varsym_t sym;
429 int namelen = strlen(name);
430 int datalen;
431 int error;
433 td = curthread;
434 lp = td->td_lwp;
435 p = lp ? lp->lwp_proc : NULL;
437 switch(level) {
438 case VARSYM_PROC:
439 if (p)
440 vss = &p->p_varsymset;
441 break;
442 case VARSYM_USER:
443 if (lp)
444 vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
445 break;
446 case VARSYM_SYS:
447 vss = &varsymset_sys;
448 break;
449 case VARSYM_PRISON:
450 if (lp && td->td_ucred->cr_prison)
451 vss = &td->td_ucred->cr_prison->pr_varsymset;
452 break;
454 if (vss == NULL) {
455 return EINVAL;
457 lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
458 if (data && vss->vx_setsize >= MAXVARSYM_SET) {
459 error = E2BIG;
460 } else if (data) {
461 datalen = strlen(data);
462 ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
463 sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
464 ve->ve_sym = sym;
465 sym->vs_refs = 1;
466 sym->vs_namelen = namelen;
467 sym->vs_name = (char *)(sym + 1);
468 sym->vs_data = sym->vs_name + namelen + 1;
469 strcpy(sym->vs_name, name);
470 strcpy(sym->vs_data, data);
471 TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
472 vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
473 error = 0;
474 } else {
475 if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
476 TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
477 vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
478 varsymdrop(ve->ve_sym);
479 kfree(ve, M_VARSYM);
480 error = 0;
481 } else {
482 error = ENOENT;
485 lockmgr(&vss->vx_lock, LK_RELEASE);
486 return(error);
489 void
490 varsymdrop(varsym_t sym)
492 KKASSERT(sym->vs_refs > 0);
493 if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
494 kfree(sym, M_VARSYM);
499 * Insert a duplicate of ve in vss. Does not do any locking,
500 * so it is the callers responsibility to make sure nobody
501 * else can mess with the TAILQ in vss at the same time.
503 static void
504 varsymdup(struct varsymset *vss, struct varsyment *ve)
506 struct varsyment *nve;
508 nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
509 nve->ve_sym = ve->ve_sym;
510 ++nve->ve_sym->vs_refs; /* can't be reached, no need for atomic add */
512 * We're only called through varsymset_init() so vss is not yet reachable,
513 * no need to lock.
515 TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
518 void
519 varsymset_init(struct varsymset *vss, struct varsymset *copy)
521 struct varsyment *ve;
523 TAILQ_INIT(&vss->vx_queue);
524 lockinit(&vss->vx_lock, "vx", 0, 0);
525 if (copy) {
526 TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
527 varsymdup(vss, ve);
529 vss->vx_setsize = copy->vx_setsize;
533 void
534 varsymset_clean(struct varsymset *vss)
536 struct varsyment *ve;
538 lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
539 while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
540 TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
541 varsymdrop(ve->ve_sym);
542 kfree(ve, M_VARSYM);
544 vss->vx_setsize = 0;
545 lockmgr(&vss->vx_lock, LK_RELEASE);