sys/kern/kern_varsym.c (dragonfly.git)

/*
 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements variable storage and management for variant
 * symlinks.  These variables may also be used for general purposes.
 */
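
/*
 * Illustrative example (not from the original source; the name FOOVERSION
 * is arbitrary): a variant symlink embeds ${NAME} references in its
 * target, e.g. a link created as
 *
 *      ln -s "libfoo.so.${FOOVERSION}" libfoo.so
 *
 * When the link is resolved, varsymreplace() substitutes the variable's
 * value from the most specific set that defines it (process, then user,
 * then prison or system), so the same link can resolve differently per
 * process, per user, per jail, or system-wide.
 */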

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ucred.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/varsym.h>
#include <sys/sysmsg.h>

MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");

static struct varsymset varsymset_sys;

/*
 * Initialize the variant symlink subsystem
 */
static void
varsym_sysinit(void *dummy)
{
        varsymset_init(&varsymset_sys, NULL);
}
SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);

/*
 * varsymreplace() - called from namei
 *
 *      Do variant symlink variable substitution
 */
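
/*
 * Behavior sketch (derived from the code below): cp holds the symlink
 * target, linklen its current length, and maxlen the buffer size.  Each
 * ${NAME} whose variable is found is rewritten in place; e.g. with
 * VERSION set to "1.0", "a.${VERSION}" becomes "a.1.0".  The new total
 * length is returned, or -1 if an expansion would not fit in maxlen.
 * References to unknown variables are left in place unexpanded.
 */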
int
varsymreplace(char *cp, int linklen, int maxlen)
{
        int rlen;
        int xlen;
        int nlen;
        int i;
        varsym_t var;

        rlen = linklen;
        while (linklen > 1) {
                if (cp[0] == '$' && cp[1] == '{') {
                        for (i = 2; i < linklen; ++i) {
                                if (cp[i] == '}')
                                        break;
                        }
                        if (i < linklen &&
                            (var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
                        ) {
                                xlen = i + 1;                   /* bytes to strike */
                                nlen = strlen(var->vs_data);    /* bytes to add */
                                if (linklen + nlen - xlen >= maxlen) {
                                        varsymdrop(var);
                                        return(-1);
                                }
                                KKASSERT(linklen >= xlen);
                                if (linklen != xlen)
                                        bcopy(cp + xlen, cp + nlen, linklen - xlen);
                                bcopy(var->vs_data, cp, nlen);
                                linklen += nlen - xlen; /* new relative length */
                                rlen += nlen - xlen;    /* returned total length */
                                cp += nlen;             /* adjust past replacement */
                                linklen -= nlen;        /* adjust past replacement */
                                maxlen -= nlen;         /* adjust past replacement */
                        } else {
                                /*
                                 * It's ok if i points to the '}', it will simply be
                                 * skipped.  i could also have hit linklen.
                                 */
                                cp += i;
                                linklen -= i;
                                maxlen -= i;
                        }
                } else {
                        ++cp;
                        --linklen;
                        --maxlen;
                }
        }
        return(rlen);
}

/*
 * varsym_set() system call
 *
 * (int level, const char *name, const char *data)
 */
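
/*
 * Usage sketch (illustrative): varsym_set(VARSYM_USER, "VERSION", "1.0")
 * creates or replaces the variable at the given level, while a NULL data
 * pointer deletes it.  A VARSYM_SYS request made from inside a prison is
 * silently handled as VARSYM_PRISON, and setting system/prison variables
 * from userland requires the PRIV_VARSYM_SYS privilege.
 */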
int
sys_varsym_set(struct sysmsg *sysmsg, const struct varsym_set_args *uap)
{
        char name[MAXVARSYM_NAME];
        char *buf;
        struct thread *td;
        struct lwp *lp;
        int error;
        int level = uap->level;

        td = curthread;
        lp = td->td_lwp;

        if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
                goto done2;
        buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
        if (uap->data &&
            (error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0) {
                goto done1;
        }

        switch(level) {
        case VARSYM_SYS:
                if (lp != NULL && td->td_ucred->cr_prison != NULL)
                        level = VARSYM_PRISON;
                /* fall through */
        case VARSYM_PRISON:
                if (lp != NULL &&
                    (error = priv_check_cred(td->td_ucred, PRIV_VARSYM_SYS, 0)) != 0)
                        break;
                /* fall through */
        case VARSYM_USER:
                /* XXX check jail / implement per-jail user */
                /* fall through */
        case VARSYM_PROC:
                if (uap->data) {
                        (void)varsymmake(level, name, NULL);
                        error = varsymmake(level, name, buf);
                } else {
                        error = varsymmake(level, name, NULL);
                }
                break;
        }
done1:
        kfree(buf, M_TEMP);
done2:
        return(error);
}

/*
 * varsym_get() system call
 *
 * (int mask, const char *wild, char *buf, int bufsize)
 *
 * MPALMOSTSAFE
 */
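
/*
 * Result sketch (derived from the code below): the value of the named
 * variable, including its terminating NUL, is copied out to buf when it
 * fits in bufsize.  ENOENT is returned if the variable does not exist,
 * and EOVERFLOW (with an empty string copied out) if the buffer is too
 * small.
 */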
int
sys_varsym_get(struct sysmsg *sysmsg, const struct varsym_get_args *uap)
{
        char wild[MAXVARSYM_NAME];
        varsym_t sym;
        int error;
        int dlen;

        if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
                goto done;
        sym = varsymfind(uap->mask, wild, strlen(wild));
        if (sym == NULL) {
                error = ENOENT;
                goto done;
        }
        dlen = strlen(sym->vs_data);
        if (dlen < uap->bufsize) {
                error = copyout(sym->vs_data, uap->buf, dlen + 1);
        } else if (uap->bufsize) {
                copyout("", uap->buf, 1);
                error = EOVERFLOW;
        }
        sysmsg->sysmsg_result = 0;
        varsymdrop(sym);
done:
        return(error);
}

/*
 * varsym_list() system call
 *
 * (int level, char *buf, int maxsize, int *marker)
 *
 * MPALMOSTSAFE
 */
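
/*
 * Iteration sketch (illustrative): userland initializes *marker to 0 and
 * calls varsym_list() repeatedly.  Each call packs as many NUL-terminated
 * name/value string pairs into buf as fit in maxsize, returns the number
 * of bytes written, and advances *marker; *marker is set to -1 once the
 * variable set has been exhausted.
 */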
int
sys_varsym_list(struct sysmsg *sysmsg, const struct varsym_list_args *uap)
{
        struct varsymset *vss;
        struct varsyment *ve;
        struct thread *td;
        struct proc *p;
        struct lwp *lp;
        int i;
        int error;
        int bytes;
        int earlyterm;
        int marker;

        /*
         * Get the marker from userspace.
         */
        if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
                goto done;

        /*
         * Figure out the varsym set.
         */
        td = curthread;
        lp = td->td_lwp;
        p = lp ? lp->lwp_proc : NULL;

        vss = NULL;

        switch (uap->level) {
        case VARSYM_PROC:
                if (p)
                        vss = &p->p_varsymset;
                break;
        case VARSYM_USER:
                if (lp)
                        vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
                break;
        case VARSYM_SYS:
                vss = &varsymset_sys;
                break;
        case VARSYM_PRISON:
                if (lp && td->td_ucred->cr_prison)
                        vss = &td->td_ucred->cr_prison->pr_varsymset;
                break;
        }
        if (vss == NULL) {
                error = EINVAL;
                goto done;
        }

        /*
         * Loop through the variables and dump them to uap->buf
         */
        i = 0;
        bytes = 0;
        earlyterm = 0;

        lockmgr(&vss->vx_lock, LK_SHARED);
        TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
                varsym_t sym = ve->ve_sym;
                int namelen = strlen(sym->vs_name);
                int datalen = strlen(sym->vs_data);
                int totlen = namelen + datalen + 2;

                /*
                 * Skip to our index point
                 */
                if (i < marker) {
                        ++i;
                        continue;
                }

                /*
                 * Stop if there is insufficient space in the user buffer.
                 * If we haven't stored anything yet return EOVERFLOW.
                 * Note that the marker index (i) does not change.
                 */
                if (bytes + totlen > uap->maxsize) {
                        if (bytes == 0)
                                error = EOVERFLOW;
                        earlyterm = 1;
                        break;
                }

                error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
                if (error == 0) {
                        bytes += namelen + 1;
                        error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
                        if (error == 0)
                                bytes += datalen + 1;
                        else
                                bytes -= namelen + 1;   /* revert if error */
                }
                if (error) {
                        earlyterm = 1;
                        break;
                }
                ++i;
        }
        lockmgr(&vss->vx_lock, LK_RELEASE);

        /*
         * Save the marker back.  If no error occurred and earlyterm is clear
         * the marker is set to -1 indicating that the variable list has been
         * exhausted.  If no error occurred the number of bytes loaded into
         * the buffer will be returned, otherwise the syscall code returns -1.
         */
        if (error == 0 && earlyterm == 0)
                marker = -1;
        else
                marker = i;
        if (error == 0)
                error = copyout(&marker, uap->marker, sizeof(marker));
        sysmsg->sysmsg_result = bytes;
done:
        return(error);
}

/*
 * Lookup a variant symlink.  XXX use a hash table.
 */
static
struct varsyment *
varsymlookup(struct varsymset *vss, const char *name, int namelen)
{
        struct varsyment *ve;

        KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
        TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
                varsym_t var = ve->ve_sym;
                if (var->vs_namelen == namelen &&
                    bcmp(name, var->vs_name, namelen) == 0
                ) {
                        return(ve);
                }
        }
        return(NULL);
}

static
void
vsslock(struct varsymset **vss, struct varsymset *n)
{
        if (*vss) {
                lockmgr(&(*vss)->vx_lock, LK_RELEASE);
        }
        lockmgr(&n->vx_lock, LK_SHARED);
        *vss = n;
}
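
/*
 * varsymfind() - locate a variable by name (descriptive note, derived
 * from the code below).  The per-process set is consulted first, then
 * the per-user set, and finally either the prison set (for jailed
 * processes) or the system set, as permitted by 'mask'.  On success the
 * returned varsym_t carries an extra reference which the caller must
 * release with varsymdrop().
 */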
varsym_t
varsymfind(int mask, const char *name, int namelen)
{
        struct varsyment *ve = NULL;
        struct varsymset *vss = NULL;
        struct thread *td;
        struct lwp *lp;
        struct proc *p;
        varsym_t sym;

        td = curthread;
        lp = td->td_lwp;
        p = lp ? lp->lwp_proc : NULL;

        if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && lp != NULL) {
                if (mask & VARSYM_PROC_MASK) {
                        vsslock(&vss, &p->p_varsymset);
                        ve = varsymlookup(vss, name, namelen);
                }
                if (ve == NULL && (mask & VARSYM_USER_MASK)) {
                        vsslock(&vss, &td->td_ucred->cr_uidinfo->ui_varsymset);
                        ve = varsymlookup(vss, name, namelen);
                }
        }
        if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
                if (lp != NULL && td->td_ucred->cr_prison) {
                        vsslock(&vss, &td->td_ucred->cr_prison->pr_varsymset);
                        ve = varsymlookup(vss, name, namelen);
                } else {
                        vsslock(&vss, &varsymset_sys);
                        ve = varsymlookup(vss, name, namelen);
                }
        }
        if (ve) {
                sym = ve->ve_sym;
                atomic_add_int(&sym->vs_refs, 1);
        } else {
                sym = NULL;
        }
        if (vss)
                lockmgr(&vss->vx_lock, LK_RELEASE);
        return sym;
}
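
/*
 * varsymmake() - create, replace or delete a variable (descriptive note,
 * derived from the code below).  A non-NULL data string installs the
 * variable at the requested level, allocating one block that holds the
 * varsym header followed by the name and data strings; a NULL data
 * string removes the variable instead.  Returns 0 on success, EINVAL
 * for an unusable level, E2BIG when the set is full, or ENOENT when a
 * deletion target does not exist.
 */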
int
varsymmake(int level, const char *name, const char *data)
{
        struct varsymset *vss = NULL;
        struct varsyment *ve;
        struct thread *td;
        struct proc *p;
        struct lwp *lp;
        varsym_t sym;
        int namelen = strlen(name);
        int datalen;
        int error;

        td = curthread;
        lp = td->td_lwp;
        p = lp ? lp->lwp_proc : NULL;

        switch(level) {
        case VARSYM_PROC:
                if (p)
                        vss = &p->p_varsymset;
                break;
        case VARSYM_USER:
                if (lp)
                        vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
                break;
        case VARSYM_SYS:
                vss = &varsymset_sys;
                break;
        case VARSYM_PRISON:
                if (lp && td->td_ucred->cr_prison)
                        vss = &td->td_ucred->cr_prison->pr_varsymset;
                break;
        }
        if (vss == NULL) {
                return EINVAL;
        }
        lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
        if (data && vss->vx_setsize >= MAXVARSYM_SET) {
                error = E2BIG;
        } else if (data) {
                datalen = strlen(data);
                ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
                sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
                ve->ve_sym = sym;
                sym->vs_refs = 1;
                sym->vs_namelen = namelen;
                sym->vs_name = (char *)(sym + 1);
                sym->vs_data = sym->vs_name + namelen + 1;
                strcpy(sym->vs_name, name);
                strcpy(sym->vs_data, data);
                TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
                vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
                error = 0;
        } else {
                if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
                        TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
                        vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
                        varsymdrop(ve->ve_sym);
                        kfree(ve, M_VARSYM);
                        error = 0;
                } else {
                        error = ENOENT;
                }
        }
        lockmgr(&vss->vx_lock, LK_RELEASE);
        return(error);
}
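
/*
 * varsymdrop() - release a reference previously obtained via varsymfind()
 * or held by a varsyment (descriptive note, derived from the code below).
 * The storage is freed when the last reference goes away.
 */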
void
varsymdrop(varsym_t sym)
{
        KKASSERT(sym->vs_refs > 0);
        if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
                kfree(sym, M_VARSYM);
        }
}

/*
 * Insert a duplicate of ve in vss.  Does not do any locking,
 * so it is the caller's responsibility to make sure nobody
 * else can mess with the TAILQ in vss at the same time.
 */
static void
varsymdup(struct varsymset *vss, struct varsyment *ve)
{
        struct varsyment *nve;

        nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
        nve->ve_sym = ve->ve_sym;
        ++nve->ve_sym->vs_refs; /* can't be reached, no need for atomic add */
        /*
         * We're only called through varsymset_init() so vss is not yet
         * reachable, no need to lock.
         */
        TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
}
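
/*
 * varsymset_init() - initialize a varsym set (descriptive note, derived
 * from the code below).  When 'copy' is non-NULL every entry in it is
 * duplicated into the new set, sharing the underlying varsym storage via
 * the reference count, so a new set can start out as a copy of an
 * existing one (e.g. when a child process inherits its parent's
 * per-process variables).
 */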
void
varsymset_init(struct varsymset *vss, struct varsymset *copy)
{
        struct varsyment *ve;

        TAILQ_INIT(&vss->vx_queue);
        lockinit(&vss->vx_lock, "vx", 0, 0);
        if (copy) {
                TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
                        varsymdup(vss, ve);
                }
                vss->vx_setsize = copy->vx_setsize;
        }
}
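
/*
 * varsymset_clean() - release every entry in a varsym set (descriptive
 * note, derived from the code below).  Each entry's varsym reference is
 * dropped and the entry freed; the set itself stays initialized and can
 * be reused.
 */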
void
varsymset_clean(struct varsymset *vss)
{
        struct varsyment *ve;

        lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
        while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
                TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
                varsymdrop(ve->ve_sym);
                kfree(ve, M_VARSYM);
        }
        vss->vx_setsize = 0;
        lockmgr(&vss->vx_lock, LK_RELEASE);
}