kernel - Make numerous proc accesses use p->p_token instead of proc_token.
sys/vfs/procfs/procfs_subr.c
/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_subr.c	8.6 (Berkeley) 5/14/95
 *
 * $FreeBSD: src/sys/miscfs/procfs/procfs_subr.c,v 1.26.2.3 2002/02/18 21:28:04 des Exp $
 * $DragonFly: src/sys/vfs/procfs/procfs_subr.c,v 1.18 2007/08/25 23:27:02 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <vfs/procfs/procfs.h>
#define PFS_HSIZE	256
#define PFS_HMASK	(PFS_HSIZE - 1)

static struct pfsnode *pfshead[PFS_HSIZE];
static int pfsvplock;

#define PFSHASH(pid)	&pfshead[(pid) & PFS_HMASK]
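/*
 * Illustrative note (not part of the original source): PFSHASH() masks the
 * low bits of the pid to pick one of the PFS_HSIZE singly-linked buckets,
 * so every pfsnode for a given pid lands on the same chain.  A minimal
 * sketch of a bucket walk, assuming only the declarations above:
 *
 *	struct pfsnode **pp = PFSHASH(pid);
 *	struct pfsnode *pfs;
 *
 *	for (pfs = *pp; pfs != NULL; pfs = pfs->pfs_next) {
 *		if (pfs->pfs_pid == pid)
 *			break;		// candidate node for this pid
 *	}
 */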
/*
 * Allocate a pfsnode/vnode pair.  If no error occurs the returned vnode
 * will be referenced and exclusively locked.
 *
 * The pid, pfs_type, and mount point uniquely identify a pfsnode.
 * The mount point is needed because someone might mount this filesystem
 * twice.
 *
 * All pfsnodes are maintained on a singly-linked list.  New nodes are
 * only allocated when they cannot be found on this list.  Entries on
 * the list are removed when the vfs reclaim entry is called.
 *
 * A single lock is kept for the entire list.  This is needed because the
 * getnewvnode() function can block waiting for a vnode to become free,
 * in which case there may be more than one process trying to get the same
 * vnode.  This lock is only taken if we are going to call getnewvnode,
 * since the kernel itself is single-threaded.
 *
 * If an entry is found on the list, then call vget() to take a reference
 * and obtain the lock.  This will properly re-reference the vnode if it
 * had gotten onto the free list.
 */
int
procfs_allocvp(struct mount *mp, struct vnode **vpp, long pid, pfstype pfs_type)
{
	struct pfsnode *pfs;
	struct vnode *vp;
	struct pfsnode **pp;
	int error;

	pp = PFSHASH(pid);
loop:
	for (pfs = *pp; pfs; pfs = pfs->pfs_next) {
		if (pfs->pfs_pid == pid && pfs->pfs_type == pfs_type &&
		    PFSTOV(pfs)->v_mount == mp) {
			vp = PFSTOV(pfs);
			if (vget(vp, LK_EXCLUSIVE))
				goto loop;

			/*
			 * Make sure the vnode is still in the cache after
			 * getting the interlock to avoid racing a free.
			 */
			for (pfs = *pp; pfs; pfs = pfs->pfs_next) {
				if (PFSTOV(pfs) == vp &&
				    pfs->pfs_pid == pid &&
				    pfs->pfs_type == pfs_type &&
				    PFSTOV(pfs)->v_mount == mp) {
					break;
				}
			}
			if (pfs == NULL || PFSTOV(pfs) != vp) {
				vput(vp);
				goto loop;
			}
			*vpp = vp;
			return (0);
		}
	}

	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	if (pfsvplock & PROCFS_LOCKED) {
		pfsvplock |= PROCFS_WANT;
		(void) tsleep((caddr_t) &pfsvplock, 0, "pfsavp", 0);
		goto loop;
	}
	pfsvplock |= PROCFS_LOCKED;

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 *
	 * XXX this may not matter anymore since getnewvnode now returns
	 * a VX locked vnode.
	 */
	MALLOC(pfs, struct pfsnode *, sizeof(struct pfsnode), M_TEMP, M_WAITOK);

	error = getnewvnode(VT_PROCFS, mp, vpp, 0, 0);
	if (error) {
		kfree(pfs, M_TEMP);
		goto out;
	}
	vp = *vpp;

	vp->v_data = pfs;

	pfs->pfs_next = 0;
	pfs->pfs_pid = (pid_t) pid;
	pfs->pfs_type = pfs_type;
	pfs->pfs_vnode = vp;
	pfs->pfs_flags = 0;
	pfs->pfs_lockowner = 0;
	pfs->pfs_fileno = PROCFS_FILENO(pid, pfs_type);

	switch (pfs_type) {
	case Proot:	/* /proc = dr-xr-xr-x */
		pfs->pfs_mode = (VREAD|VEXEC) |
				(VREAD|VEXEC) >> 3 |
				(VREAD|VEXEC) >> 6;
		vp->v_type = VDIR;
		vp->v_flag = VROOT;
		break;

	case Pcurproc:	/* /proc/curproc = lr--r--r-- */
		pfs->pfs_mode = (VREAD) |
				(VREAD >> 3) |
				(VREAD >> 6);
		vp->v_type = VLNK;
		break;

	case Pproc:
		pfs->pfs_mode = (VREAD|VEXEC) |
				(VREAD|VEXEC) >> 3 |
				(VREAD|VEXEC) >> 6;
		vp->v_type = VDIR;
		break;

	case Pfile:
		pfs->pfs_mode = (VREAD|VEXEC) |
				(VREAD|VEXEC) >> 3 |
				(VREAD|VEXEC) >> 6;
		vp->v_type = VLNK;
		break;

	case Pmem:
		pfs->pfs_mode = (VREAD|VWRITE);
		vp->v_type = VREG;
		break;

	case Pregs:
	case Pfpregs:
	case Pdbregs:
		pfs->pfs_mode = (VREAD|VWRITE);
		vp->v_type = VREG;
		break;

	case Pctl:
	case Pnote:
	case Pnotepg:
		pfs->pfs_mode = (VWRITE);
		vp->v_type = VREG;
		break;

	case Ptype:
	case Pmap:
	case Pstatus:
	case Pcmdline:
	case Prlimit:
		pfs->pfs_mode = (VREAD) |
				(VREAD >> 3) |
				(VREAD >> 6);
		vp->v_type = VREG;
		break;

	default:
		panic("procfs_allocvp");
	}

	/* add to procfs vnode list */
	pfs->pfs_next = *pp;
	*pp = pfs;

out:
	pfsvplock &= ~PROCFS_LOCKED;

	if (pfsvplock & PROCFS_WANT) {
		pfsvplock &= ~PROCFS_WANT;
		wakeup((caddr_t) &pfsvplock);
	}

	return (error);
}
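/*
 * Illustrative usage sketch (not part of the original file): a lookup or
 * readdir routine typically translates a name under /proc into a
 * (pid, pfstype) pair and calls procfs_allocvp() to get the referenced,
 * exclusively locked vnode back.  The wrapper name below is hypothetical;
 * only procfs_allocvp() and the pfstype values are real.
 *
 *	static int
 *	example_get_proc_dir(struct mount *mp, pid_t pid, struct vnode **vpp)
 *	{
 *		int error;
 *
 *		// On success *vpp is referenced and exclusively locked.
 *		error = procfs_allocvp(mp, vpp, pid, Pproc);
 *		if (error == 0) {
 *			// ... use *vpp, then vput() it when finished ...
 *		}
 *		return (error);
 *	}
 */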
/*
 * Free a pfsnode/vnode pair: unhook the pfsnode from its hash chain and
 * release its storage.  Called when the vfs reclaim entry runs.
 */
int
procfs_freevp(struct vnode *vp)
{
	struct pfsnode **pfspp;
	struct pfsnode *pfs;

	pfs = VTOPFS(vp);
	vp->v_data = NULL;

	pfspp = PFSHASH(pfs->pfs_pid);
	while (*pfspp != pfs && *pfspp)
		pfspp = &(*pfspp)->pfs_next;
	KKASSERT(*pfspp);
	*pfspp = pfs->pfs_next;
	pfs->pfs_next = NULL;
	kfree(pfs, M_TEMP);
	return (0);
}
/*
 * Generic vop_read/vop_write dispatcher for procfs.  Looks up and holds the
 * target process and its first lwp, serializes access to the pfsnode via
 * pfs_lockowner, and hands off to the per-file-type handler under the
 * process's p_token.
 */
int
procfs_rw(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *curtd = uio->uio_td;
	struct proc *curp;
	struct pfsnode *pfs = VTOPFS(vp);
	struct proc *p;
	struct lwp *lp;
	int rtval;

	if (curtd == NULL)
		return (EINVAL);
	if ((curp = curtd->td_proc) == NULL)	/* XXX */
		return (EINVAL);

	lwkt_gettoken(&proc_token);

	p = pfind(pfs->pfs_pid);
	if (p == NULL) {
		lwkt_reltoken(&proc_token);
		return (EINVAL);
	}
	if (p->p_pid == 1 && securelevel > 0 && uio->uio_rw == UIO_WRITE) {
		lwkt_reltoken(&proc_token);
		PRELE(p);
		return (EACCES);
	}
	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	LWPHOLD(lp);

	while (pfs->pfs_lockowner) {
		tsleep(&pfs->pfs_lockowner, 0, "pfslck", 0);
	}
	pfs->pfs_lockowner = curproc->p_pid;

	lwkt_gettoken(&p->p_token);

	switch (pfs->pfs_type) {
	case Pnote:
	case Pnotepg:
		rtval = procfs_donote(curp, lp, pfs, uio);
		break;

	case Pregs:
		rtval = procfs_doregs(curp, lp, pfs, uio);
		break;

	case Pfpregs:
		rtval = procfs_dofpregs(curp, lp, pfs, uio);
		break;

	case Pdbregs:
		rtval = procfs_dodbregs(curp, lp, pfs, uio);
		break;

	case Pctl:
		rtval = procfs_doctl(curp, lp, pfs, uio);
		break;

	case Pstatus:
		rtval = procfs_dostatus(curp, lp, pfs, uio);
		break;

	case Pmap:
		rtval = procfs_domap(curp, lp, pfs, uio);
		break;

	case Pmem:
		rtval = procfs_domem(curp, lp, pfs, uio);
		break;

	case Ptype:
		rtval = procfs_dotype(curp, lp, pfs, uio);
		break;

	case Pcmdline:
		rtval = procfs_docmdline(curp, lp, pfs, uio);
		break;

	case Prlimit:
		rtval = procfs_dorlimit(curp, lp, pfs, uio);
		break;

	default:
		rtval = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&p->p_token);
	LWPRELE(lp);

	pfs->pfs_lockowner = 0;
	lwkt_reltoken(&proc_token);
	wakeup(&pfs->pfs_lockowner);
	PRELE(p);

	return rtval;
}
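/*
 * Illustrative note (not part of the original file): the per-process work
 * above follows the hold/lock pairing this commit moves procfs toward,
 * i.e. holding the process via pfind()/PRELE() and taking the per-process
 * p->p_token around the type-specific handler instead of relying on the
 * global proc_token for everything.  A minimal sketch of that pattern,
 * assuming only the kernel interfaces already used above:
 *
 *	struct proc *p = pfind(pid);	// returns a held process or NULL
 *	if (p != NULL) {
 *		lwkt_gettoken(&p->p_token);
 *		// ... inspect or modify *p ...
 *		lwkt_reltoken(&p->p_token);
 *		PRELE(p);		// drop the hold taken by pfind()
 *	}
 */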
/*
 * Get a string from userland into (buf).  Strip a trailing
 * nl character (to allow easy access from the shell).
 * The buffer should be *buflenp + 1 chars long.  vfs_getuserstr
 * will automatically add a nul char at the end.
 *
 * Returns 0 on success or the following errors
 *
 * EINVAL:    file offset is non-zero.
 * EMSGSIZE:  message is longer than kernel buffer
 * EFAULT:    user i/o buffer is not addressable
 */
int
vfs_getuserstr(struct uio *uio, char *buf, int *buflenp)
{
	int xlen;
	int error;

	if (uio->uio_offset != 0)
		return (EINVAL);

	xlen = *buflenp;

	/* must be able to read the whole string in one go */
	if (xlen < uio->uio_resid)
		return (EMSGSIZE);
	xlen = uio->uio_resid;

	if ((error = uiomove(buf, xlen, uio)) != 0)
		return (error);

	/* allow multiple writes without seeks */
	uio->uio_offset = 0;

	/* cleanup string and remove trailing newline */
	buf[xlen] = '\0';
	xlen = strlen(buf);
	if (xlen > 0 && buf[xlen-1] == '\n')
		buf[--xlen] = '\0';
	*buflenp = xlen;

	return (0);
}
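/*
 * Illustrative usage sketch (not part of the original file): a write-only
 * control file handler typically pulls the user's command string in with
 * vfs_getuserstr() and then matches it against a name table (see the
 * vfs_findname() sketch after the next function).  The buffer size below
 * is invented for the example; only vfs_getuserstr() itself is real.
 *
 *	char msg[33];		// room for 32 chars plus the forced nul
 *	int msglen = 32;
 *	int error;
 *
 *	error = vfs_getuserstr(uio, msg, &msglen);
 *	if (error == 0) {
 *		// msg is now nul-terminated, any trailing '\n' is removed,
 *		// and msglen holds the resulting length.
 *	}
 */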
/*
 * Look up buf (including its terminating nul) in the name map nm, which
 * is terminated by an entry with a NULL nm_name.
 */
vfs_namemap_t *
vfs_findname(vfs_namemap_t *nm, char *buf, int buflen)
{
	for (; nm->nm_name; nm++)
		if (bcmp(buf, nm->nm_name, buflen+1) == 0)
			return (nm);

	return (0);
}
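/*
 * Illustrative usage sketch (not part of the original file): callers build
 * a table of vfs_namemap_t entries terminated by a NULL name and scan it
 * with vfs_findname().  The table below is invented for the example; the
 * real command/signal tables live in the procfs ctl code.
 *
 *	static vfs_namemap_t example_signames[] = {
 *		{ "hup",  SIGHUP },
 *		{ "int",  SIGINT },
 *		{ "term", SIGTERM },
 *		{ NULL,   0 },
 *	};
 *
 *	vfs_namemap_t *nm = vfs_findname(example_signames, msg, msglen);
 *	if (nm != NULL) {
 *		// nm->nm_val holds the value mapped to the matched name
 *	}
 */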
void
procfs_exit(struct thread *td)
{
	struct pfsnode *pfs;
	struct vnode *vp;
	pid_t pid;

	KKASSERT(td->td_proc);
	pid = td->td_proc->p_pid;

	/*
	 * NOTE: We can't just vgone() the vnode any more, not while
	 *	 it may potentially still be active.  This will clean
	 *	 the vp and clear the mount and cause the new VOP subsystem
	 *	 to assert or panic when someone tries to do an operation
	 *	 on an open (exited) procfs descriptor.
	 *
	 * Prevent further operations on this pid by flagging pfs_pid with
	 * PFS_DEAD.  Note that a pfs_pid of 0 is used for nodes which do
	 * not track any particular pid.
	 *
	 * Use vx_get() to properly ref/lock a vp which may not have any
	 * refs and which may or may not already be reclaimed.  vx_put()
	 * will then properly deactivate it and cause it to be recycled.
	 *
	 * The hash table can also get ripped out from under us when
	 * we block so take the easy way out and restart the scan.
	 */
again:
	pfs = *PFSHASH(pid);
	while (pfs) {
		if (pfs->pfs_pid == pid) {
			vp = PFSTOV(pfs);
			vx_get(vp);
			pfs->pfs_pid |= PFS_DEAD; /* does not affect hash */
			vx_put(vp);
			goto again;
		}
		pfs = pfs->pfs_next;