/*
 * (The copyright below applies to ufs_access())
 *
 * Copyright (c) 1982, 1986, 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_vnops.c	8.27 (Berkeley) 5/27/95
 * $DragonFly: src/sys/kern/vfs_helper.c,v 1.5 2008/05/25 18:34:46 dillon Exp $
 */

#include "opt_quota.h"
#include "opt_suiddir.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/file.h>		/* XXX */
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/sysctl.h>
#include <sys/sfbuf.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page2.h>

#ifdef LWBUF_IS_OPTIMAL

static int vm_read_shortcut_enable = 1;
static long vm_read_shortcut_count;
static long vm_read_shortcut_failed;
SYSCTL_INT(_vm, OID_AUTO, read_shortcut_enable, CTLFLAG_RW,
	   &vm_read_shortcut_enable, 0, "Direct vm_object vop_read shortcut");
SYSCTL_LONG(_vm, OID_AUTO, read_shortcut_count, CTLFLAG_RW,
	    &vm_read_shortcut_count, 0, "Statistics");
SYSCTL_LONG(_vm, OID_AUTO, read_shortcut_failed, CTLFLAG_RW,
	    &vm_read_shortcut_failed, 0, "Statistics");

#endif

/*
 * vop_helper_access()
 *
 *	Provide standard UNIX semantics for VOP_ACCESS, but without the quota
 *	code.  This procedure was basically pulled out of UFS.
 */
int
vop_helper_access(struct vop_access_args *ap, uid_t ino_uid, gid_t ino_gid,
		  mode_t ino_mode, u_int32_t ino_flags)
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	mode_t mask, mode = ap->a_mode;
	gid_t *gp;
	int i;
	uid_t proc_uid;
	gid_t proc_gid;

	if (ap->a_flags & AT_EACCESS) {
		proc_uid = cred->cr_uid;
		proc_gid = cred->cr_gid;
	} else {
		proc_uid = cred->cr_ruid;
		proc_gid = cred->cr_rgid;
	}

	/*
	 * Disallow write attempts on read-only filesystems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
		case VDATABASE:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}

	/* If immutable bit set, nobody gets to write it. */
	if ((mode & VWRITE) && (ino_flags & IMMUTABLE))
		return (EPERM);

	/* Otherwise, user id 0 always gets access. */
	if (proc_uid == 0)
		return (0);

	mask = 0;

	/* Otherwise, check the owner. */
	if (proc_uid == ino_uid) {
		if (mode & VEXEC)
			mask |= S_IXUSR;
		if (mode & VREAD)
			mask |= S_IRUSR;
		if (mode & VWRITE)
			mask |= S_IWUSR;
		return ((ino_mode & mask) == mask ? 0 : EACCES);
	}

	/*
	 * Otherwise, check the groups.
	 * We must special-case the primary group to, if needed, check against
	 * the real gid and not the effective one.
	 */
	if (proc_gid == ino_gid) {
		if (mode & VEXEC)
			mask |= S_IXGRP;
		if (mode & VREAD)
			mask |= S_IRGRP;
		if (mode & VWRITE)
			mask |= S_IWGRP;
		return ((ino_mode & mask) == mask ? 0 : EACCES);
	}
	for (i = 1, gp = &cred->cr_groups[1]; i < cred->cr_ngroups; i++, gp++)
		if (ino_gid == *gp) {
			if (mode & VEXEC)
				mask |= S_IXGRP;
			if (mode & VREAD)
				mask |= S_IRGRP;
			if (mode & VWRITE)
				mask |= S_IWGRP;
			return ((ino_mode & mask) == mask ? 0 : EACCES);
		}

	/* Otherwise, check everyone else. */
	if (mode & VEXEC)
		mask |= S_IXOTH;
	if (mode & VREAD)
		mask |= S_IROTH;
	if (mode & VWRITE)
		mask |= S_IWOTH;
	return ((ino_mode & mask) == mask ? 0 : EACCES);
}
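
/*
 * Example usage (an illustrative sketch; the "myfs" node structure and
 * its field names are hypothetical): a filesystem's VOP_ACCESS method
 * normally just forwards its in-memory inode's identity and mode to
 * this helper.
 *
 *	static int
 *	myfs_access(struct vop_access_args *ap)
 *	{
 *		struct myfs_node *node = VTOMYFSNODE(ap->a_vp);
 *
 *		return (vop_helper_access(ap, node->mn_uid, node->mn_gid,
 *					  node->mn_mode, node->mn_flags));
 *	}
 */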

int
vop_helper_setattr_flags(u_int32_t *ino_flags, u_int32_t vaflags,
			 uid_t uid, struct ucred *cred)
{
	int error;

	/*
	 * If the uid doesn't match, only a privileged user can change
	 * the flags.
	 */
	if (cred->cr_uid != uid &&
	    (error = priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0))) {
		return(error);
	}
	if (cred->cr_uid == 0 &&
	    (!jailed(cred) || jail_chflags_allowed)) {
		if ((*ino_flags & (SF_NOUNLINK|SF_IMMUTABLE|SF_APPEND)) &&
		    securelevel > 0)
			return (EPERM);
		*ino_flags = vaflags;
	} else {
		if (*ino_flags & (SF_NOUNLINK|SF_IMMUTABLE|SF_APPEND) ||
		    (vaflags & UF_SETTABLE) != vaflags)
			return (EPERM);
		*ino_flags &= SF_SETTABLE;
		*ino_flags |= vaflags & UF_SETTABLE;
	}
	return(0);
}
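
/*
 * Example usage (a sketch only; "vap", "node", and the myfs field names
 * are hypothetical): a VOP_SETATTR implementation would validate a flags
 * change with this helper before committing it to the inode.
 *
 *	if (vap->va_flags != VNOVAL) {
 *		error = vop_helper_setattr_flags(&node->mn_flags,
 *						 vap->va_flags,
 *						 node->mn_uid, ap->a_cred);
 *		if (error)
 *			return (error);
 *	}
 */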

/*
 * This helper function may be used by VFSs to implement UNIX initial
 * ownership semantics when creating new objects inside directories.
 */
uid_t
vop_helper_create_uid(struct mount *mp, mode_t dmode, uid_t duid,
		      struct ucred *cred, mode_t *modep)
{
#ifdef SUIDDIR
	if ((mp->mnt_flag & MNT_SUIDDIR) && (dmode & S_ISUID) &&
	    duid != cred->cr_uid && duid) {
		*modep &= ~07111;
		return(duid);
	}
#endif
	return(cred->cr_uid);
}
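
/*
 * Example usage (a sketch; the myfs names are hypothetical): in a VFS's
 * create or mkdir path the owner of the new object, and its possibly
 * stripped mode, are derived from the parent directory's attributes.
 *
 *	mode_t mode = vap->va_mode;
 *
 *	new_node->mn_uid = vop_helper_create_uid(dvp->v_mount,
 *						 dnode->mn_mode,
 *						 dnode->mn_uid,
 *						 ap->a_cred, &mode);
 *	new_node->mn_mode = mode;
 */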

/*
 * This helper may be used by VFSs to implement unix chmod semantics.
 */
int
vop_helper_chmod(struct vnode *vp, mode_t new_mode, struct ucred *cred,
		 uid_t cur_uid, gid_t cur_gid, mode_t *cur_modep)
{
	int error;

	if (cred->cr_uid != cur_uid) {
		error = priv_check_cred(cred, PRIV_VFS_CHMOD, 0);
		if (error)
			return (error);
	}
	if (cred->cr_uid) {
		if (vp->v_type != VDIR && (*cur_modep & S_ISTXT))
			return (EFTYPE);
		if (!groupmember(cur_gid, cred) && (*cur_modep & S_ISGID))
			return (EPERM);
	}
	*cur_modep &= ~ALLPERMS;
	*cur_modep |= new_mode & ALLPERMS;
	return(0);
}
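
/*
 * Example usage (a sketch; the myfs names are hypothetical): the helper
 * updates the mode in place, so a VOP_SETATTR implementation only needs
 * to check the error and mark the inode modified.
 *
 *	if (vap->va_mode != (mode_t)VNOVAL) {
 *		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
 *					 node->mn_uid, node->mn_gid,
 *					 &node->mn_mode);
 *		if (error)
 *			return (error);
 *	}
 */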

/*
 * This helper may be used by VFSs to implement unix chown semantics.
 */
int
vop_helper_chown(struct vnode *vp, uid_t new_uid, gid_t new_gid,
		 struct ucred *cred,
		 uid_t *cur_uidp, gid_t *cur_gidp, mode_t *cur_modep)
{
	gid_t ogid;
	uid_t ouid;
	int error;

	if (new_uid == (uid_t)VNOVAL)
		new_uid = *cur_uidp;
	if (new_gid == (gid_t)VNOVAL)
		new_gid = *cur_gidp;

	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be privileged or the call fails.
	 */
	if ((cred->cr_uid != *cur_uidp || new_uid != *cur_uidp ||
	    (new_gid != *cur_gidp && !(cred->cr_gid == new_gid ||
	    groupmember(new_gid, cred)))) &&
	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0))) {
		return (error);
	}
	ogid = *cur_gidp;
	ouid = *cur_uidp;
	/* XXX QUOTA CODE */
	*cur_uidp = new_uid;
	*cur_gidp = new_gid;
	/* XXX QUOTA CODE */

	/*
	 * DragonFly clears both SUID and SGID if either the owner or
	 * group is changed and root isn't doing it.  If root is doing
	 * it we do not clear SUID/SGID.
	 */
	if (cred->cr_uid != 0 && (ouid != new_uid || ogid != new_gid))
		*cur_modep &= ~(S_ISUID | S_ISGID);
	return(0);
}
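
/*
 * Example usage (a sketch; the myfs names are hypothetical): VNOVAL uids
 * and gids are handled by the helper itself, so a VOP_SETATTR path can
 * pass the vattr fields straight through and have the inode's uid, gid
 * and mode updated in place.
 *
 *	error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
 *				 ap->a_cred, &node->mn_uid, &node->mn_gid,
 *				 &node->mn_mode);
 *	if (error)
 *		return (error);
 */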

#ifdef LWBUF_IS_OPTIMAL

/*
 * A VFS can call this function to try to dispose of a read request
 * directly from the VM system, pretty much bypassing almost all VFS
 * overhead except for atime updates.
 *
 * If 0 is returned some or all of the uio was handled.  The caller must
 * check the uio and handle the remainder.
 *
 * The caller must fail on a non-zero error.
 */
int
vop_helper_read_shortcut(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_object_t obj;
	vm_page_t m;
	int offset;
	int n;
	int error;

	vp = ap->a_vp;
	uio = ap->a_uio;

	/*
	 * We can't short-cut if there is no VM object or this is a special
	 * UIO_NOCOPY read (typically from VOP_STRATEGY()).  We also can't
	 * do this if we cannot extract the filesize from the vnode.
	 */
	if (vm_read_shortcut_enable == 0)
		return(0);
	if (vp->v_object == NULL || uio->uio_segflg == UIO_NOCOPY)
		return(0);
	if (vp->v_filesize == NOOFFSET)
		return(0);
	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Iterate the uio on a page-by-page basis
	 *
	 * XXX can we leave the object held shared during the uiomove()?
	 */
	++vm_read_shortcut_count;
	obj = vp->v_object;
	vm_object_hold_shared(obj);

	error = 0;
	while (uio->uio_resid && error == 0) {
		offset = (int)uio->uio_offset & PAGE_MASK;
		n = PAGE_SIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (vp->v_filesize < uio->uio_offset)
			break;
		if (uio->uio_offset + n > vp->v_filesize)
			n = vp->v_filesize - uio->uio_offset;
		if (n == 0)
			break;	/* hit EOF */

		m = vm_page_lookup_busy_try(obj, OFF_TO_IDX(uio->uio_offset),
					    FALSE, &error);
		if (error || m == NULL) {
			++vm_read_shortcut_failed;
			error = 0;
			break;
		}
		if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
			++vm_read_shortcut_failed;
			vm_page_wakeup(m);
			break;
		}
		lwb = lwbuf_alloc(m, &lwb_cache);

		/*
		 * Use a no-fault uiomove() to avoid deadlocking against
		 * our VM object (which could livelock on the same object
		 * due to shared-vs-exclusive), or deadlocking against
		 * our busied page.  Returns EFAULT on any fault which
		 * winds up diving a vnode.
		 */
		error = uiomove_nofault((char *)lwbuf_kva(lwb) + offset,
					n, uio);

		vm_page_flag_set(m, PG_REFERENCED);
		lwbuf_free(lwb);
		vm_page_wakeup(m);
	}
	vm_object_drop(obj);

	/*
	 * Ignore EFAULT since we used uiomove_nofault(), causes caller
	 * to fall-back to normal code for this case.
	 */
	if (error == EFAULT)
		error = 0;

	return (error);
}

#else

/*
 * If lwbuf's aren't optimal then it's best to just use the buffer
 * cache.
 */
int
vop_helper_read_shortcut(struct vop_read_args *ap)
{
	return(0);
}

#endif
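
/*
 * Example usage (a sketch; the myfs names are hypothetical): per the
 * contract documented above, a VFS read routine tries the shortcut
 * first, fails on a hard error, and falls back to its normal
 * buffer-cache path for any residual portion of the uio.
 *
 *	static int
 *	myfs_read(struct vop_read_args *ap)
 *	{
 *		struct uio *uio = ap->a_uio;
 *		int error;
 *
 *		error = vop_helper_read_shortcut(ap);
 *		if (error)
 *			return (error);
 *		if (uio->uio_resid == 0)
 *			return (0);
 *		... continue with bread()/uiomove() for the remainder ...
 *	}
 */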