kernel/dm: Include <sys/proc.h> for proc0.
[dragonfly.git] / sys/dev/disk/dm/dm_pdev.c
/* $NetBSD: dm_pdev.c,v 1.6 2010/01/04 00:19:08 haad Exp $ */

/*
 * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Hamsik.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/proc.h>

#include <dev/disk/dm/dm.h>

static TAILQ_HEAD(, dm_pdev) dm_pdev_list;

static struct lock dm_pdev_mutex;

static dm_pdev_t *dm_pdev_alloc(const char *);
static int dm_pdev_free(dm_pdev_t *);
static dm_pdev_t *dm_pdev_lookup_name(const char *);
/*
 * Find used pdev with name == dm_pdev_name.
 * Needs to be called with the dm_pdev_mutex held.
 */
static dm_pdev_t *
dm_pdev_lookup_name(const char *dm_pdev_name)
{
        dm_pdev_t *dmp;

        KKASSERT(dm_pdev_name != NULL);

        TAILQ_FOREACH(dmp, &dm_pdev_list, next_pdev) {
                if (strcmp(dm_pdev_name, dmp->name) == 0)
                        return dmp;
        }

        return NULL;
}

static int
dm_dk_lookup(const char *dev_name, struct vnode **vpp)
{
        struct nlookupdata nd;
        int error;

        error = nlookup_init(&nd, dev_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error)
                return error;

        error = vn_open(&nd, NULL, FREAD|FWRITE, 0);
        if (error) {
                nlookup_done(&nd);
                return error;
        }

        *vpp = nd.nl_open_vp;
        nd.nl_open_vp = NULL;
        nlookup_done(&nd);

        return 0;
}
/*
 * Since dm can have arbitrary stacking on any number of disks and any dm
 * volume is at least stacked onto another disk, we need to adjust the
 * dumping offset (which is a raw offset from the beginning of the lowest
 * physical disk) taking into account the offset of the underlying device,
 * which in turn takes into account the offset below it, etc.
 *
 * This function adjusts the dumping offset that is passed to the next
 * dev_ddump() so it is correct for that underlying device.
 */
off_t
dm_pdev_correct_dump_offset(dm_pdev_t *pdev, off_t offset)
{
        off_t noffset;

        noffset = pdev->pdev_pinfo.reserved_blocks +
            pdev->pdev_pinfo.media_offset / pdev->pdev_pinfo.media_blksize;
        noffset *= DEV_BSIZE;
        noffset += offset;

        return noffset;
}
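
/*
 * Worked example of the adjustment above, with assumed (not real) partinfo
 * values: reserved_blocks = 0, media_offset = 1048576 bytes,
 * media_blksize = 512, DEV_BSIZE = 512.
 *
 *	noffset = (0 + 1048576 / 512) * DEV_BSIZE + offset
 *	        = 2048 * 512 + offset
 *	        = 1048576 + offset
 *
 * i.e. a dump offset relative to this pdev is shifted by the pdev's own
 * starting position before being handed to the next dev_ddump() below it.
 */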
/*
 * Create an entry for the device with name dev_name and open a vnode for it.
 * If an entry already exists in the global TAILQ, only increment its
 * reference counter.
 */
dm_pdev_t *
dm_pdev_insert(const char *dev_name)
{
        dm_pdev_t *dmp;
        struct vattr va;
        int error;

        KKASSERT(dev_name != NULL);

        lockmgr(&dm_pdev_mutex, LK_EXCLUSIVE);
        dmp = dm_pdev_lookup_name(dev_name);

        if (dmp != NULL) {
                dmp->ref_cnt++;
                dmdebug("pdev %s already in tree\n", dev_name);
                lockmgr(&dm_pdev_mutex, LK_RELEASE);
                return dmp;
        }

        if ((dmp = dm_pdev_alloc(dev_name)) == NULL) {
                lockmgr(&dm_pdev_mutex, LK_RELEASE);
                return NULL;
        }

        error = dm_dk_lookup(dev_name, &dmp->pdev_vnode);
        if (error) {
                dmdebug("Lookup on %s failed with error %d!\n",
                    dev_name, error);
                dm_pdev_free(dmp);
                lockmgr(&dm_pdev_mutex, LK_RELEASE);
                return NULL;
        }
        dmp->ref_cnt = 1;

        if (dm_pdev_get_vattr(dmp, &va) == -1) {
                dmdebug("getattr on %s failed\n", dev_name);
                dm_pdev_free(dmp);
                lockmgr(&dm_pdev_mutex, LK_RELEASE);
                return NULL;
        }
        ksnprintf(dmp->udev_name, sizeof(dmp->udev_name),
            "%d:%d", va.va_rmajor, va.va_rminor);
        dmp->udev = dm_pdev_get_udev(dmp);

        /*
         * Get us the partinfo from the underlying device; it's needed for
         * dumps.
         */
        bzero(&dmp->pdev_pinfo, sizeof(dmp->pdev_pinfo));
        error = dev_dioctl(dmp->pdev_vnode->v_rdev, DIOCGPART,
            (void *)&dmp->pdev_pinfo, 0, proc0.p_ucred, NULL, NULL);
        if (!error) {
                struct partinfo *dpart = &dmp->pdev_pinfo;
                dmdebug("DIOCGPART offset=%ju size=%ju blocks=%ju blksize=%d\n",
                    dpart->media_offset,
                    dpart->media_size,
                    dpart->media_blocks,
                    dpart->media_blksize);
        } else {
                kprintf("dm_pdev_insert DIOCGPART failed %d\n", error);
        }

        TAILQ_INSERT_TAIL(&dm_pdev_list, dmp, next_pdev);
        lockmgr(&dm_pdev_mutex, LK_RELEASE);

        dmdebug("pdev %s %s 0x%016jx\n",
            dmp->name, dmp->udev_name, (uintmax_t)dmp->udev);

        return dmp;
}
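
/*
 * Minimal sketch of the intended calling pattern (hypothetical target code,
 * not part of this file): a target's init handler resolves its backing
 * device once via dm_pdev_insert() and keeps the returned pointer; inserting
 * the same path again only bumps ref_cnt on the existing pdev. The device
 * path below is an assumption for illustration.
 *
 *	dm_pdev_t *dmp;
 *
 *	dmp = dm_pdev_insert("/dev/da0s1a");	// open or re-reference pdev
 *	if (dmp == NULL)
 *		return ENOENT;			// lookup/open failed
 *	...use dmp->pdev_vnode, dmp->udev...
 *	dm_pdev_decr(dmp);			// drop the reference when done
 */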
/*
 * Allocate a new pdev structure if it is not already present and
 * set its name.
 */
static dm_pdev_t *
dm_pdev_alloc(const char *name)
{
        dm_pdev_t *dmp;

        dmp = kmalloc(sizeof(*dmp), M_DM, M_WAITOK | M_ZERO);
        if (dmp == NULL)
                return NULL;

        if (name)
                strlcpy(dmp->name, name, DM_MAX_DEV_NAME);

        return dmp;
}

/*
 * Destroy an allocated dm_pdev.
 */
static int
dm_pdev_free(dm_pdev_t *dmp)
{
        int err;

        KKASSERT(dmp != NULL);

        if (dmp->pdev_vnode != NULL) {
                err = vn_close(dmp->pdev_vnode, FREAD | FWRITE, NULL);
                if (err != 0) {
                        kfree(dmp, M_DM);
                        return err;
                }
        }
        kfree(dmp, M_DM);

        return 0;
}
/*
 * This function is called from targets' destroy() handler.
 * When a device is removed from the list, its reference counter is
 * decremented. Once the reference counter reaches 0, the dmp is removed
 * from the global list and its vnode is closed.
 *
 * Decrement the pdev reference counter; if it reaches 0, remove the pdev.
 */
int
dm_pdev_decr(dm_pdev_t *dmp)
{
        KKASSERT(dmp != NULL);

        /*
         * If this was the last reference, remove dmp from the
         * global list as well.
         */
        lockmgr(&dm_pdev_mutex, LK_EXCLUSIVE);

        if (--dmp->ref_cnt == 0) {
                TAILQ_REMOVE(&dm_pdev_list, dmp, next_pdev);
                lockmgr(&dm_pdev_mutex, LK_RELEASE);
                dm_pdev_free(dmp);
                return 0;
        }

        lockmgr(&dm_pdev_mutex, LK_RELEASE);
        return 0;
}
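
/*
 * Worked reference-count trace for the insert/decr pair (assumed scenario,
 * not part of this file): two tables stacked on the same backing disk share
 * a single pdev.
 *
 *	dm_pdev_insert("/dev/da0s1a")	-> new pdev, ref_cnt = 1
 *	dm_pdev_insert("/dev/da0s1a")	-> existing pdev, ref_cnt = 2
 *	dm_pdev_decr(dmp)		-> ref_cnt = 1, pdev stays open
 *	dm_pdev_decr(dmp)		-> ref_cnt = 0, removed from the list,
 *					   vnode closed via dm_pdev_free()
 */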
uint64_t
dm_pdev_get_udev(dm_pdev_t *dmp)
{
        struct vattr va;
        int ret;

        if (dmp->pdev_vnode == NULL)
                return (uint64_t)-1;

        ret = dm_pdev_get_vattr(dmp, &va);
        if (ret)
                return (uint64_t)-1;

        ret = makeudev(va.va_rmajor, va.va_rminor);

        return ret;
}

int
dm_pdev_get_vattr(dm_pdev_t *dmp, struct vattr *vap)
{
        int ret;

        if (dmp->pdev_vnode == NULL)
                return -1;

        KKASSERT(vap);
        ret = VOP_GETATTR(dmp->pdev_vnode, vap);
        if (ret)
                return -1;

        return 0;
}
/*
 * Initialize the pdev subsystem.
 */
int
dm_pdev_init(void)
{
        TAILQ_INIT(&dm_pdev_list);	/* initialize global pdev list */
        lockinit(&dm_pdev_mutex, "dmpdev", 0, LK_CANRECURSE);

        return 0;
}

/*
 * Destroy all existing pdevs in device-mapper.
 */
int
dm_pdev_uninit(void)
{
        dm_pdev_t *dmp;

        lockmgr(&dm_pdev_mutex, LK_EXCLUSIVE);
        while ((dmp = TAILQ_FIRST(&dm_pdev_list)) != NULL) {
                TAILQ_REMOVE(&dm_pdev_list, dmp, next_pdev);
                dm_pdev_free(dmp);
        }
        KKASSERT(TAILQ_EMPTY(&dm_pdev_list));
        lockmgr(&dm_pdev_mutex, LK_RELEASE);

        lockuninit(&dm_pdev_mutex);
        return 0;
}