sys/dev/disk/dm: Remove dm_table_init_target() call with NULL
[dragonfly.git] / sys / dev / disk / dm / dm_table.c
blob44c400c7294de3c0f3d7a5a2ef9fb28a633b3665
1 /* $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $ */
3 /*
4 * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
5 * Copyright (c) 2008 The NetBSD Foundation, Inc.
6 * All rights reserved.
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Adam Hamsik.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/malloc.h>
34 #include <cpu/atomic.h>
35 #include <dev/disk/dm/dm.h>
/*
 * There are two types of users of this interface:
 *
 * a) Readers such as
 *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
 *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
 *
 * b) Writers such as
 *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
 *
 * Writers can work with table_head only when there are no readers. We
 * simply use shared/exclusive locking to ensure this.
 */
52 * Function to increment table user reference counter. Return id
53 * of table_id table.
54 * DM_TABLE_ACTIVE will return active table id.
55 * DM_TABLE_INACTIVE will return inactive table id.
57 static int
58 dm_table_busy(dm_table_head_t *head, uint8_t table_id)
60 uint8_t id;
62 id = 0;
64 lockmgr(&head->table_mtx, LK_SHARED);
66 if (table_id == DM_TABLE_ACTIVE)
67 id = head->cur_active_table;
68 else
69 id = 1 - head->cur_active_table;
71 atomic_add_int(&head->io_cnt, 1);
73 return id;
77 * Function release table lock and eventually wakeup all waiters.
79 static void
80 dm_table_unbusy(dm_table_head_t *head)
82 KKASSERT(head->io_cnt != 0);
84 atomic_subtract_int(&head->io_cnt, 1);
86 lockmgr(&head->table_mtx, LK_RELEASE);
90 * Return current active table to caller, increment io_cnt reference counter.
92 dm_table_t *
93 dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
95 uint8_t id;
97 id = dm_table_busy(head, table_id);
99 return &head->tables[id];
103 * Decrement io reference counter and release shared lock.
105 void
106 dm_table_release(dm_table_head_t *head, uint8_t table_id)
108 dm_table_unbusy(head);
112 * Switch table from inactive to active mode. Have to wait until io_cnt is 0.
114 void
115 dm_table_switch_tables(dm_table_head_t *head)
117 lockmgr(&head->table_mtx, LK_EXCLUSIVE);
119 head->cur_active_table = 1 - head->cur_active_table;
121 lockmgr(&head->table_mtx, LK_RELEASE);
125 * Destroy all table data. This function can run when there are no
126 * readers on table lists.
129 dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
131 dm_table_t *tbl;
132 dm_table_entry_t *table_en;
133 uint8_t id;
135 lockmgr(&head->table_mtx, LK_EXCLUSIVE);
137 dmdebug("table_id=%d io_cnt=%d\n", table_id, head->io_cnt);
139 if (table_id == DM_TABLE_ACTIVE)
140 id = head->cur_active_table;
141 else
142 id = 1 - head->cur_active_table;
144 tbl = &head->tables[id];
146 while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
147 TAILQ_REMOVE(tbl, table_en, next);
149 if (table_en->target->destroy(table_en) == 0)
150 table_en->target_config = NULL;
152 dm_table_free_deps(table_en);
154 /* decrement the refcount for the target */
155 dm_target_unbusy(table_en->target);
157 kfree(table_en, M_DM);
159 KKASSERT(TAILQ_EMPTY(tbl));
161 lockmgr(&head->table_mtx, LK_RELEASE);
163 return 0;
167 * Return length of active or inactive table in device.
169 static uint64_t
170 _dm_table_size(dm_table_head_t *head, int table)
172 dm_table_t *tbl;
173 dm_table_entry_t *table_en;
174 uint64_t length;
176 length = 0;
178 /* Select active table */
179 tbl = dm_table_get_entry(head, table);
182 * Find out what tables I want to select.
183 * if length => rawblkno then we should used that table.
185 TAILQ_FOREACH(table_en, tbl, next) {
186 length += table_en->length;
189 dm_table_unbusy(head);
191 return length;
194 uint64_t
195 dm_table_size(dm_table_head_t *head)
197 return _dm_table_size(head, DM_TABLE_ACTIVE);
200 uint64_t
201 dm_inactive_table_size(dm_table_head_t *head)
203 return _dm_table_size(head, DM_TABLE_INACTIVE);
207 * Return > 0 if table is at least one table entry (returns number of entries)
208 * and return 0 if there is not. Target count returned from this function
209 * doesn't need to be true when userspace user receive it (after return
210 * there can be dm_dev_resume_ioctl), therefore this is only informative.
213 dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
215 dm_table_entry_t *table_en;
216 dm_table_t *tbl;
217 uint32_t target_count;
219 target_count = 0;
221 tbl = dm_table_get_entry(head, table_id);
223 TAILQ_FOREACH(table_en, tbl, next)
224 target_count++;
226 dm_table_unbusy(head);
228 return target_count;
232 * Initialize dm_table_head_t structures, I'm trying to keep this structure as
233 * opaque as possible.
235 void
236 dm_table_head_init(dm_table_head_t *head)
238 head->cur_active_table = 0;
239 head->io_cnt = 0;
241 /* Initialize tables. */
242 TAILQ_INIT(&head->tables[0]);
243 TAILQ_INIT(&head->tables[1]);
245 lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
249 * Destroy all variables in table_head
251 void
252 dm_table_head_destroy(dm_table_head_t *head)
254 KKASSERT(lockcount(&head->table_mtx) == 0);
256 /* tables don't exist when I call this routine, therefore it
257 * doesn't make sense to have io_cnt != 0 */
258 KKASSERT(head->io_cnt == 0);
260 lockuninit(&head->table_mtx);
263 void
264 dm_table_init_target(dm_table_entry_t *table_en, void *cfg)
266 table_en->target_config = cfg;
270 dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
272 dm_table_head_t *head;
273 dm_mapping_t *map;
275 KKASSERT(pdev);
277 head = &table_en->dev->table_head;
278 lockmgr(&head->table_mtx, LK_SHARED);
280 TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
281 if (map->data.pdev->udev == pdev->udev) {
282 lockmgr(&head->table_mtx, LK_RELEASE);
283 return -1;
287 map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
288 map->data.pdev = pdev;
289 TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);
291 lockmgr(&head->table_mtx, LK_RELEASE);
293 return 0;
296 void
297 dm_table_free_deps(dm_table_entry_t *table_en)
299 dm_table_head_t *head;
300 dm_mapping_t *map;
302 head = &table_en->dev->table_head;
303 lockmgr(&head->table_mtx, LK_SHARED);
305 while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
306 TAILQ_REMOVE(&table_en->pdev_maps, map, next);
307 kfree(map, M_DM);
309 KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));
311 lockmgr(&head->table_mtx, LK_RELEASE);