/* $NetBSD: dm_table.c,v 1.5 2010/01/04 00:19:08 haad Exp $ */

/*
 * Copyright (c) 2010-2011 Alex Hornung <alex@alexhornung.com>
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Hamsik.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <cpu/atomic.h>
#include <dev/disk/dm/dm.h>

/*
 * There are two types of users of this interface:
 *
 * a) Readers such as
 *    dmstrategy, dmgetdisklabel, dmsize, dm_dev_status_ioctl,
 *    dm_table_deps_ioctl, dm_table_status_ioctl, dm_table_reload_ioctl
 *
 * b) Writers such as
 *    dm_dev_remove_ioctl, dm_dev_resume_ioctl, dm_table_clear_ioctl
 *
 * Writers can work with table_head only when there are no readers. We
 * simply use shared/exclusive locking to ensure this.
 */

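/*
 * Illustrative reader-side sketch (not part of the build): a hypothetical
 * helper showing the access discipline described above.  A reader takes the
 * shared lock and bumps io_cnt via dm_table_get_entry(), walks the entries,
 * and drops both via dm_table_release().  The helper name and the way it
 * consumes the entries are assumptions for illustration only.
 */
#if 0
static uint64_t
example_count_sectors(dm_table_head_t *head)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint64_t sectors = 0;

	/* Shared lock is held and io_cnt is elevated from here ... */
	tbl = dm_table_get_entry(head, DM_TABLE_ACTIVE);

	TAILQ_FOREACH(table_en, tbl, next)
		sectors += table_en->length;

	/* ... until the matching release. */
	dm_table_release(head, DM_TABLE_ACTIVE);

	return sectors;
}
#endif
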
/*
 * Increment the table user reference counter and return the index of the
 * table selected by table_id.
 * DM_TABLE_ACTIVE returns the active table index.
 * DM_TABLE_INACTIVE returns the inactive table index.
 */
static int
dm_table_busy(dm_table_head_t *head, uint8_t table_id)
{
	uint8_t id;

	id = 0;

	lockmgr(&head->table_mtx, LK_SHARED);

	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	atomic_add_int(&head->io_cnt, 1);

	return id;
}

/*
 * Release the table lock and eventually wake up all waiters.
 */
static void
dm_table_unbusy(dm_table_head_t *head)
{
	KKASSERT(head->io_cnt != 0);

	atomic_subtract_int(&head->io_cnt, 1);

	lockmgr(&head->table_mtx, LK_RELEASE);
}

/*
 * Return the table selected by table_id to the caller and increment the
 * io_cnt reference counter.
 */
dm_table_t *
dm_table_get_entry(dm_table_head_t *head, uint8_t table_id)
{
	uint8_t id;

	id = dm_table_busy(head, table_id);

	return &head->tables[id];
}

/*
 * Decrement the io reference counter and release the shared lock.
 */
void
dm_table_release(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_unbusy(head);
}

/*
 * Switch the inactive table to active mode. Has to wait until io_cnt is 0.
 */
void
dm_table_switch_tables(dm_table_head_t *head)
{
	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	head->cur_active_table = 1 - head->cur_active_table;

	lockmgr(&head->table_mtx, LK_RELEASE);
}
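
/*
 * Illustrative writer-side sketch (not part of the build): roughly how a
 * resume-style path could flip a freshly loaded inactive table into place
 * and then tear down whatever remains in the old slot.  The helper name is
 * hypothetical; the real sequencing lives in the ioctl handlers (e.g.
 * dm_dev_resume_ioctl), not here.
 */
#if 0
static int
example_activate_new_table(dm_table_head_t *head)
{
	/*
	 * Assume the new table was already loaded into the inactive slot
	 * (e.g. by a table-load ioctl).  Flip the active index under the
	 * exclusive lock ...
	 */
	dm_table_switch_tables(head);

	/* ... then destroy the previous table, which is now the inactive one. */
	return dm_table_destroy(head, DM_TABLE_INACTIVE);
}
#endif
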
/*
 * Destroy all table data. This function can run only when there are no
 * readers on the table lists.
 */
int
dm_table_destroy(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint8_t id;

	lockmgr(&head->table_mtx, LK_EXCLUSIVE);

	dmdebug("table_id=%d io_cnt=%d\n", table_id, head->io_cnt);

	if (table_id == DM_TABLE_ACTIVE)
		id = head->cur_active_table;
	else
		id = 1 - head->cur_active_table;

	tbl = &head->tables[id];

	while ((table_en = TAILQ_FIRST(tbl)) != NULL) {
		TAILQ_REMOVE(tbl, table_en, next);

		if (table_en->target->destroy)
			table_en->target->destroy(table_en);
		table_en->target_config = NULL;

		dm_table_free_deps(table_en);

		/* Decrement the refcount for the target. */
		dm_target_unbusy(table_en->target);

		kfree(table_en, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(tbl));

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}

/*
 * Return the length of the active or inactive table in the device.
 */
static uint64_t
_dm_table_size(dm_table_head_t *head, int table)
{
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	uint64_t length;

	length = 0;

	/* Select the requested table. */
	tbl = dm_table_get_entry(head, table);

	/* Sum the lengths of all table entries. */
	TAILQ_FOREACH(table_en, tbl, next) {
		length += table_en->length;
	}

	dm_table_unbusy(head);

	return length;
}

uint64_t
dm_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_ACTIVE);
}

uint64_t
dm_inactive_table_size(dm_table_head_t *head)
{
	return _dm_table_size(head, DM_TABLE_INACTIVE);
}
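
/*
 * Illustrative sketch (not part of the build): a hypothetical check for
 * whether a newly loaded (still inactive) table would change the device
 * size once it is switched in, which is the kind of question resume-style
 * code asks before re-sizing the disk.  The helper name is an assumption.
 */
#if 0
static int
example_size_will_change(dm_table_head_t *head)
{
	uint64_t cur, pending;

	cur = dm_table_size(head);		/* active table length */
	pending = dm_inactive_table_size(head);	/* loaded, not yet active */

	return (cur != pending);
}
#endif
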
/*
 * Return > 0 (the number of entries) if the table has at least one table
 * entry, and 0 if it does not. The target count returned here doesn't have
 * to be accurate by the time userspace receives it (a dm_dev_resume_ioctl
 * can run after we return), therefore it is informative only.
 */
int
dm_table_get_target_count(dm_table_head_t *head, uint8_t table_id)
{
	dm_table_entry_t *table_en;
	dm_table_t *tbl;
	uint32_t target_count;

	target_count = 0;

	tbl = dm_table_get_entry(head, table_id);

	TAILQ_FOREACH(table_en, tbl, next)
		target_count++;

	dm_table_unbusy(head);

	return target_count;
}

/*
 * Initialize the dm_table_head_t structure; try to keep this structure as
 * opaque as possible.
 */
void
dm_table_head_init(dm_table_head_t *head)
{
	head->cur_active_table = 0;
	head->io_cnt = 0;

	/* Initialize tables. */
	TAILQ_INIT(&head->tables[0]);
	TAILQ_INIT(&head->tables[1]);

	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
}
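
/*
 * Illustrative lifecycle sketch (not part of the build): the table_head is
 * initialized once when its device is created and is torn down only after
 * both table slots have been destroyed, which is what the assertions in
 * dm_table_head_destroy() below rely on.  The helper name is hypothetical.
 */
#if 0
static void
example_device_shutdown(dm_table_head_t *head)
{
	/* Empty both slots first; this requires that no readers remain. */
	dm_table_destroy(head, DM_TABLE_ACTIVE);
	dm_table_destroy(head, DM_TABLE_INACTIVE);

	/* Now io_cnt must be 0 and the lock must be unowned. */
	dm_table_head_destroy(head);
}
#endif
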
/*
 * Destroy all fields in table_head.
 */
void
dm_table_head_destroy(dm_table_head_t *head)
{
	KKASSERT(!lockinuse(&head->table_mtx));

	/* Tables don't exist when this routine is called, therefore it
	 * doesn't make sense to have io_cnt != 0. */
	KKASSERT(head->io_cnt == 0);

	lockuninit(&head->table_mtx);
}

void
dm_table_init_target(dm_table_entry_t *table_en, void *cfg)
{
	table_en->target_config = cfg;
}

int
dm_table_add_deps(dm_table_entry_t *table_en, dm_pdev_t *pdev)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	KKASSERT(pdev);

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	TAILQ_FOREACH(map, &table_en->pdev_maps, next) {
		if (map->data.pdev->udev == pdev->udev) {
			lockmgr(&head->table_mtx, LK_RELEASE);
			return -1;
		}
	}

	map = kmalloc(sizeof(*map), M_DM, M_WAITOK | M_ZERO);
	map->data.pdev = pdev;
	TAILQ_INSERT_TAIL(&table_en->pdev_maps, map, next);

	lockmgr(&head->table_mtx, LK_RELEASE);

	return 0;
}
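
/*
 * Illustrative sketch (not part of the build): how a target init routine
 * might record the physical device an entry maps to, so that table-deps
 * queries can later report it.  dm_table_add_deps() refuses duplicates by
 * returning -1 when the pdev's udev is already on the entry's list.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_target_record_dep(dm_table_entry_t *table_en, dm_pdev_t *pdev)
{
	/* A second add of the same underlying device is simply refused. */
	if (dm_table_add_deps(table_en, pdev) < 0)
		dmdebug("pdev already recorded for this table entry\n");
}
#endif
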
void
dm_table_free_deps(dm_table_entry_t *table_en)
{
	dm_table_head_t *head;
	dm_mapping_t *map;

	head = &table_en->dev->table_head;
	lockmgr(&head->table_mtx, LK_SHARED);

	while ((map = TAILQ_FIRST(&table_en->pdev_maps)) != NULL) {
		TAILQ_REMOVE(&table_en->pdev_maps, map, next);
		kfree(map, M_DM);
	}
	KKASSERT(TAILQ_EMPTY(&table_en->pdev_maps));

	lockmgr(&head->table_mtx, LK_RELEASE);
}