/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#ifndef	_SYS_MD_MIRROR_H
#define	_SYS_MD_MIRROR_H

#include <sys/callb.h>
#include <sys/lvm/mdvar.h>
#include <sys/lvm/md_mirror_shared.h>
#include <sys/lvm/md_rename.h>
#ifdef	_KERNEL
#include <sys/sunddi.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * following bits are used in status word in the common section
 * of unit structure
 */
#define	SMS_IS(sm, state)	(((sm)->sm_state & (state)) != 0)
#define	SMS_BY_INDEX_IS(un, index, state)	\
		(((un)->un_sm[(index)].sm_state & (state)) != 0)

#define	SMS_BY_INDEX_IS_TARGET(un, index)	\
		((un)->un_sm[(index)].sm_flags & MD_SM_RESYNC_TARGET)

#define	SUBMIRROR_IS_READABLE(un, isubmirror)	\
	((((un)->un_sm[(isubmirror)].sm_state & SMS_IGNORE) == 0) && \
	((un)->un_sm[(isubmirror)].sm_state &	\
	(SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC)))

#define	SUBMIRROR_IS_WRITEABLE(un, isubmirror)	\
	((un)->un_sm[(isubmirror)].sm_state &	\
	(SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC | \
	SMS_ATTACHED_RESYNC | SMS_OFFLINE_RESYNC))
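
/*
 * Usage sketch (illustrative only; "un" is a hypothetical mm_unit_t *):
 * a read path can scan for the first submirror that is safe to read
 * from, for example:
 *
 *	int smi;
 *	for (smi = 0; smi < NMIRROR; smi++) {
 *		if (SUBMIRROR_IS_READABLE(un, smi))
 *			break;
 *	}
 */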
/*
 * Default resync block size for MN resync messages
 */
#define	MD_DEF_RESYNC_BLK_SZ	8192

/*
 * macro to test if the current block is within the current resync region
 */
#define	IN_RESYNC_REGION(un, ps) \
	((un->un_rs_prev_overlap != NULL) && (ps->ps_firstblk >= \
	un->un_rs_prev_overlap->ps_firstblk) && \
	(ps->ps_lastblk <= un->un_rs_prev_overlap->ps_lastblk))
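
/*
 * For example (illustrative values only): if the previous resync overlap
 * request spans blocks 1000-1999, a parent save structure "ps" with
 * ps_firstblk == 1200 and ps_lastblk == 1500 lies entirely inside that
 * range, so IN_RESYNC_REGION(un, ps) is true; a request touching block
 * 2100 falls outside it and evaluates false.
 */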
/*
 * Default resync update interval (in minutes).
 */
#define	MD_DEF_MIRROR_RESYNC_INTVL	5

/*
 * Defines for flags argument in function set_sm_comp_state()
 */
#define	MD_STATE_NO_XMIT	0x0000	/* Local action, (sent from master) */
#define	MD_STATE_XMIT		0x0001	/* Non-local action, send to master */
#define	MD_STATE_WMUPDATE	0x0002	/* Action because of watermark update */
#define	MD_STATE_OCHELD		0x0004	/* open/close lock held */

/*
 * Defines for flags argument in function check_comp_4_hotspares()
 */
#define	MD_HOTSPARE_NO_XMIT	0x0000	/* Local action, (sent from master) */
#define	MD_HOTSPARE_XMIT	0x0001	/* Non-local action, send to master */
#define	MD_HOTSPARE_WMUPDATE	0x0002	/* Action because of watermark update */
#define	MD_HOTSPARE_LINKHELD	0x0004	/* md_link_rw lock held */

/*
 * Defines for argument in function send_mn_resync_done_message()
 */
#define	RESYNC_ERR		0x1
#define	CLEAR_OPT_NOT_DONE	0x2

/*
 * Defines for argument in function resync_read_blk_range()
 */
#define	MD_FIRST_RESYNC_NEXT	0x1
#define	MD_SEND_MESS_XMIT	0x2
#define	MD_RESYNC_FLAG_ERR	0x4

/*
 * Define for argument in function wait_for_overlaps()
 */
#define	MD_OVERLAP_ALLOW_REPEAT	0x1	/* Allow if ps already in tree */
#define	MD_OVERLAP_NO_REPEAT	0	/* ps must not already be in tree */

/*
 * Define for max retries of mirror_owner
 */
#define	MD_OWNER_RETRIES	10

/*
 * mm_submirror32_od and mm_unit32_od are used only for 32 bit old format
 */
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif
typedef struct mm_submirror32_od {	/* submirrors */
	mdkey_t		sm_key;
	dev32_t		sm_dev;
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	caddr32_t	xx_sm_shared_by_blk;	/* really (void *) */
	caddr32_t	xx_sm_shared_by_indx;	/* really (void *) */
	caddr32_t	xx_sm_get_component_count;
	caddr32_t	xx_sm_get_bcss;		/* block count skip size */
	md_m_shared32_od_t sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	struct timeval32 sm_timestamp;	/* time of last state change */
} mm_submirror32_od_t;
typedef struct mm_submirror {		/* submirrors */
	mdkey_t		sm_key;
	md_dev64_t	sm_dev;		/* 64 bit */
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	md_m_shared_t	sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	md_timeval32_t	sm_timestamp;	/* time of last state change, 32 bit */
} mm_submirror_t;
typedef struct mm_unit32_od {
	mdc_unit32_od_t	c;		/* common stuff */

	int		un_last_read;	/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;		/* number of submirrors */
	mm_submirror32_od_t un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	int		xx_un_overlap_tree_mx[2];	/* replaces mutex */
	ushort_t	xx_un_overlap_tree_cv;
	caddr32_t	xx_un_overlap_root;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * following used to keep dirty bitmaps
	 */
	int		xx_un_resync_mx[2];	/* replaces mutex */
	ushort_t	xx_un_resync_cv;
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	caddr32_t	xx_un_outstanding_writes;	/* outstanding write */
	caddr32_t	xx_un_goingclean_bm;
	caddr32_t	xx_un_goingdirty_bm;
	caddr32_t	xx_un_dirty_bm;
	caddr32_t	xx_un_resync_bm;
	uint_t		un_rrd_blksize;	/* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	/* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid;	/* resync region bm record id */
	/*
	 * following stuff is private to resync process
	 */
	int		un_rs_copysize;
	int		un_rs_dests;	/* destinations */
	daddr32_t	un_rs_resync_done;	/* used for percent done */
	daddr32_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	caddr32_t	un_rs_type;	/* type of resync in progress */
	/*
	 * Incore elements in this old structure are no longer referenced by
	 * the current 64 bit kernel. Comment them out for maintenance purposes.
	 *
	 *	mm_submirror_ic_t un_smic[NMIRROR];
	 *	kmutex_t	un_ovrlap_chn_mx;
	 *	kcondvar_t	un_ovrlap_chn_cv;
	 *	struct md_mps	*un_ovrlap_chn;
	 *	kmutex_t	un_resync_mx;
	 *	kcondvar_t	un_resync_cv;
	 *	short		*un_outstanding_writes;
	 *	uchar_t		*un_goingclean_bm;
	 *	uchar_t		*un_goingdirty_bm;
	 *	uchar_t		*un_dirty_bm;
	 *	uchar_t		*un_resync_bm;
	 *	char		*un_rs_buffer;
	 */
} mm_unit32_od_t;
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif
/* Types of resync in progress (used for un_rs_type) */
#define	MD_RS_NONE		0	/* No resync */
#define	MD_RS_OPTIMIZED		0x0001	/* Optimized resync */
#define	MD_RS_COMPONENT		0x0002	/* Component resync */
#define	MD_RS_SUBMIRROR		0x0003	/* Submirror resync */
#define	MD_RS_ABR		0x0004	/* Application based resync */

/*
 * un_rs_type is split into the following bitfields:
 *
 *	0-3	Resync type (as above)
 *	4-7	Submirror index [0..3]
 *	8-31	Component index
 */
#define	RS_TYPE_MASK	0xF
#define	RS_SMI_MASK	0xF0
#define	RS_CI_MASK	0x1FFF00

#define	RS_TYPE(x)	((x) & RS_TYPE_MASK)
#define	RS_SMI(x)	(((x) & RS_SMI_MASK) >> 4)
#define	RS_CI(x)	(((x) & RS_CI_MASK) >> 8)

#define	SET_RS_TYPE(x, v)	{ \
	(x) &= ~RS_TYPE_MASK; \
	(x) |= ((v) & RS_TYPE_MASK); \
}

#define	SET_RS_TYPE_NONE(x)	{ \
	(x) &= ~RS_TYPE_MASK; \
}

#define	SET_RS_SMI(x, v)	{ \
	(x) &= ~RS_SMI_MASK; \
	(x) |= (((v) << 4) & RS_SMI_MASK); \
}

#define	SET_RS_CI(x, v)	{ \
	(x) &= ~RS_CI_MASK; \
	(x) |= (((v) << 8) & RS_CI_MASK); \
}
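
/*
 * Worked example (illustrative values): for a component resync of
 * component 5 on submirror 2, a resync-type word "rt" built with
 *
 *	uint_t rt = 0;
 *	SET_RS_TYPE(rt, MD_RS_COMPONENT);
 *	SET_RS_SMI(rt, 2);
 *	SET_RS_CI(rt, 5);
 *
 * ends up as 0x522, and decodes back as RS_TYPE(rt) == MD_RS_COMPONENT,
 * RS_SMI(rt) == 2 and RS_CI(rt) == 5.
 */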
typedef struct mm_submirror_ic {
	intptr_t	(*sm_shared_by_blk)(md_dev64_t, void *,
			    diskaddr_t, u_longlong_t *);
	intptr_t	(*sm_shared_by_indx)(md_dev64_t, void *, int);
	int		(*sm_get_component_count)(md_dev64_t, void *);
	int		(*sm_get_bcss)(md_dev64_t, void *, int, diskaddr_t *,
			    size_t *, u_longlong_t *, u_longlong_t *);
} mm_submirror_ic_t;

typedef struct md_mps {
	DAEMON_QUEUE
	buf_t		*ps_bp;
	struct mm_unit	*ps_un;
	mdi_unit_t	*ps_ui;
	uint_t		ps_childbflags;
	caddr_t		ps_addr;
	diskaddr_t	ps_firstblk;
	diskaddr_t	ps_lastblk;
	uint_t		ps_flags;
	uint_t		ps_allfrom_sm;	/* entire read came from here */
	uint_t		ps_writable_sm;
	uint_t		ps_current_sm;
	uint_t		ps_active_cnt;
	int		ps_frags;
	uint_t		ps_changecnt;
	struct md_mps	*ps_unused1;
	struct md_mps	*ps_unused2;
	void		(*ps_call)();
	kmutex_t	ps_mx;
	avl_node_t	ps_overlap_node;
} md_mps_t;
#define	MD_MPS_ON_OVERLAP	0x0001
#define	MD_MPS_ERROR		0x0002
#define	MD_MPS_WRITE_AFTER_READ	0x0004
#define	MD_MPS_WOW		0x0008
#define	MD_MPS_DONTFREE		0x0010
#define	MD_MPS_DONE		0x0020
#define	MD_MPS_MAPPED		0x0040	/* re: MD_STR_MAPPED */
#define	MD_MPS_NOBLOCK		0x0080	/* re: MD_NOBLOCK */
#define	MD_MPS_ABR		0x0100	/* re: MD_STR_ABR */
#define	MD_MPS_DMR		0x0200	/* re: MD_STR_DMR */
#define	MD_MPS_WMUPDATE		0x0400	/* re: MD_STR_WMUPDATE */
#define	MD_MPS_DIRTY_RD		0x0800	/* re: MD_STR_DIRTY_RD */
#define	MD_MPS_RESYNC_READ	0x1000
#define	MD_MPS_FLAG_ERROR	0x2000	/* re: MD_STR_FLAG_ERR */
#define	MD_MPS_BLOCKABLE_IO	0x4000	/* re: MD_STR_BLOCK_OK */

#define	MPS_FREE(kc, ps)			\
{						\
	if ((ps)->ps_flags & MD_MPS_DONTFREE)	\
		(ps)->ps_flags |= MD_MPS_DONE;	\
	else					\
		kmem_cache_free((kc), (ps));	\
}
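
/*
 * Usage sketch (illustrative; "ps_cache" stands for whichever kmem cache
 * the md_mps_t was allocated from):
 *
 *	MPS_FREE(ps_cache, ps);
 *
 * If MD_MPS_DONTFREE is set the structure is only marked MD_MPS_DONE
 * instead of being returned to the cache, so a caller still holding a
 * reference can detect completion and free it later.
 */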
typedef struct md_mcs {
	DAEMON_QUEUE
	md_mps_t	*cs_ps;
	minor_t		cs_mdunit;
	/* Add new structure members HERE!! */
	buf_t		cs_buf;
	/* DO NOT add structure members here; cs_buf is dynamically sized */
} md_mcs_t;

typedef struct mm_mirror_ic {
	kmutex_t	un_overlap_tree_mx;
	kcondvar_t	un_overlap_tree_cv;
	avl_tree_t	un_overlap_root;
	kmutex_t	un_resync_mx;
	kcondvar_t	un_resync_cv;
	short		*un_outstanding_writes;	/* outstanding write array */
	uchar_t		*un_goingclean_bm;
	uchar_t		*un_goingdirty_bm;
	uchar_t		*un_dirty_bm;
	uchar_t		*un_resync_bm;
	char		*un_rs_buffer;
	int		un_suspend_wr_flag;
	kmutex_t	un_suspend_wr_mx;
	kcondvar_t	un_suspend_wr_cv;
	md_mn_nodeid_t	un_mirror_owner;	/* Node which owns mirror */
	diskaddr_t	un_resync_startbl;	/* Start block for resync */
	kmutex_t	un_owner_mx;		/* Mutex for un_owner_state */
	uint_t		un_owner_state;		/* See below */
	uint_t		un_mirror_owner_status;	/* status for ioctl request */
	kmutex_t	un_dmr_mx;		/* mutex for DMR requests */
	kcondvar_t	un_dmr_cv;		/* condvar for DMR requests */
	int		un_dmr_last_read;	/* last DMR submirror read */
	callb_cpr_t	un_rs_cprinfo;	/* CPR info for resync thread */
	kmutex_t	un_rs_cpr_mx;	/* mutex for resync CPR info */
	kmutex_t	un_prr_cpr_mx;	/* mutex for prr CPR info */
	uint_t		un_resync_completed;	/* type of last resync */
	int		un_abr_count;	/* count of sp's with abr set */

	uchar_t		*un_pernode_dirty_bm[MD_MNMAXSIDES];
	uchar_t		*un_pernode_dirty_sum;

	krwlock_t	un_pernode_dirty_mx[MD_MNMAXSIDES];
	ushort_t	un_rr_clean_start_bit;	/* where to start next clean */

#ifdef _KERNEL
	ddi_taskq_t	*un_drl_task;	/* deferred RR_CLEAN taskq */
#else
	void		*un_drl_task;	/* deferred RR_CLEAN taskq */
#endif	/* _KERNEL */
	uint_t		un_waiting_to_clear;	/* Blocked waiting to clear */
} mm_mirror_ic_t;

#define	MM_MN_OWNER_SENT	0x0001	/* RPC in progress */
#define	MM_MN_BECOME_OWNER	0x0002	/* Ownership change in prog. */
#define	MM_MN_PREVENT_CHANGE	0x0004	/* Disallow ownership change */
typedef struct mm_unit {
	mdc_unit_t	c;		/* common stuff */

	int		un_last_read;	/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;		/* number of submirrors */
	mm_submirror_t	un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * following used to keep dirty bitmaps
	 */
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	uint_t		un_rrd_blksize;	/* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	/* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid;	/* resync region bm db record id */
	/*
	 * following stuff is private to resync process
	 */
	int		un_rs_copysize;
	int		un_rs_dests;	/* destinations */
	diskaddr_t	un_rs_resync_done;	/* used for percent done */
	diskaddr_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	uint_t		un_rs_type;	/* type of resync */
	/*
	 * Incore only elements
	 */
	mm_submirror_ic_t un_smic[NMIRROR];	/* NMIRROR elements array */
	mm_mirror_ic_t	un_mmic;
	kmutex_t	un_rrp_inflight_mx;
	/*
	 * resync thread control
	 */
	kthread_t	*un_rs_thread;	/* Resync thread ID */
	kmutex_t	un_rs_thread_mx;	/* Thread cv mutex */
	kcondvar_t	un_rs_thread_cv;	/* Cond. Var. for thread */
	uint_t		un_rs_thread_flags;	/* Thread control flags */
	md_mps_t	*un_rs_prev_overlap;	/* existing overlap request */
	timeout_id_t	un_rs_resync_to_id;	/* resync progress timeout */
	kmutex_t	un_rs_progress_mx;	/* Resync progress mutex */
	kcondvar_t	un_rs_progress_cv;	/* Cond. Var. for progress */
	uint_t		un_rs_progress_flags;	/* Thread control flags */
	void		*un_rs_msg;	/* Intra-node resync message */
} mm_unit_t;
#define	un_overlap_tree_mx	un_mmic.un_overlap_tree_mx
#define	un_overlap_tree_cv	un_mmic.un_overlap_tree_cv
#define	un_overlap_root		un_mmic.un_overlap_root
#define	un_resync_mx		un_mmic.un_resync_mx
#define	un_resync_cv		un_mmic.un_resync_cv
#define	un_outstanding_writes	un_mmic.un_outstanding_writes
#define	un_goingclean_bm	un_mmic.un_goingclean_bm
#define	un_goingdirty_bm	un_mmic.un_goingdirty_bm
#define	un_dirty_bm		un_mmic.un_dirty_bm
#define	un_resync_bm		un_mmic.un_resync_bm
#define	un_rs_buffer		un_mmic.un_rs_buffer
#define	un_suspend_wr_mx	un_mmic.un_suspend_wr_mx
#define	un_suspend_wr_cv	un_mmic.un_suspend_wr_cv
#define	un_suspend_wr_flag	un_mmic.un_suspend_wr_flag
#define	un_mirror_owner		un_mmic.un_mirror_owner
#define	un_resync_startbl	un_mmic.un_resync_startbl
#define	un_owner_mx		un_mmic.un_owner_mx
#define	un_owner_state		un_mmic.un_owner_state
#define	un_mirror_reqs		un_mmic.un_mirror_reqs
#define	un_mirror_reqs_done	un_mmic.un_mirror_reqs_done
#define	un_mirror_owner_status	un_mmic.un_mirror_owner_status
#define	un_dmr_mx		un_mmic.un_dmr_mx
#define	un_dmr_cv		un_mmic.un_dmr_cv
#define	un_dmr_last_read	un_mmic.un_dmr_last_read
#define	un_rs_cprinfo		un_mmic.un_rs_cprinfo
#define	un_rs_cpr_mx		un_mmic.un_rs_cpr_mx
#define	un_prr_cpr_mx		un_mmic.un_prr_cpr_mx
#define	un_resync_completed	un_mmic.un_resync_completed
#define	un_abr_count		un_mmic.un_abr_count
#define	un_pernode_dirty_bm	un_mmic.un_pernode_dirty_bm
#define	un_pernode_dirty_sum	un_mmic.un_pernode_dirty_sum
#define	un_pernode_dirty_mx	un_mmic.un_pernode_dirty_mx
#define	un_rr_clean_start_bit	un_mmic.un_rr_clean_start_bit
#define	un_drl_task		un_mmic.un_drl_task
#define	un_waiting_to_clear	un_mmic.un_waiting_to_clear

#define	MM_RF_GATECLOSED	0x0001
#define	MM_RF_COMMIT_NEEDED	0x0002
#define	MM_RF_COMMITING		0x0004
#define	MM_RF_STALL_CLEAN	(MM_RF_COMMITING |	\
				MM_RF_COMMIT_NEEDED |	\
				MM_RF_GATECLOSED)

#define	MD_MN_MIRROR_UNOWNED	0
#define	MD_MN_MIRROR_OWNER(un)	(un->un_mirror_owner == md_mn_mynode_id)
#define	MD_MN_NO_MIRROR_OWNER(un)	\
	(un->un_mirror_owner == MD_MN_MIRROR_UNOWNED)

typedef struct err_comp {
	struct err_comp	*ec_next;
	int		ec_smi;
	int		ec_ci;
} err_comp_t;
extern int	md_min_rr_size;
extern int	md_def_num_rr;

/* Optimized resync records controllers */
#define	MD_MIN_RR_SIZE		(md_min_rr_size)
#define	MD_DEF_NUM_RR		(md_def_num_rr)
#define	MD_MAX_NUM_RR		(4192*NBBY - sizeof (struct optim_resync))

/* default resync buffer size */
#define	MD_DEF_RESYNC_BUF_SIZE	(1024)

/* Structure for optimized resync records */
#define	OR_MAGIC	0xFECA	/* Only missing the L */
typedef struct optim_resync {
	uint_t	or_revision;
	uint_t	or_magic;
	uint_t	or_blksize;
	uint_t	or_num;
	uchar_t	or_rr[1];
} optim_resync_t;

/* Type 2 for mirror records */
#define	MIRROR_REC	1
#define	RESYNC_REC	2

#ifdef _KERNEL

#define	NO_SUBMIRRORS	(0)
#define	ALL_SUBMIRRORS	(0xFFF)
#define	SMI2BIT(smi)	(1 << (smi))

/* For use with mirror_other_sources() */
#define	WHOLE_SM	(-1)
#define	BLK_TO_RR(i, b, un) {\
	(i) = ((b) / ((un))->un_rrd_blksize); \
	if ((i) > ((un))->un_rrd_num) \
		{ panic("md: BLK_TO_RR"); } \
}

#define	RR_TO_BLK(b, i, un) \
	(b) = ((i) * ((un))->un_rrd_blksize)
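
/*
 * For example (illustrative numbers): with un_rrd_blksize == 1024 blocks
 * per resync region, BLK_TO_RR maps block 5000 to region index
 * 5000 / 1024 == 4, and RR_TO_BLK maps region index 4 back to its first
 * block, 4 * 1024 == 4096.
 */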
#define	IS_GOING_DIRTY(i, un)	(isset((un)->un_goingdirty_bm, (i)))
#define	CLR_GOING_DIRTY(i, un)	(clrbit((un)->un_goingdirty_bm, (i)))
#define	SET_GOING_DIRTY(i, un)	(setbit((un)->un_goingdirty_bm, (i)))

#define	IS_GOING_CLEAN(i, un)	(isset((un)->un_goingclean_bm, (i)))
#define	CLR_GOING_CLEAN(i, un)	(clrbit((un)->un_goingclean_bm, (i)))
#define	SET_GOING_CLEAN(i, un)	(setbit((un)->un_goingclean_bm, (i)))

#define	IS_REGION_DIRTY(i, un)	(isset((un)->un_dirty_bm, (i)))
#define	CLR_REGION_DIRTY(i, un)	(clrbit((un)->un_dirty_bm, (i)))
#define	SET_REGION_DIRTY(i, un)	(setbit((un)->un_dirty_bm, (i)))

#define	IS_KEEPDIRTY(i, un)	(isset((un)->un_resync_bm, (i)))
#define	CLR_KEEPDIRTY(i, un)	(clrbit((un)->un_resync_bm, (i)))

#define	IS_PERNODE_DIRTY(n, i, un)	\
	(isset((un)->un_pernode_dirty_bm[(n)-1], (i)))
#define	CLR_PERNODE_DIRTY(n, i, un)	\
	(clrbit((un)->un_pernode_dirty_bm[(n)-1], (i)))
#define	SET_PERNODE_DIRTY(n, i, un)	\
	(setbit((un)->un_pernode_dirty_bm[(n)-1], (i)))
/*
 * Write-On-Write handling.
 *	flags for md_mirror_wow_flg
 *	structure for queuing copy-writes
 *	macros for relative locating of header and buffer
 */
#define	WOW_DISABLE	0x0001	/* turn off WOW detection */
#define	WOW_PHYS_ENABLE	0x0020	/* turn on WOW for PHYS */
#define	WOW_LOGIT	0x0002	/* log non-disabled WOW detections */
#define	WOW_NOCOPY	0x0004	/* repeat normal write on WOW detection */

typedef struct wowhdr {
	DAEMON_QUEUE
	md_mps_t	*wow_ps;
	int		wow_offset;
} wowhdr_t;

#define	WOWBUF_HDR(wowbuf)	((void *)(wowbuf-sizeof (wowhdr_t)))
#define	WOWHDR_BUF(wowhdr)	((char *)wowhdr+sizeof (wowhdr_t))
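
/*
 * Layout sketch (illustrative): a write-on-write allocation places the
 * wowhdr_t immediately in front of the data buffer it describes:
 *
 *	+-----------+----------------------+
 *	| wowhdr_t  | copy-write buffer    |
 *	+-----------+----------------------+
 *	^           ^
 *	wowhdr      wowbuf == WOWHDR_BUF(wowhdr)
 *
 * so WOWBUF_HDR(wowbuf) steps back sizeof (wowhdr_t) bytes from the
 * buffer to recover its header, and WOWHDR_BUF(wowhdr) steps forward
 * the same amount to get from the header to the buffer.
 */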
/*
 * Structure used to save information about DMR reads: the count of all
 * DMR reads and the timestamp of the last one executed.  We declare a
 * global with this structure so that a debugger can verify that the DMR
 * ioctl has been executed, and how many times.
 */
typedef struct dmr_stats {
	uint_t		dmr_count;
	struct timeval	dmr_timestamp;
} dmr_stats_t;
/* Externals from mirror.c */
extern mddb_recid_t	mirror_get_sm_unit(md_dev64_t);
extern void		mirror_release_sm_unit(md_dev64_t);

extern void		mirror_set_sm_state(mm_submirror_t *,
			    mm_submirror_ic_t *, sm_state_t, int);

extern void		mirror_commit(mm_unit_t *, int, mddb_recid_t *);
extern int		poke_hotspares(void);
extern void		build_submirror(mm_unit_t *, int, int);
extern int		mirror_build_incore(mm_unit_t *, int);
extern void		reset_mirror(mm_unit_t *, minor_t, int);
extern int		mirror_internal_open(minor_t, int, int, int, IOLOCK *);
extern int		mirror_internal_close(minor_t, int, int, IOLOCK *);
extern void		set_sm_comp_state(mm_unit_t *, int, int, int,
			    mddb_recid_t *, uint_t, IOLOCK *);
extern int		mirror_other_sources(mm_unit_t *, int, int, int);
extern int		mirror_resync_message(md_mn_rs_params_t *, IOLOCK *);
extern void		md_mirror_strategy(buf_t *, int, void *);
extern int		mirror_directed_read(dev_t, vol_directed_rd_t *, int);
extern void		mirror_check_failfast(minor_t mnum);
extern int		check_comp_4_hotspares(mm_unit_t *, int, int, uint_t,
			    mddb_recid_t, IOLOCK *);
extern void		mirror_overlap_tree_remove(md_mps_t *ps);
extern void		mirror_child_init(md_mcs_t *cs);

/* Externals from mirror_ioctl.c */
extern void		reset_comp_states(mm_submirror_t *,
			    mm_submirror_ic_t *);
extern int		mirror_grow_unit(mm_unit_t *un, md_error_t *ep);
extern int		md_mirror_ioctl(dev_t dev, int cmd, void *data,
			    int mode, IOLOCK *lockp);
extern mm_unit_t	*mirror_getun(minor_t, md_error_t *, int, IOLOCK *);
extern void		mirror_get_status(mm_unit_t *un, IOLOCK *lockp);
extern int		mirror_choose_owner(mm_unit_t *un, md_mn_req_owner_t *);

/* rename named service functions */
md_ren_list_svc_t	mirror_rename_listkids;
md_ren_svc_t		mirror_rename_check;
md_ren_roleswap_svc_t	mirror_renexch_update_kids;
md_ren_roleswap_svc_t	mirror_exchange_parent_update_to;
md_ren_roleswap_svc_t	mirror_exchange_self_update_from_down;

/* Externals from mirror_resync.c */
extern int		unit_setup_resync(mm_unit_t *, int);
extern int		mirror_resync_unit(minor_t mnum, md_resync_ioctl_t *ri,
			    md_error_t *ep, IOLOCK *);
extern int		mirror_ioctl_resync(md_resync_ioctl_t *p, IOLOCK *);
extern int		mirror_mark_resync_region(mm_unit_t *, diskaddr_t,
			    diskaddr_t, md_mn_nodeid_t);
extern void		resync_start_timeout(set_t setno);
extern int		mirror_resize_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_add_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_probedevs(md_probedev_t *, IOLOCK *);
extern void		mirror_copy_rr(int, uchar_t *, uchar_t *);
extern void		mirror_process_unit_resync(mm_unit_t *);
extern int		mirror_set_dirty_rr(md_mn_rr_dirty_params_t *);
extern int		mirror_set_clean_rr(md_mn_rr_clean_params_t *);

#endif	/* _KERNEL */
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MD_MIRROR_H */