/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define MD_RESERVED       0UL
#define LINEAR            1UL
#define STRIPED           2UL
#define RAID0             STRIPED
#define RAID1             3UL
#define RAID5             4UL
#define TRANSLUCENT       5UL
#define HSM               6UL
#define MAX_PERSONALITY   7UL

extern inline int pers_to_level (int pers)
{
	switch (pers) {
		case HSM:		return -3;
		case TRANSLUCENT:	return -2;
		case LINEAR:		return -1;
		case RAID0:		return 0;
		case RAID1:		return 1;
		case RAID5:		return 5;
	}
	panic("pers_to_level()");
}

extern inline int level_to_pers (int level)
{
	switch (level) {
		case -3: return HSM;
		case -2: return TRANSLUCENT;
		case -1: return LINEAR;
		case 0: return RAID0;
		case 1: return RAID1;
		case 4:
		case 5: return RAID5;
	}
	return MD_RESERVED;
}
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
#if (MINORBITS != 8)
#error MD doesnt handle bigger kdev yet
#endif
#define MAX_REAL     12			/* Max number of disks per md dev */
#define MAX_MD_DEVS  (1<<MINORBITS)	/* Max number of md dev */
/*
 * Maps a kdev to an mddev/subdev. How 'data' is handled is up to
 * the personality. (eg. HSM uses this to identify individual LVs)
 */
typedef struct dev_mapping_s {
	mddev_t *mddev;
	void *data;
} dev_mapping_t;

extern dev_mapping_t mddev_map [MAX_MD_DEVS];
extern inline mddev_t * kdev_to_mddev (kdev_t dev)
{
	if (MAJOR(dev) != MD_MAJOR)
		BUG();
	return mddev_map[MINOR(dev)].mddev;
}
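/*
 * Example (not part of the original header): a minimal sketch of how a
 * caller might use the mddev_map lookup above, e.g. from an ioctl path.
 * The function name is hypothetical.
 */
extern inline int md_dev_running(kdev_t dev)
{
	mddev_t *mddev = kdev_to_mddev(dev);

	/* an array is "running" once a personality is attached */
	return mddev != NULL && mddev->pers != NULL;
}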
/*
 * options passed in raidrun:
 */

#define MAX_CHUNK_SIZE (4096*1024)

/*
 * default readahead
 */
#define MD_READAHEAD	(256 * 512)
extern inline int disk_faulty(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_FAULTY);
}

extern inline int disk_active(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_ACTIVE);
}

extern inline int disk_sync(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_SYNC);
}

extern inline int disk_spare(mdp_disk_t * d)
{
	return !disk_sync(d) && !disk_active(d) && !disk_faulty(d);
}

extern inline int disk_removed(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_REMOVED);
}
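/*
 * Example (not part of the original header): how the predicates above
 * combine to classify a descriptor.  Note that mark_disk_removed() below
 * sets both the REMOVED and FAULTY bits, so the removed test comes first.
 * The function name is hypothetical.
 */
extern inline const char * disk_state_name(mdp_disk_t * d)
{
	if (disk_removed(d))
		return "removed";
	if (disk_faulty(d))
		return "faulty";
	if (disk_active(d) && disk_sync(d))
		return "active";
	if (disk_spare(d))
		return "spare";
	return "unknown";
}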
extern inline void mark_disk_faulty(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_FAULTY);
}

extern inline void mark_disk_active(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_ACTIVE);
}

extern inline void mark_disk_sync(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_SYNC);
}
extern inline void mark_disk_spare(mdp_disk_t * d)
{
	d->state = 0;
}
extern inline void mark_disk_removed(mdp_disk_t * d)
{
	d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED);
}

extern inline void mark_disk_inactive(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_ACTIVE);
}

extern inline void mark_disk_nonsync(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_SYNC);
}
/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct md_list_head same_set;	/* RAID devices within the same set */
	struct md_list_head all;	/* all RAID devices */
	struct md_list_head pending;	/* undetected RAID devices */

	kdev_t dev;			/* Device number */
	kdev_t old_dev;			/* "" when it was last imported */
	unsigned long size;		/* Device size (in blocks) */
	mddev_t *mddev;			/* RAID array if running */
	unsigned long last_events;	/* IO event timestamp */

	struct inode *inode;		/* Lock inode */
	struct file filp;		/* Lock file */

	mdp_super_t *sb;
	unsigned long sb_offset;

	int faulty;			/* if faulty do not issue IO requests */
	int desc_nr;			/* descriptor index in the superblock */
};
/*
 * disk operations in a working array:
 */
#define DISKOP_SPARE_INACTIVE	0
#define DISKOP_SPARE_WRITE	1
#define DISKOP_SPARE_ACTIVE	2
#define DISKOP_HOT_REMOVE_DISK	3
#define DISKOP_HOT_ADD_DISK	4
typedef struct mdk_personality_s mdk_personality_t;
struct mddev_s
{
	void				*private;
	mdk_personality_t		*pers;
	int				__minor;
	mdp_super_t			*sb;
	int				nb_dev;
	struct md_list_head		disks;
	int				sb_dirty;
	mdu_param_t			param;
	int				ro;
	unsigned long			curr_resync;	/* blocks scheduled */
	unsigned long			resync_mark;	/* a recent timestamp */
	unsigned long			resync_mark_cnt;/* blocks written at resync_mark */
	char				*name;
	int				recovery_running;
	struct semaphore		reconfig_sem;
	struct semaphore		recovery_sem;
	struct semaphore		resync_sem;

	atomic_t			recovery_active;/* blocks scheduled, but not written */
	md_wait_queue_head_t		recovery_wait;

	struct md_list_head		all_mddevs;
	request_queue_t			queue;
};
struct mdk_personality_s
{
	char *name;
	int (*make_request)(request_queue_t *q, mddev_t *mddev, int rw, struct buffer_head * bh);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	int (*status)(char *page, mddev_t *mddev);
	int (*error_handler)(mddev_t *mddev, kdev_t dev);
/*
 * Some personalities (RAID-1, RAID-5) can have disks hot-added and
 * hot-removed. Hot removal is different from failure. (failure marks
 * a disk inactive, but the disk is still part of the array) The
 * interface to such operations is the 'pers->diskop()' function,
 * which can be NULL.
 *
 * The diskop function can change the pointer pointing to the incoming
 * descriptor, but must do so very carefully. (currently only
 * SPARE_ACTIVE expects such a change)
 */
	int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state);

	int (*stop_resync)(mddev_t *mddev);
	int (*restart_resync)(mddev_t *mddev);
	int (*sync_request)(mddev_t *mddev, unsigned long block_nr);
};
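/*
 * Example (not part of the original header): a hypothetical skeleton
 * personality, sketching how the method table above is filled in.  A
 * real personality (raid0, raid1, raid5, ...) implements real logic and
 * registers itself with the MD core via register_md_personality().
 */
static int example_make_request(request_queue_t *q, mddev_t *mddev,
				int rw, struct buffer_head * bh)
{
	return -EIO;	/* a real personality remaps 'bh' and resubmits it */
}

static mdk_personality_t example_personality = {
	name:		"example",
	make_request:	example_make_request,
	/* run, stop, status, error_handler, diskop etc. would go here;
	   diskop may legitimately stay NULL */
};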
/*
 * Currently we index md_array directly, based on the minor
 * number. This will have to change to dynamic allocation
 * once we start supporting partitioning of md devices.
 */
extern inline int mdidx (mddev_t * mddev)
{
	return mddev->__minor;
}
extern inline kdev_t mddev_to_kdev(mddev_t * mddev)
{
	return MKDEV(MD_MAJOR, mdidx(mddev));
}
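/*
 * Example (not part of the original header): since arrays are indexed by
 * minor number, mdidx()/mddev_to_kdev() and kdev_to_mddev() round-trip.
 * Hypothetical sanity check, shown only to illustrate the mapping.
 */
extern inline int mddev_map_consistent(mddev_t * mddev)
{
	return kdev_to_mddev(mddev_to_kdev(mddev)) == mddev;
}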
extern mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev);
extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define ITERATE_RDEV_GENERIC(head,field,rdev,tmp)			\
									\
	for (tmp = head.next;						\
		rdev = md_list_entry(tmp, mdk_rdev_t, field),		\
			tmp = tmp->next, tmp->prev != &head		\
		; )
/*
 * iterates through the 'same array disks' ringlist
 */
#define ITERATE_RDEV(mddev,rdev,tmp)					\
	ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp)
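/*
 * Example (not part of the original header): typical use of the iterator,
 * counting the faulty members of an array.  Hypothetical helper; note
 * that 'tmp' is the iterator's own cursor and must not be touched.
 */
extern inline int count_faulty_rdevs(mddev_t * mddev)
{
	struct md_list_head *tmp;
	mdk_rdev_t *rdev;
	int count = 0;

	ITERATE_RDEV(mddev, rdev, tmp)
		if (rdev->faulty)
			count++;
	return count;
}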
/*
 * Same as above, but assumes that the device has rdev->desc_nr numbered
 * from 0 to mddev->nb_dev, and iterates through rdevs in ascending order.
 */
#define ITERATE_RDEV_ORDERED(mddev,rdev,i)				\
	for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++)
/*
 * Iterates through all 'RAID managed disks'
 */
#define ITERATE_RDEV_ALL(rdev,tmp)					\
	ITERATE_RDEV_GENERIC(all_raid_disks,all,rdev,tmp)
/*
 * Iterates through 'pending RAID disks'
 */
#define ITERATE_RDEV_PENDING(rdev,tmp)					\
	ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp)
/*
 * iterates through all used mddevs in the system.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (tmp = all_mddevs.next;					\
		mddev = md_list_entry(tmp, mddev_t, all_mddevs),	\
			tmp = tmp->next, tmp->prev != &all_mddevs	\
		; )
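/*
 * Example (not part of the original header): counting every known array.
 * Hypothetical helper; it only compiles where the global 'all_mddevs'
 * list the macro walks is visible (i.e. in md.c).
 */
static int count_mddevs(void)
{
	struct md_list_head *tmp;
	mddev_t *mddev;
	int count = 0;

	ITERATE_MDDEV(mddev, tmp)
		count++;
	return count;
}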
extern inline int lock_mddev (mddev_t * mddev)
{
	return down_interruptible(&mddev->reconfig_sem);
}

extern inline void unlock_mddev (mddev_t * mddev)
{
	up(&mddev->reconfig_sem);
}
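/*
 * Example (not part of the original header): the usual pattern around
 * reconfig_sem.  lock_mddev() returns nonzero if the sleep was
 * interrupted by a signal.  Hypothetical function.
 */
extern inline int md_reconfig_example(mddev_t * mddev)
{
	if (lock_mddev(mddev))
		return -EINTR;
	/* ... reconfigure the array under reconfig_sem ... */
	unlock_mddev(mddev);
	return 0;
}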
#define xchg_values(x,y) do { __typeof__(x) __tmp = x; \
				x = y; y = __tmp; } while (0)
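/*
 * Example (not part of the original header): xchg_values() swaps any two
 * lvalues of the same type, e.g. two descriptors' state words while
 * promoting a spare.  Hypothetical helper.
 */
extern inline void swap_disk_states(mdp_disk_t * a, mdp_disk_t * b)
{
	xchg_values(a->state, b->state);
}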
typedef struct mdk_thread_s {
	void			(*run) (void *data);
	void			*data;
	md_wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct semaphore	*sem;
	struct task_struct	*tsk;
	const char		*name;
} mdk_thread_t;

#define THREAD_WAKEUP  0
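/*
 * Example (not part of the original header): shape of the 'run' callback
 * executed by an MD kernel thread.  THREAD_WAKEUP is the bit in 'flags'
 * that marks the thread as having work pending.  Hypothetical worker;
 * real ones are created via md_register_thread() in the MD core.
 */
static void example_thread_run(void *data)
{
	mddev_t *mddev = (mddev_t *) data;

	if (!mddev->pers)
		return;
	/* perform one batch of background work (e.g. resync) for 'mddev' */
}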
#define MAX_DISKNAME_LEN 64
typedef struct dev_name_s {
	struct md_list_head list;
	kdev_t dev;
	char namebuf [MAX_DISKNAME_LEN];
	char *name;
} dev_name_t;
#define __wait_event_lock_irq(wq, condition, lock)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock);			\
} while (0)
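/*
 * Example (not part of the original header): waiting until all scheduled
 * resync blocks have completed, while a spinlock shared with the
 * completion path is held across the condition check.  The macro drops
 * the lock around schedule() and retakes it before rechecking.
 * 'example_lock' and the function are hypothetical.
 */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example_drain_resync(mddev_t * mddev)
{
	spin_lock_irq(&example_lock);
	wait_event_lock_irq(mddev->recovery_wait,
			atomic_read(&mddev->recovery_active) == 0,
			example_lock);
	spin_unlock_irq(&example_lock);
}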