drivers/md/md.h
/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */
	sector_t sectors;		/* Device size (in 512byte sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_start;	/* offset of the super block (in 512byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;
	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
#define	AllReserved	6		/* If whole device is reserved for
					 * one array */
#define	AutoDetected	7		/* added by auto-detect */
#define	Blocked		8		/* An error occurred on an externally
					 * managed array, don't allow writes
					 * until it is cleared */
#define	StateChanged	9		/* Faulty or Blocked has changed during
					 * interrupt, so it needs to be
					 * notified by the thread */
6bfe0b499
|
75 |
wait_queue_head_t blocked_wait; |
8ddf9efe6
|
76 |
|
1da177e4c
|
77 78 |
int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ |
41158c7eb
|
79 80 81 82 |
int saved_raid_disk; /* role that device used to have in the * array and could again if we did a partial * resync from the bitmap */ |
5fd6c1dce
|
83 84 85 86 |
sector_t recovery_offset;/* If this device has been partially * recovered, this is where we were * up to. */ |
1da177e4c
|
87 88 89 90 91 |
atomic_t nr_pending; /* number of pending requests. * only maintained for arrays that * support hot removal */ |
ba22dcbf1
|
92 93 94 |
atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ |
4dbcdc751
|
95 96 97 98 |
atomic_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. */ |
5792a2856
|
99 |
struct work_struct del_work; /* used for delayed sysfs removal */ |
3c0ee63a6
|
100 101 102 |
struct sysfs_dirent *sysfs_state; /* handle for 'state' * sysfs entry */ |
1da177e4c
|
103 |
}; |
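
/*
 * Illustrative note (not part of the original header): because of the
 * invariant documented above (Faulty and In_sync are never both set),
 * callers usually only need to test a single bit, e.g.
 *
 *	if (test_bit(Faulty, &rdev->flags))
 *		return;				// device has failed
 *	if (!test_bit(In_sync, &rdev->flags))
 *		;				// working spare, still being rebuilt
 */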

struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* superblock update in progress */

	int				suspended;
	atomic_t			active_io;
	int				ro;

	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long			recovery;
	int				recovery_disabled; /* if we detect that recovery
							    * will always fail, set this
							    * so we don't loop trying */

	int				in_sync;	/* know to not need resync */
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* true if we might need to reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	int				barriers_work;	/* initialised to true, cleared as soon
							 * as a barrier request to slave
							 * fails.  Only supported
							 */
	struct bio			*biolist;	/* bios that need to be retried
							 * because BIO_RW_BARRIER is not supported
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct sysfs_dirent		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t			write_lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */

	atomic_t			write_behind; /* outstanding async IO */
	unsigned int			max_write_behind; /* 0 = sync */

	struct bitmap			*bitmap; /* the bitmap for the device */
	struct file			*bitmap_file; /* the bitmap file */
	long				bitmap_offset;	/* offset from superblock of
							 * start of bitmap. May be
							 * negative, but not '0'
							 */
	long				default_bitmap_offset; /* this is the offset to use when
								* hot-adding a bitmap.  It should
								* eventually be settable by sysfs.
								*/

	struct list_head		all_mddevs;
};


static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
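
/*
 * Illustrative sketch (not part of the original header): rdev->nr_pending
 * pairs with rdev_dec_pending().  A personality typically bumps the count
 * before submitting I/O to a member device and drops it from the bio
 * completion path, roughly:
 *
 *	atomic_inc(&rdev->nr_pending);
 *	generic_make_request(bio);		// submit to rdev->bdev
 *	...
 *	rdev_dec_pending(rdev, mddev);		// in the end_io handler
 *
 * so that a faulty device with no I/O left outstanding can trigger
 * MD_RECOVERY_NEEDED and be removed safely.
 */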

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(struct request_queue *q, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
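
/*
 * Illustrative sketch (not part of the original header): md's sysfs
 * attributes are declared by pairing an md_sysfs_entry with show/store
 * callbacks that take the mddev directly, e.g. (hypothetical names):
 *
 *	static ssize_t example_show(mddev_t *mddev, char *page)
 *	{
 *		return sprintf(page, "%d\n", mddev->raid_disks);
 *	}
 *
 *	static struct md_sysfs_entry md_example =
 *		__ATTR(example, S_IRUGO, example_show, NULL);
 *
 * md.c's sysfs_ops then route kobject show/store calls to these helpers.
 */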

static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)
/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)					\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
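
/*
 * Illustrative sketch (not part of the original header): a typical walk
 * over an array's member devices with rdev_for_each(); the caller is
 * assumed to hold appropriate locking (e.g. the reconfig_mutex), and the
 * _safe iteration allows the current 'rdev' to be unlinked:
 *
 *	mdk_rdev_t *rdev, *tmp;
 *
 *	rdev_for_each(rdev, tmp, mddev) {
 *		if (test_bit(Faulty, &rdev->flags))
 *			continue;
 *		// ... operate on each working member ...
 *	}
 */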

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
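
/*
 * Illustrative sketch (not part of the original header): wait_event_lock_irq()
 * expects the caller to already hold 'lock' with interrupts disabled; the
 * macro drops it around schedule() and runs 'cmd' before each sleep.  A
 * caller waiting for outstanding superblock writes might look roughly like:
 *
 *	spin_lock_irq(&mddev->write_lock);
 *	wait_event_lock_irq(mddev->sb_wait,
 *			    atomic_read(&mddev->pending_writes) == 0,
 *			    mddev->write_lock,
 *			    md_wakeup_thread(mddev->thread));
 *	spin_unlock_irq(&mddev->write_lock);
 */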

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
			struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
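
/*
 * Illustrative sketch (not part of the original header): a RAID personality
 * module fills in an mdk_personality and registers it from its module init,
 * roughly the way the in-tree levels do (names here are hypothetical):
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= 123,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */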

#endif /* _MD_MD_H */