Commit 84c89557a302e18414a011cc52b1abd034860743
Committed by
Alasdair G Kergon
1 parent
d9bf0b508d
Exists in
master
and in
7 other branches
dm ioctl: allow rename to fill empty uuid
Allow the uuid of a mapped device to be set after device creation. Previously the uuid (which is optional) could only be set by DM_DEV_CREATE. If no uuid was supplied it could not be set later. Sometimes it's necessary to create the device before the uuid is known, and in such cases the uuid must be filled in after the creation. This patch extends DM_DEV_RENAME to accept a uuid accompanied by a new flag DM_UUID_FLAG. This can only be done once and if no uuid was previously supplied. It cannot be used to change an existing uuid. DM_VERSION_MINOR is also bumped to 19 to indicate this interface extension is available. Signed-off-by: Peter Jones <pjones@redhat.com> Signed-off-by: Jonathan Brassow <jbrassow@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Showing 2 changed files with 87 additions and 28 deletions (inline diff view).
drivers/md/dm-ioctl.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. | 2 | * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. |
3 | * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm.h" | 8 | #include "dm.h" |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/vmalloc.h> | 11 | #include <linux/vmalloc.h> |
12 | #include <linux/miscdevice.h> | 12 | #include <linux/miscdevice.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/dm-ioctl.h> | 16 | #include <linux/dm-ioctl.h> |
17 | #include <linux/hdreg.h> | 17 | #include <linux/hdreg.h> |
18 | #include <linux/compat.h> | 18 | #include <linux/compat.h> |
19 | 19 | ||
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | 21 | ||
22 | #define DM_MSG_PREFIX "ioctl" | 22 | #define DM_MSG_PREFIX "ioctl" |
23 | #define DM_DRIVER_EMAIL "dm-devel@redhat.com" | 23 | #define DM_DRIVER_EMAIL "dm-devel@redhat.com" |
24 | 24 | ||
25 | /*----------------------------------------------------------------- | 25 | /*----------------------------------------------------------------- |
26 | * The ioctl interface needs to be able to look up devices by | 26 | * The ioctl interface needs to be able to look up devices by |
27 | * name or uuid. | 27 | * name or uuid. |
28 | *---------------------------------------------------------------*/ | 28 | *---------------------------------------------------------------*/ |
/*
 * One cell per mapped device.  The cell links the device into the
 * name hash table and, when a uuid is present, the uuid hash table.
 */
struct hash_cell {
	struct list_head name_list;	/* entry in _name_buckets */
	struct list_head uuid_list;	/* entry in _uuid_buckets (unused if no uuid) */

	char *name;			/* owned copy of the device name */
	char *uuid;			/* owned copy of the uuid, or NULL */
	struct mapped_device *md;	/* the device this cell indexes */
	struct dm_table *new_map;	/* table loaded but not yet made live */
};
38 | 38 | ||
/*
 * Iteration state used while packing target-type versions into the
 * ioctl result buffer (see list_version_get_info()).
 */
struct vers_iter {
	size_t param_size;
	struct dm_target_versions *vers, *old_vers;	/* current / previous record */
	char *end;		/* one past the last usable output byte */
	uint32_t flags;		/* DM_BUFFER_FULL_FLAG on overflow */
};
45 | 45 | ||
46 | 46 | ||
/* NUM_BUCKETS must stay a power of two: hash_str() masks with MASK_BUCKETS. */
#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];

static void dm_hash_remove_all(int keep_open_devices);

/*
 * Guards access to both hash tables.
 */
static DECLARE_RWSEM(_hash_lock);

/*
 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
 */
static DEFINE_MUTEX(dm_hash_cells_mutex);
63 | 63 | ||
64 | static void init_buckets(struct list_head *buckets) | 64 | static void init_buckets(struct list_head *buckets) |
65 | { | 65 | { |
66 | unsigned int i; | 66 | unsigned int i; |
67 | 67 | ||
68 | for (i = 0; i < NUM_BUCKETS; i++) | 68 | for (i = 0; i < NUM_BUCKETS; i++) |
69 | INIT_LIST_HEAD(buckets + i); | 69 | INIT_LIST_HEAD(buckets + i); |
70 | } | 70 | } |
71 | 71 | ||
72 | static int dm_hash_init(void) | 72 | static int dm_hash_init(void) |
73 | { | 73 | { |
74 | init_buckets(_name_buckets); | 74 | init_buckets(_name_buckets); |
75 | init_buckets(_uuid_buckets); | 75 | init_buckets(_uuid_buckets); |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
/*
 * Tear down the hash tables on module exit, removing every device
 * (keep_open_devices = 0: open devices are removed too).
 */
static void dm_hash_exit(void)
{
	dm_hash_remove_all(0);
}
83 | 83 | ||
84 | /*----------------------------------------------------------------- | 84 | /*----------------------------------------------------------------- |
85 | * Hash function: | 85 | * Hash function: |
86 | * We're not really concerned with the str hash function being | 86 | * We're not really concerned with the str hash function being |
87 | * fast since it's only used by the ioctl interface. | 87 | * fast since it's only used by the ioctl interface. |
88 | *---------------------------------------------------------------*/ | 88 | *---------------------------------------------------------------*/ |
89 | static unsigned int hash_str(const char *str) | 89 | static unsigned int hash_str(const char *str) |
90 | { | 90 | { |
91 | const unsigned int hash_mult = 2654435387U; | 91 | const unsigned int hash_mult = 2654435387U; |
92 | unsigned int h = 0; | 92 | unsigned int h = 0; |
93 | 93 | ||
94 | while (*str) | 94 | while (*str) |
95 | h = (h + (unsigned int) *str++) * hash_mult; | 95 | h = (h + (unsigned int) *str++) * hash_mult; |
96 | 96 | ||
97 | return h & MASK_BUCKETS; | 97 | return h & MASK_BUCKETS; |
98 | } | 98 | } |
99 | 99 | ||
100 | /*----------------------------------------------------------------- | 100 | /*----------------------------------------------------------------- |
101 | * Code for looking up a device by name | 101 | * Code for looking up a device by name |
102 | *---------------------------------------------------------------*/ | 102 | *---------------------------------------------------------------*/ |
103 | static struct hash_cell *__get_name_cell(const char *str) | 103 | static struct hash_cell *__get_name_cell(const char *str) |
104 | { | 104 | { |
105 | struct hash_cell *hc; | 105 | struct hash_cell *hc; |
106 | unsigned int h = hash_str(str); | 106 | unsigned int h = hash_str(str); |
107 | 107 | ||
108 | list_for_each_entry (hc, _name_buckets + h, name_list) | 108 | list_for_each_entry (hc, _name_buckets + h, name_list) |
109 | if (!strcmp(hc->name, str)) { | 109 | if (!strcmp(hc->name, str)) { |
110 | dm_get(hc->md); | 110 | dm_get(hc->md); |
111 | return hc; | 111 | return hc; |
112 | } | 112 | } |
113 | 113 | ||
114 | return NULL; | 114 | return NULL; |
115 | } | 115 | } |
116 | 116 | ||
117 | static struct hash_cell *__get_uuid_cell(const char *str) | 117 | static struct hash_cell *__get_uuid_cell(const char *str) |
118 | { | 118 | { |
119 | struct hash_cell *hc; | 119 | struct hash_cell *hc; |
120 | unsigned int h = hash_str(str); | 120 | unsigned int h = hash_str(str); |
121 | 121 | ||
122 | list_for_each_entry (hc, _uuid_buckets + h, uuid_list) | 122 | list_for_each_entry (hc, _uuid_buckets + h, uuid_list) |
123 | if (!strcmp(hc->uuid, str)) { | 123 | if (!strcmp(hc->uuid, str)) { |
124 | dm_get(hc->md); | 124 | dm_get(hc->md); |
125 | return hc; | 125 | return hc; |
126 | } | 126 | } |
127 | 127 | ||
128 | return NULL; | 128 | return NULL; |
129 | } | 129 | } |
130 | 130 | ||
131 | /*----------------------------------------------------------------- | 131 | /*----------------------------------------------------------------- |
132 | * Inserting, removing and renaming a device. | 132 | * Inserting, removing and renaming a device. |
133 | *---------------------------------------------------------------*/ | 133 | *---------------------------------------------------------------*/ |
134 | static struct hash_cell *alloc_cell(const char *name, const char *uuid, | 134 | static struct hash_cell *alloc_cell(const char *name, const char *uuid, |
135 | struct mapped_device *md) | 135 | struct mapped_device *md) |
136 | { | 136 | { |
137 | struct hash_cell *hc; | 137 | struct hash_cell *hc; |
138 | 138 | ||
139 | hc = kmalloc(sizeof(*hc), GFP_KERNEL); | 139 | hc = kmalloc(sizeof(*hc), GFP_KERNEL); |
140 | if (!hc) | 140 | if (!hc) |
141 | return NULL; | 141 | return NULL; |
142 | 142 | ||
143 | hc->name = kstrdup(name, GFP_KERNEL); | 143 | hc->name = kstrdup(name, GFP_KERNEL); |
144 | if (!hc->name) { | 144 | if (!hc->name) { |
145 | kfree(hc); | 145 | kfree(hc); |
146 | return NULL; | 146 | return NULL; |
147 | } | 147 | } |
148 | 148 | ||
149 | if (!uuid) | 149 | if (!uuid) |
150 | hc->uuid = NULL; | 150 | hc->uuid = NULL; |
151 | 151 | ||
152 | else { | 152 | else { |
153 | hc->uuid = kstrdup(uuid, GFP_KERNEL); | 153 | hc->uuid = kstrdup(uuid, GFP_KERNEL); |
154 | if (!hc->uuid) { | 154 | if (!hc->uuid) { |
155 | kfree(hc->name); | 155 | kfree(hc->name); |
156 | kfree(hc); | 156 | kfree(hc); |
157 | return NULL; | 157 | return NULL; |
158 | } | 158 | } |
159 | } | 159 | } |
160 | 160 | ||
161 | INIT_LIST_HEAD(&hc->name_list); | 161 | INIT_LIST_HEAD(&hc->name_list); |
162 | INIT_LIST_HEAD(&hc->uuid_list); | 162 | INIT_LIST_HEAD(&hc->uuid_list); |
163 | hc->md = md; | 163 | hc->md = md; |
164 | hc->new_map = NULL; | 164 | hc->new_map = NULL; |
165 | return hc; | 165 | return hc; |
166 | } | 166 | } |
167 | 167 | ||
168 | static void free_cell(struct hash_cell *hc) | 168 | static void free_cell(struct hash_cell *hc) |
169 | { | 169 | { |
170 | if (hc) { | 170 | if (hc) { |
171 | kfree(hc->name); | 171 | kfree(hc->name); |
172 | kfree(hc->uuid); | 172 | kfree(hc->uuid); |
173 | kfree(hc); | 173 | kfree(hc); |
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
/*
 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
 *
 * Registers @md under @name (and @uuid, if given) in the hash
 * tables.  Returns 0, -ENOMEM on allocation failure, or -EBUSY if
 * either the name or the uuid is already taken.
 */
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
	struct hash_cell *cell, *hc;

	/*
	 * Allocate the new cells.
	 */
	cell = alloc_cell(name, uuid, md);
	if (!cell)
		return -ENOMEM;

	/*
	 * Insert the cell into both hash tables.
	 */
	down_write(&_hash_lock);
	hc = __get_name_cell(name);
	if (hc) {
		/* name already taken; drop the lookup's reference */
		dm_put(hc->md);
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			/* uuid clash: undo the name insertion above */
			list_del(&cell->name_list);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
	}
	dm_get(md);	/* the hash table now holds a reference */
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	return 0;

 bad:
	up_write(&_hash_lock);
	free_cell(cell);
	return -EBUSY;
}
226 | 226 | ||
/*
 * Unlink a cell from both hash tables and drop the references it
 * holds.  Caller holds _hash_lock for write.
 */
static void __hash_remove(struct hash_cell *hc)
{
	struct dm_table *table;

	/* remove from the dev hash */
	/* uuid_list delete is safe even without a uuid: alloc_cell
	 * initialised both list heads. */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	/* Signal a table event so waiters notice the removal. */
	table = dm_get_live_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	/* Discard any table that was loaded but never made live. */
	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	dm_put(hc->md);
	free_cell(hc);
}
249 | 249 | ||
/*
 * Remove every device from the hash tables.  With
 * @keep_open_devices set, devices that cannot be locked for
 * deletion (still open) are skipped and counted instead.
 */
static void dm_hash_remove_all(int keep_open_devices)
{
	int i, dev_skipped;
	struct hash_cell *hc;
	struct mapped_device *md;

retry:
	dev_skipped = 0;

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
			md = hc->md;
			dm_get(md);

			if (keep_open_devices && dm_lock_for_deletion(md)) {
				/* device is open - leave it alone */
				dm_put(md);
				dev_skipped++;
				continue;
			}

			__hash_remove(hc);

			/* __hash_remove() invalidated this list walk and
			 * dm_destroy() must run without _hash_lock, so
			 * drop the lock and start over from scratch. */
			up_write(&_hash_lock);

			dm_put(md);
			if (likely(keep_open_devices))
				dm_destroy(md);
			else
				dm_destroy_immediate(md);

			/*
			 * Some mapped devices may be using other mapped
			 * devices, so repeat until we make no further
			 * progress.  If a new mapped device is created
			 * here it will also get removed.
			 */
			goto retry;
		}
	}

	up_write(&_hash_lock);

	if (dev_skipped)
		DMWARN("remove_all left %d open device(s)", dev_skipped);
}
297 | 297 | ||
/*
 * Set the uuid of a hash_cell that isn't already set.
 * Takes ownership of @new_uuid.  Caller holds _hash_lock for
 * write (see dm_hash_rename()).
 */
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
{
	/* dm_hash_cells_mutex keeps readers that reach the cell via
	 * mdptr from racing with the uuid update. */
	mutex_lock(&dm_hash_cells_mutex);
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	/* Only now is the device findable by uuid. */
	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
}
309 | |||
/*
 * Changes the name of a hash_cell and returns the old name for
 * the caller to free.  Takes ownership of @new_name.  Caller
 * holds _hash_lock for write (see dm_hash_rename()).
 */
static char *__change_cell_name(struct hash_cell *hc, char *new_name)
{
	char *old_name;

	/*
	 * Rename and move the name cell.
	 */
	list_del(&hc->name_list);
	old_name = hc->name;

	/* Serialise against readers using mdptr to fetch the name. */
	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);

	/* Re-hash under the new name. */
	list_add(&hc->name_list, _name_buckets + hash_str(new_name));

	return old_name;
}
332 | |||
/*
 * Rename a device, or - with DM_UUID_FLAG set in param->flags -
 * set its uuid, which is only permitted if no uuid was set before.
 * @new is the new name or uuid.  Returns the mapped device on
 * success, or ERR_PTR(-ENOMEM / -EBUSY / -ENXIO / -EINVAL).
 */
static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
					    const char *new)
{
	char *new_data, *old_name = NULL;
	struct hash_cell *hc;
	struct dm_table *table;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	/*
	 * duplicate new.
	 */
	new_data = kstrdup(new, GFP_KERNEL);
	if (!new_data)
		return ERR_PTR(-ENOMEM);

	down_write(&_hash_lock);

	/*
	 * Is new free ?
	 */
	if (change_uuid)
		hc = __get_uuid_cell(new);
	else
		hc = __get_name_cell(new);

	if (hc) {
		DMWARN("Unable to change %s on mapped device %s to one that "
		       "already exists: %s",
		       change_uuid ? "uuid" : "name",
		       param->name, new);
		dm_put(hc->md);		/* drop the lookup's reference */
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EBUSY);
	}

	/*
	 * Is there such a device as 'old' ?
	 */
	hc = __get_name_cell(param->name);
	if (!hc) {
		DMWARN("Unable to rename non-existent device, %s to %s%s",
		       param->name, change_uuid ? "uuid " : "", new);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Does this device already have a uuid?
	 */
	if (change_uuid && hc->uuid) {
		/* the uuid may only be filled in once */
		DMWARN("Unable to change uuid of mapped device %s to %s "
		       "because uuid is already set to %s",
		       param->name, new, hc->uuid);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EINVAL);
	}

	/* new_data's ownership passes to the cell in either case */
	if (change_uuid)
		__set_cell_uuid(hc, new_data);
	else
		old_name = __change_cell_name(hc, new_data);

	/*
	 * Wake up any dm event waiters.
	 */
	table = dm_get_live_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	/* the reference taken by __get_name_cell is returned to the
	 * caller via md */
	md = hc->md;
	up_write(&_hash_lock);
	kfree(old_name);	/* NULL (a no-op) on the uuid path */

	return md;
}
368 | 418 | ||
/*-----------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------*/
/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.  @param is the in/out ioctl buffer, @param_size its
 * total size in bytes; a negative return is an errno.
 */
typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
377 | 427 | ||
/*
 * DM_REMOVE_ALL handler: remove every device, skipping any that
 * are still open.  No payload is returned.
 */
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
	dm_hash_remove_all(1);
	param->data_size = 0;
	return 0;
}
384 | 434 | ||
385 | /* | 435 | /* |
386 | * Round up the ptr to an 8-byte boundary. | 436 | * Round up the ptr to an 8-byte boundary. |
387 | */ | 437 | */ |
388 | #define ALIGN_MASK 7 | 438 | #define ALIGN_MASK 7 |
389 | static inline void *align_ptr(void *ptr) | 439 | static inline void *align_ptr(void *ptr) |
390 | { | 440 | { |
391 | return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); | 441 | return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); |
392 | } | 442 | } |
393 | 443 | ||
394 | /* | 444 | /* |
395 | * Retrieves the data payload buffer from an already allocated | 445 | * Retrieves the data payload buffer from an already allocated |
396 | * struct dm_ioctl. | 446 | * struct dm_ioctl. |
397 | */ | 447 | */ |
398 | static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, | 448 | static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, |
399 | size_t *len) | 449 | size_t *len) |
400 | { | 450 | { |
401 | param->data_start = align_ptr(param + 1) - (void *) param; | 451 | param->data_start = align_ptr(param + 1) - (void *) param; |
402 | 452 | ||
403 | if (param->data_start < param_size) | 453 | if (param->data_start < param_size) |
404 | *len = param_size - param->data_start; | 454 | *len = param_size - param->data_start; |
405 | else | 455 | else |
406 | *len = 0; | 456 | *len = 0; |
407 | 457 | ||
408 | return ((void *) param) + param->data_start; | 458 | return ((void *) param) + param->data_start; |
409 | } | 459 | } |
410 | 460 | ||
/*
 * DM_LIST_DEVICES handler: pack a linked list of dm_name_list
 * records (one per device) into the result buffer.  Sets
 * DM_BUFFER_FULL_FLAG if the caller's buffer is too small.
 */
static int list_devices(struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *nl, *old_nl = NULL;

	down_write(&_hash_lock);

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			needed += sizeof(struct dm_name_list);
			needed += strlen(hc->name) + 1;
			needed += ALIGN_MASK;	/* worst-case alignment padding */
		}
	}

	/*
	 * Grab our output buffer.
	 */
	nl = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			/* link the previous record to this one via a
			 * byte offset */
			if (old_nl)
				old_nl->next = (uint32_t) ((void *) nl -
							   (void *) old_nl);
			disk = dm_disk(hc->md);
			nl->dev = huge_encode_dev(disk_devt(disk));
			nl->next = 0;
			strcpy(nl->name, hc->name);

			/* advance past this record + its name, realigned */
			old_nl = nl;
			nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
		}
	}

 out:
	up_write(&_hash_lock);
	return 0;
}
467 | 517 | ||
468 | static void list_version_get_needed(struct target_type *tt, void *needed_param) | 518 | static void list_version_get_needed(struct target_type *tt, void *needed_param) |
469 | { | 519 | { |
470 | size_t *needed = needed_param; | 520 | size_t *needed = needed_param; |
471 | 521 | ||
472 | *needed += sizeof(struct dm_target_versions); | 522 | *needed += sizeof(struct dm_target_versions); |
473 | *needed += strlen(tt->name); | 523 | *needed += strlen(tt->name); |
474 | *needed += ALIGN_MASK; | 524 | *needed += ALIGN_MASK; |
475 | } | 525 | } |
476 | 526 | ||
/*
 * dm_target_iterate() callback: emit one dm_target_versions record
 * into the iterator's buffer, linking it to the previous record.
 * Sets DM_BUFFER_FULL_FLAG (without writing) if out of space.
 */
static void list_version_get_info(struct target_type *tt, void *param)
{
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {

		info->flags = DM_BUFFER_FULL_FLAG;
		return;
	}

	/* chain records via byte offsets, as in list_devices() */
	if (info->old_vers)
		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;
	strcpy(info->vers->name, tt->name);

	/* advance past this record + its name, realigned */
	info->old_vers = info->vers;
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}
501 | 551 | ||
/*
 * DM_LIST_VERSIONS handler: report the version of every registered
 * target type.  Sets DM_BUFFER_FULL_FLAG if the caller's buffer is
 * too small.
 */
static int list_versions(struct dm_ioctl *param, size_t param_size)
{
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	dm_target_iterate(list_version_get_needed, &needed);

	/*
	 * Grab our output buffer.
	 */
	vers = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.flags = 0;
	iter_info.end = (char *)vers + len;

	/*
	 * Now loop through filling out the names & versions.
	 */
	dm_target_iterate(list_version_get_info, &iter_info);
	/* propagate DM_BUFFER_FULL_FLAG if the second pass ran out */
	param->flags |= iter_info.flags;

 out:
	return 0;
}
539 | 589 | ||
/* Reject mapped device names containing a '/' character. */
static int check_name(const char *name)
{
	if (!strchr(name, '/'))
		return 0;

	DMWARN("invalid device name");
	return -EINVAL;
}
549 | 599 | ||
/*
 * On successful return, the caller must not attempt to acquire
 * _hash_lock without first calling dm_table_put, because dm_table_destroy
 * waits for this dm_table_put and could be called under this lock.
 */
static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
{
	struct hash_cell *hc;
	struct dm_table *table = NULL;

	down_read(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		/* Raced with removal/rename: the cell no longer matches md. */
		DMWARN("device has been removed from the dev hash table.");
		goto out;
	}

	/* hc->new_map holds the loaded-but-not-yet-resumed table, if any. */
	table = hc->new_map;
	if (table)
		dm_table_get(table);	/* reference handed to the caller */

 out:
	up_read(&_hash_lock);

	return table;
}
576 | 626 | ||
577 | static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, | 627 | static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, |
578 | struct dm_ioctl *param) | 628 | struct dm_ioctl *param) |
579 | { | 629 | { |
580 | return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? | 630 | return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? |
581 | dm_get_inactive_table(md) : dm_get_live_table(md); | 631 | dm_get_inactive_table(md) : dm_get_live_table(md); |
582 | } | 632 | } |
583 | 633 | ||
/*
 * Fills in a dm_ioctl structure, ready for sending back to
 * userland.
 */
static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;

	/* Clear the status flags we are about to recompute. */
	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG);

	if (dm_suspended_md(md))
		param->flags |= DM_SUSPEND_FLAG;

	param->dev = huge_encode_dev(disk_devt(disk));

	/*
	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for
	 * debugging.
	 */
	param->open_count = dm_open_count(md);

	param->event_nr = dm_get_event_nr(md);
	param->target_count = 0;

	table = dm_get_live_table(md);
	if (table) {
		/* Report the live table unless the inactive one was requested. */
		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
			if (get_disk_ro(disk))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}
		dm_table_put(table);

		param->flags |= DM_ACTIVE_PRESENT_FLAG;
	}

	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
		table = dm_get_inactive_table(md);
		if (table) {
			/* Read-only if the inactive table lacks write mode. */
			if (!(dm_table_get_mode(table) & FMODE_WRITE))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
			dm_table_put(table);
		}
	}
}
633 | 683 | ||
/*
 * DM_DEV_CREATE handler: create a new mapped device and register it in
 * the name/uuid hash.  The uuid is optional at creation time.
 */
static int dev_create(struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	/* DM_PERSISTENT_DEV_FLAG: userspace chose a specific minor number. */
	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		/* Unwind dm_create(): drop our reference, then destroy. */
		dm_put(md);
		dm_destroy(md);
		return r;
	}

	/* A freshly created device has no inactive table yet. */
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);

	dm_put(md);

	return 0;
}
665 | 715 | ||
/*
 * Always use UUID for lookups if it's present, otherwise use name or dev.
 */
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
	struct mapped_device *md;
	void *mdptr = NULL;

	/* Lookup priority: uuid, then name, then device number. */
	if (*param->uuid)
		return __get_uuid_cell(param->uuid);

	if (*param->name)
		return __get_name_cell(param->name);

	md = dm_get_md(huge_decode_dev(param->dev));
	if (!md)
		goto out;

	mdptr = dm_get_mdptr(md);
	if (!mdptr)
		dm_put(md);	/* drop the reference taken by dm_get_md() */

 out:
	return mdptr;
}
691 | 741 | ||
/*
 * Look up the device identified by @param.  On success the returned
 * mapped_device carries a reference the caller must release with
 * dm_put(); the canonical name, uuid and inactive-table flag are also
 * written back into @param for userspace.
 */
static struct mapped_device *find_device(struct dm_ioctl *param)
{
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	if (hc) {
		md = hc->md;

		/*
		 * Sneakily write in both the name and the uuid
		 * while we have the cell.
		 */
		strlcpy(param->name, hc->name, sizeof(param->name));
		if (hc->uuid)
			strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
		else
			param->uuid[0] = '\0';

		if (hc->new_map)
			param->flags |= DM_INACTIVE_PRESENT_FLAG;
		else
			param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
	}
	up_read(&_hash_lock);

	return md;
}
721 | 771 | ||
/*
 * DM_DEV_REMOVE handler: unhash and destroy a mapped device, refusing
 * if it is still open.
 */
static int dev_remove(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	int r;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);

	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/*
	 * Ensure the device is not open and nothing further can open it.
	 */
	r = dm_lock_for_deletion(md);
	if (r) {
		DMWARN("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);
		dm_put(md);	/* drop the lookup reference */
		return r;
	}

	__hash_remove(hc);
	up_write(&_hash_lock);

	/* Record whether a remove uevent was actually generated. */
	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	dm_put(md);
	dm_destroy(md);
	return 0;
}
760 | 810 | ||
/*
 * Verify that the string starting at @str is NUL-terminated before
 * reaching @end (one past the chunk of memory copied from userland).
 * Returns 0 when a terminator is found, -EINVAL otherwise.
 */
static int invalid_str(char *str, void *end)
{
	char *p = str;

	while ((void *) p < end) {
		if (*p == '\0')
			return 0;
		p++;
	}

	return -EINVAL;
}
773 | 823 | ||
/*
 * DM_DEV_RENAME handler.  The payload following the ioctl header is
 * either a new device name or, when DM_UUID_FLAG is set, a uuid to fill
 * in for a device that was created without one.
 */
static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
	int r;
	char *new_data = (char *) param + param->data_start;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	/* The string must lie inside the copied buffer and fit its target field. */
	if (new_data < param->data ||
	    invalid_str(new_data, (void *) param + param_size) ||
	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
		DMWARN("Invalid new mapped device name or uuid string supplied.");
		return -EINVAL;
	}

	/* Uuids are not subject to the device-name character rules. */
	if (!change_uuid) {
		r = check_name(new_data);
		if (r)
			return r;
	}

	md = dm_hash_rename(param, new_data);
	if (IS_ERR(md))
		return PTR_ERR(md);

	__dev_status(md, param);
	dm_put(md);

	return 0;
}
800 | 853 | ||
/*
 * DM_DEV_SET_GEOMETRY handler: parse "cylinders heads sectors start"
 * from the payload and apply it to the device.
 */
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
{
	int r = -EINVAL, x;
	struct mapped_device *md;
	struct hd_geometry geometry;
	unsigned long indata[4];
	char *geostr = (char *) param + param->data_start;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/* The geometry string must lie wholly inside the copied buffer. */
	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");
		goto out;
	}

	x = sscanf(geostr, "%lu %lu %lu %lu", indata,
		   indata + 1, indata + 2, indata + 3);

	if (x != 4) {
		DMWARN("Unable to interpret geometry settings.");
		goto out;
	}

	/*
	 * NOTE(review): indata[3] > ULONG_MAX can never be true since
	 * indata[3] is itself an unsigned long, so the start field is
	 * effectively range-unchecked here — confirm whether that is
	 * intentional.
	 */
	if (indata[0] > 65535 || indata[1] > 255 ||
	    indata[2] > 255 || indata[3] > ULONG_MAX) {
		DMWARN("Geometry exceeds range limits.");
		goto out;
	}

	geometry.cylinders = indata[0];
	geometry.heads = indata[1];
	geometry.sectors = indata[2];
	geometry.start = indata[3];

	r = dm_set_geometry(md, &geometry);

	/* No result payload beyond the header. */
	param->data_size = 0;

 out:
	dm_put(md);
	return r;
}
846 | 899 | ||
/*
 * Suspend half of DM_DEV_SUSPEND: suspend the device (unless it already
 * is) and report its status.
 */
static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/* Translate ioctl flags into dm_suspend() flags. */
	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	/* If already suspended, just fall through to reporting status. */
	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, suspend_flags);
		if (r)
			goto out;
	}

	__dev_status(md, param);

 out:
	dm_put(md);

	return r;
}
875 | 928 | ||
/*
 * Resume half of DM_DEV_SUSPEND: swap in any pre-loaded inactive table,
 * then resume the device.
 */
static int do_resume(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map, *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/* Take ownership of the inactive table (if any) under the lock. */
	new_map = hc->new_map;
	hc->new_map = NULL;
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */
	if (new_map) {
		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
			dm_suspend(md, suspend_flags);

		old_map = dm_swap_table(md, new_map);
		if (IS_ERR(old_map)) {
			/* Swap failed: the new table was never installed. */
			dm_table_destroy(new_map);
			dm_put(md);
			return PTR_ERR(old_map);
		}

		/* Mirror the new table's write mode onto the disk. */
		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);
	}

	if (dm_suspended_md(md)) {
		r = dm_resume(md);
		if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
			param->flags |= DM_UEVENT_GENERATED_FLAG;
	}

	/* Destroy the replaced table only after the device is resumed. */
	if (old_map)
		dm_table_destroy(old_map);

	if (!r)
		__dev_status(md, param);

	dm_put(md);
	return r;
}
939 | 992 | ||
940 | /* | 993 | /* |
941 | * Set or unset the suspension state of a device. | 994 | * Set or unset the suspension state of a device. |
942 | * If the device already is in the requested state we just return its status. | 995 | * If the device already is in the requested state we just return its status. |
943 | */ | 996 | */ |
944 | static int dev_suspend(struct dm_ioctl *param, size_t param_size) | 997 | static int dev_suspend(struct dm_ioctl *param, size_t param_size) |
945 | { | 998 | { |
946 | if (param->flags & DM_SUSPEND_FLAG) | 999 | if (param->flags & DM_SUSPEND_FLAG) |
947 | return do_suspend(param); | 1000 | return do_suspend(param); |
948 | 1001 | ||
949 | return do_resume(param); | 1002 | return do_resume(param); |
950 | } | 1003 | } |
951 | 1004 | ||
952 | /* | 1005 | /* |
953 | * Copies device info back to user space, used by | 1006 | * Copies device info back to user space, used by |
954 | * the create and info ioctls. | 1007 | * the create and info ioctls. |
955 | */ | 1008 | */ |
956 | static int dev_status(struct dm_ioctl *param, size_t param_size) | 1009 | static int dev_status(struct dm_ioctl *param, size_t param_size) |
957 | { | 1010 | { |
958 | struct mapped_device *md; | 1011 | struct mapped_device *md; |
959 | 1012 | ||
960 | md = find_device(param); | 1013 | md = find_device(param); |
961 | if (!md) | 1014 | if (!md) |
962 | return -ENXIO; | 1015 | return -ENXIO; |
963 | 1016 | ||
964 | __dev_status(md, param); | 1017 | __dev_status(md, param); |
965 | dm_put(md); | 1018 | dm_put(md); |
966 | 1019 | ||
967 | return 0; | 1020 | return 0; |
968 | } | 1021 | } |
969 | 1022 | ||
/*
 * Build up the status struct for each target
 */
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
{
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	status_type_t type;
	size_t remaining, len, used = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	/* DM_STATUS_TABLE_FLAG selects table lines rather than status info. */
	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);

		/* Room for the fixed-size spec header? */
		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		spec = (struct dm_target_spec *) outptr;

		spec->status = 0;
		spec->sector_start = ti->begin;
		spec->length = ti->len;
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type));

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (ti->type->status(ti, type, outptr, remaining)) {
				/* Non-zero return is treated as out of space. */
				param->flags |= DM_BUFFER_FULL_FLAG;
				break;
			}
		} else
			outptr[0] = '\0';

		outptr += strlen(outptr) + 1;
		used = param->data_start + (outptr - outbuf);

		/* Each spec records the aligned offset of its successor. */
		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;
	}

	if (used)
		param->data_size = used;

	param->target_count = num_targets;
}
1036 | 1089 | ||
/*
 * Wait for a device to report an event
 */
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
	int r = 0;
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/*
	 * Wait for a notification event
	 */
	if (dm_wait_event(md, param->event_nr)) {
		/* dm_wait_event() returned non-zero: have the syscall restarted. */
		r = -ERESTARTSYS;
		goto out;
	}

	/*
	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	 */
	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param);
	if (table) {
		retrieve_status(table, param, param_size);
		dm_table_put(table);
	}

 out:
	dm_put(md);

	return r;
}
1076 | 1129 | ||
1077 | static inline fmode_t get_mode(struct dm_ioctl *param) | 1130 | static inline fmode_t get_mode(struct dm_ioctl *param) |
1078 | { | 1131 | { |
1079 | fmode_t mode = FMODE_READ | FMODE_WRITE; | 1132 | fmode_t mode = FMODE_READ | FMODE_WRITE; |
1080 | 1133 | ||
1081 | if (param->flags & DM_READONLY_FLAG) | 1134 | if (param->flags & DM_READONLY_FLAG) |
1082 | mode = FMODE_READ; | 1135 | mode = FMODE_READ; |
1083 | 1136 | ||
1084 | return mode; | 1137 | return mode; |
1085 | } | 1138 | } |
1086 | 1139 | ||
/*
 * Advance from one dm_target_spec to the next within the ioctl buffer.
 * @next is a byte offset from @last; on success *spec and *target_params
 * point at the new spec and its trailing parameter string.
 */
static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
{
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	/* The next spec must not overlap the previous one. */
	if (*spec < (last + 1))
		return -EINVAL;

	/* The parameter string must be NUL-terminated inside the buffer. */
	return invalid_str(*target_params, end);
}
1098 | 1151 | ||
/*
 * Walk the chain of dm_target_spec entries in the ioctl payload, add
 * each one to @table, then finalise the table.
 */
static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	int r;
	unsigned int i = 0;
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");
		return -EINVAL;
	}

	for (i = 0; i < param->target_count; i++) {

		/* First iteration starts at data_start; later ones use spec->next. */
		r = next_target(spec, next, end, &spec, &target_params);
		if (r) {
			DMWARN("unable to find target");
			return r;
		}

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
		if (r) {
			DMWARN("error adding target to table");
			return r;
		}

		next = spec->next;
	}

	return dm_table_complete(table);
}
1136 | 1189 | ||
/*
 * DM_TABLE_LOAD: build a table from the ioctl payload and stage it as
 * the device's inactive table (hc->new_map).  The live table is not
 * touched here; a later resume swaps the staged table in.
 */
static int table_load(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct dm_table *t;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = dm_table_create(&t, get_mode(param), param->target_count, md);
	if (r)
		goto out;

	r = populate_table(t, param, param_size);
	if (r) {
		dm_table_destroy(t);
		goto out;
	}

	/* Protect md->type and md->queue against concurrent table loads. */
	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));
	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		/* Once set, the device type is immutable. */
		DMWARN("can't change device type after initial table load.");
		dm_table_destroy(t);
		dm_unlock_md_type(md);
		r = -EINVAL;
		goto out;
	}

	/* setup md->queue to reflect md's type (may block) */
	r = dm_setup_md_queue(md);
	if (r) {
		DMWARN("unable to set up device queue for new table.");
		dm_table_destroy(t);
		dm_unlock_md_type(md);
		goto out;
	}
	dm_unlock_md_type(md);

	/* stage inactive table */
	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		/* Device vanished while we weren't holding _hash_lock. */
		DMWARN("device has been removed from the dev hash table.");
		dm_table_destroy(t);
		up_write(&_hash_lock);
		r = -ENXIO;
		goto out;
	}

	/* Replace any previously staged but never-activated table. */
	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	hc->new_map = t;
	up_write(&_hash_lock);

	/* Report the new inactive table back to userspace. */
	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	__dev_status(md, param);

out:
	dm_put(md);

	return r;
}
1205 | 1258 | ||
/*
 * DM_TABLE_CLEAR: discard a staged-but-not-activated table
 * (hc->new_map), leaving any live table in place.
 */
static int table_clear(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	if (hc->new_map) {
		dm_table_destroy(hc->new_map);
		hc->new_map = NULL;
	}

	/* No inactive table remains after this call. */
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(hc->md, param);
	md = hc->md;
	up_write(&_hash_lock);
	/* presumably drops a reference taken in __find_device_hash_cell —
	 * confirm against that helper's definition */
	dm_put(md);

	return 0;
}
1234 | 1287 | ||
/*
 * Retrieves a list of devices used by a particular dm device.
 * Writes a dm_target_deps structure into the ioctl result buffer, or
 * sets DM_BUFFER_FULL_FLAG if the buffer is too small so userspace
 * can retry with a larger one.
 */
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	/*
	 * Count the devices.
	 */
	list_for_each (tmp, dm_table_get_devices(table))
		count++;

	/*
	 * Check we have enough space.
	 */
	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
	if (len < needed) {
		/* Ask userspace to retry with a bigger buffer. */
		param->flags |= DM_BUFFER_FULL_FLAG;
		return;
	}

	/*
	 * Fill in the devices.
	 */
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);

	param->data_size = param->data_start + needed;
}
1274 | 1327 | ||
1275 | static int table_deps(struct dm_ioctl *param, size_t param_size) | 1328 | static int table_deps(struct dm_ioctl *param, size_t param_size) |
1276 | { | 1329 | { |
1277 | struct mapped_device *md; | 1330 | struct mapped_device *md; |
1278 | struct dm_table *table; | 1331 | struct dm_table *table; |
1279 | 1332 | ||
1280 | md = find_device(param); | 1333 | md = find_device(param); |
1281 | if (!md) | 1334 | if (!md) |
1282 | return -ENXIO; | 1335 | return -ENXIO; |
1283 | 1336 | ||
1284 | __dev_status(md, param); | 1337 | __dev_status(md, param); |
1285 | 1338 | ||
1286 | table = dm_get_live_or_inactive_table(md, param); | 1339 | table = dm_get_live_or_inactive_table(md, param); |
1287 | if (table) { | 1340 | if (table) { |
1288 | retrieve_deps(table, param, param_size); | 1341 | retrieve_deps(table, param, param_size); |
1289 | dm_table_put(table); | 1342 | dm_table_put(table); |
1290 | } | 1343 | } |
1291 | 1344 | ||
1292 | dm_put(md); | 1345 | dm_put(md); |
1293 | 1346 | ||
1294 | return 0; | 1347 | return 0; |
1295 | } | 1348 | } |
1296 | 1349 | ||
1297 | /* | 1350 | /* |
1298 | * Return the status of a device as a text string for each | 1351 | * Return the status of a device as a text string for each |
1299 | * target. | 1352 | * target. |
1300 | */ | 1353 | */ |
1301 | static int table_status(struct dm_ioctl *param, size_t param_size) | 1354 | static int table_status(struct dm_ioctl *param, size_t param_size) |
1302 | { | 1355 | { |
1303 | struct mapped_device *md; | 1356 | struct mapped_device *md; |
1304 | struct dm_table *table; | 1357 | struct dm_table *table; |
1305 | 1358 | ||
1306 | md = find_device(param); | 1359 | md = find_device(param); |
1307 | if (!md) | 1360 | if (!md) |
1308 | return -ENXIO; | 1361 | return -ENXIO; |
1309 | 1362 | ||
1310 | __dev_status(md, param); | 1363 | __dev_status(md, param); |
1311 | 1364 | ||
1312 | table = dm_get_live_or_inactive_table(md, param); | 1365 | table = dm_get_live_or_inactive_table(md, param); |
1313 | if (table) { | 1366 | if (table) { |
1314 | retrieve_status(table, param, param_size); | 1367 | retrieve_status(table, param, param_size); |
1315 | dm_table_put(table); | 1368 | dm_table_put(table); |
1316 | } | 1369 | } |
1317 | 1370 | ||
1318 | dm_put(md); | 1371 | dm_put(md); |
1319 | 1372 | ||
1320 | return 0; | 1373 | return 0; |
1321 | } | 1374 | } |
1322 | 1375 | ||
/*
 * Pass a message to the target that's at the supplied device offset.
 * The message text lives in the ioctl payload as a dm_target_msg
 * structure at data_start.
 */
static int target_message(struct dm_ioctl *param, size_t param_size)
{
	int r, argc;
	char **argv;
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	struct dm_target_msg *tmsg = (void *) param + param->data_start;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/* The message struct must sit inside the payload and its string
	 * must be NUL-terminated before the end of the buffer. */
	if (tmsg < (struct dm_target_msg *) param->data ||
	    invalid_str(tmsg->message, (void *) param + param_size)) {
		DMWARN("Invalid target message parameters.");
		r = -EINVAL;
		goto out;
	}

	r = dm_split_args(&argc, &argv, tmsg->message);
	if (r) {
		DMWARN("Failed to split target message parameters");
		goto out;
	}

	table = dm_get_live_table(md);
	if (!table)
		/* NOTE(review): r is still 0 from dm_split_args here, so a
		 * device with no live table returns success — confirm this
		 * is intentional. */
		goto out_argv;

	if (dm_deleting_md(md)) {
		r = -ENXIO;
		goto out_table;
	}

	ti = dm_table_find_target(table, tmsg->sector);
	if (!dm_target_is_valid(ti)) {
		DMWARN("Target message sector outside device.");
		r = -EINVAL;
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv);
	else {
		DMWARN("Target type does not support messages");
		r = -EINVAL;
	}

 out_table:
	dm_table_put(table);
 out_argv:
	kfree(argv);
 out:
	/* Messages produce no result payload. */
	param->data_size = 0;
	dm_put(md);
	return r;
}
1381 | 1434 | ||
/*-----------------------------------------------------------------
 * Implementation of open/close/ioctl on the special char
 * device.
 *---------------------------------------------------------------*/
static ioctl_fn lookup_ioctl(unsigned int cmd)
{
	/*
	 * Indexed directly by the ioctl command number, so the order of
	 * entries must match the command numbering in dm-ioctl.h.
	 */
	static struct {
		int cmd;
		ioctl_fn fn;
	} _ioctls[] = {
		{DM_VERSION_CMD, NULL},	/* version is dealt with elsewhere */
		{DM_REMOVE_ALL_CMD, remove_all},
		{DM_LIST_DEVICES_CMD, list_devices},

		{DM_DEV_CREATE_CMD, dev_create},
		{DM_DEV_REMOVE_CMD, dev_remove},
		{DM_DEV_RENAME_CMD, dev_rename},
		{DM_DEV_SUSPEND_CMD, dev_suspend},
		{DM_DEV_STATUS_CMD, dev_status},
		{DM_DEV_WAIT_CMD, dev_wait},

		{DM_TABLE_LOAD_CMD, table_load},
		{DM_TABLE_CLEAR_CMD, table_clear},
		{DM_TABLE_DEPS_CMD, table_deps},
		{DM_TABLE_STATUS_CMD, table_status},

		{DM_LIST_VERSIONS_CMD, list_versions},

		{DM_TARGET_MSG_CMD, target_message},
		{DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
	};

	/* Unknown/out-of-range commands get NULL (rejected by the caller). */
	return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
}
1416 | 1469 | ||
1417 | /* | 1470 | /* |
1418 | * As well as checking the version compatibility this always | 1471 | * As well as checking the version compatibility this always |
1419 | * copies the kernel interface version out. | 1472 | * copies the kernel interface version out. |
1420 | */ | 1473 | */ |
1421 | static int check_version(unsigned int cmd, struct dm_ioctl __user *user) | 1474 | static int check_version(unsigned int cmd, struct dm_ioctl __user *user) |
1422 | { | 1475 | { |
1423 | uint32_t version[3]; | 1476 | uint32_t version[3]; |
1424 | int r = 0; | 1477 | int r = 0; |
1425 | 1478 | ||
1426 | if (copy_from_user(version, user->version, sizeof(version))) | 1479 | if (copy_from_user(version, user->version, sizeof(version))) |
1427 | return -EFAULT; | 1480 | return -EFAULT; |
1428 | 1481 | ||
1429 | if ((DM_VERSION_MAJOR != version[0]) || | 1482 | if ((DM_VERSION_MAJOR != version[0]) || |
1430 | (DM_VERSION_MINOR < version[1])) { | 1483 | (DM_VERSION_MINOR < version[1])) { |
1431 | DMWARN("ioctl interface mismatch: " | 1484 | DMWARN("ioctl interface mismatch: " |
1432 | "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", | 1485 | "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", |
1433 | DM_VERSION_MAJOR, DM_VERSION_MINOR, | 1486 | DM_VERSION_MAJOR, DM_VERSION_MINOR, |
1434 | DM_VERSION_PATCHLEVEL, | 1487 | DM_VERSION_PATCHLEVEL, |
1435 | version[0], version[1], version[2], cmd); | 1488 | version[0], version[1], version[2], cmd); |
1436 | r = -EINVAL; | 1489 | r = -EINVAL; |
1437 | } | 1490 | } |
1438 | 1491 | ||
1439 | /* | 1492 | /* |
1440 | * Fill in the kernel version. | 1493 | * Fill in the kernel version. |
1441 | */ | 1494 | */ |
1442 | version[0] = DM_VERSION_MAJOR; | 1495 | version[0] = DM_VERSION_MAJOR; |
1443 | version[1] = DM_VERSION_MINOR; | 1496 | version[1] = DM_VERSION_MINOR; |
1444 | version[2] = DM_VERSION_PATCHLEVEL; | 1497 | version[2] = DM_VERSION_PATCHLEVEL; |
1445 | if (copy_to_user(user->version, version, sizeof(version))) | 1498 | if (copy_to_user(user->version, version, sizeof(version))) |
1446 | return -EFAULT; | 1499 | return -EFAULT; |
1447 | 1500 | ||
1448 | return r; | 1501 | return r; |
1449 | } | 1502 | } |
1450 | 1503 | ||
/* Release a parameter block allocated by copy_params(). */
static void free_params(struct dm_ioctl *param)
{
	vfree(param);
}
1455 | 1508 | ||
1456 | static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) | 1509 | static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) |
1457 | { | 1510 | { |
1458 | struct dm_ioctl tmp, *dmi; | 1511 | struct dm_ioctl tmp, *dmi; |
1459 | 1512 | ||
1460 | if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) | 1513 | if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) |
1461 | return -EFAULT; | 1514 | return -EFAULT; |
1462 | 1515 | ||
1463 | if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) | 1516 | if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) |
1464 | return -EINVAL; | 1517 | return -EINVAL; |
1465 | 1518 | ||
1466 | dmi = vmalloc(tmp.data_size); | 1519 | dmi = vmalloc(tmp.data_size); |
1467 | if (!dmi) | 1520 | if (!dmi) |
1468 | return -ENOMEM; | 1521 | return -ENOMEM; |
1469 | 1522 | ||
1470 | if (copy_from_user(dmi, user, tmp.data_size)) { | 1523 | if (copy_from_user(dmi, user, tmp.data_size)) { |
1471 | vfree(dmi); | 1524 | vfree(dmi); |
1472 | return -EFAULT; | 1525 | return -EFAULT; |
1473 | } | 1526 | } |
1474 | 1527 | ||
1475 | *param = dmi; | 1528 | *param = dmi; |
1476 | return 0; | 1529 | return 0; |
1477 | } | 1530 | } |
1478 | 1531 | ||
1479 | static int validate_params(uint cmd, struct dm_ioctl *param) | 1532 | static int validate_params(uint cmd, struct dm_ioctl *param) |
1480 | { | 1533 | { |
1481 | /* Always clear this flag */ | 1534 | /* Always clear this flag */ |
1482 | param->flags &= ~DM_BUFFER_FULL_FLAG; | 1535 | param->flags &= ~DM_BUFFER_FULL_FLAG; |
1483 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; | 1536 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; |
1484 | 1537 | ||
1485 | /* Ignores parameters */ | 1538 | /* Ignores parameters */ |
1486 | if (cmd == DM_REMOVE_ALL_CMD || | 1539 | if (cmd == DM_REMOVE_ALL_CMD || |
1487 | cmd == DM_LIST_DEVICES_CMD || | 1540 | cmd == DM_LIST_DEVICES_CMD || |
1488 | cmd == DM_LIST_VERSIONS_CMD) | 1541 | cmd == DM_LIST_VERSIONS_CMD) |
1489 | return 0; | 1542 | return 0; |
1490 | 1543 | ||
1491 | if ((cmd == DM_DEV_CREATE_CMD)) { | 1544 | if ((cmd == DM_DEV_CREATE_CMD)) { |
1492 | if (!*param->name) { | 1545 | if (!*param->name) { |
1493 | DMWARN("name not supplied when creating device"); | 1546 | DMWARN("name not supplied when creating device"); |
1494 | return -EINVAL; | 1547 | return -EINVAL; |
1495 | } | 1548 | } |
1496 | } else if ((*param->uuid && *param->name)) { | 1549 | } else if ((*param->uuid && *param->name)) { |
1497 | DMWARN("only supply one of name or uuid, cmd(%u)", cmd); | 1550 | DMWARN("only supply one of name or uuid, cmd(%u)", cmd); |
1498 | return -EINVAL; | 1551 | return -EINVAL; |
1499 | } | 1552 | } |
1500 | 1553 | ||
1501 | /* Ensure strings are terminated */ | 1554 | /* Ensure strings are terminated */ |
1502 | param->name[DM_NAME_LEN - 1] = '\0'; | 1555 | param->name[DM_NAME_LEN - 1] = '\0'; |
1503 | param->uuid[DM_UUID_LEN - 1] = '\0'; | 1556 | param->uuid[DM_UUID_LEN - 1] = '\0'; |
1504 | 1557 | ||
1505 | return 0; | 1558 | return 0; |
1506 | } | 1559 | } |
1507 | 1560 | ||
/*
 * Common ioctl dispatcher: validates the caller and command, copies the
 * parameter block in, runs the command handler and copies results back.
 */
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	unsigned int cmd;
	struct dm_ioctl *uninitialized_var(param);
	ioctl_fn fn = NULL;
	size_t param_size;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (_IOC_TYPE(command) != DM_IOCTL)
		return -ENOTTY;

	cmd = _IOC_NR(command);

	/*
	 * Check the interface version passed in.  This also
	 * writes out the kernel's interface version.
	 */
	r = check_version(cmd, user);
	if (r)
		return r;

	/*
	 * Nothing more to do for the version command.
	 */
	if (cmd == DM_VERSION_CMD)
		return 0;

	fn = lookup_ioctl(cmd);
	if (!fn) {
		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
		return -ENOTTY;
	}

	/*
	 * Trying to avoid low memory issues when a device is
	 * suspended.
	 */
	current->flags |= PF_MEMALLOC;

	/*
	 * Copy the parameters into kernel space.
	 */
	r = copy_params(user, &param);

	current->flags &= ~PF_MEMALLOC;

	if (r)
		return r;

	r = validate_params(cmd, param);
	if (r)
		goto out;

	/* Handlers grow data_size as they append results. */
	param_size = param->data_size;
	param->data_size = sizeof(*param);
	r = fn(param, param_size);

	/*
	 * Copy the results back to userland.
	 */
	if (!r && copy_to_user(user, param, param->data_size))
		r = -EFAULT;

out:
	free_params(param);
	return r;
}
1579 | 1632 | ||
/* unlocked_ioctl entry point for the dm control device. */
static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
}

#ifdef CONFIG_COMPAT
/* 32-bit compat path: convert the user pointer, then reuse the native
 * handler. */
static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
}
#else
#define dm_compat_ctl_ioctl NULL
#endif
1593 | 1646 | ||
/* File operations for the DM_DIR/control character device. */
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl	 = dm_ctl_ioctl,
	.compat_ioctl = dm_compat_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

/* Misc-device registration for /dev/mapper/control. */
static struct miscdevice _dm_misc = {
	.minor		= MAPPER_CTRL_MINOR,
	.name  		= DM_NAME,
	.nodename	= DM_DIR "/" DM_CONTROL_NODE,
	.fops  		= &_ctl_fops
};

/* Let udev/modprobe find the module by minor and device node name. */
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
1611 | 1664 | ||
/*
 * Create misc character device and link to DM_DIR/control.
 */
int __init dm_interface_init(void)
{
	int r;

	r = dm_hash_init();
	if (r)
		return r;

	r = misc_register(&_dm_misc);
	if (r) {
		DMERR("misc_register failed for control device");
		/* Undo the hash initialisation on failure. */
		dm_hash_exit();
		return r;
	}

	DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
	       DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
	       DM_DRIVER_EMAIL);
	return 0;
}
1635 | 1688 | ||
/* Tear down the control device and the device name/uuid hash tables. */
void dm_interface_exit(void)
{
	if (misc_deregister(&_dm_misc) < 0)
		DMERR("misc_deregister failed for control device");

	dm_hash_exit();
}
1643 | 1696 | ||
/**
 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
 * @md: Pointer to mapped_device
 * @name: Buffer (size DM_NAME_LEN) for name
 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
 *
 * Either buffer pointer may be NULL to skip that field.  Returns 0 on
 * success, or -ENXIO if @md is NULL or no longer in the device hash.
 */
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
{
	int r = 0;
	struct hash_cell *hc;

	if (!md)
		return -ENXIO;

	/* Hold the cells mutex so hc->name/hc->uuid can't change under us. */
	mutex_lock(&dm_hash_cells_mutex);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		r = -ENXIO;
		goto out;
	}

	if (name)
		strcpy(name, hc->name);
	if (uuid)
		/* A device without a uuid yields the empty string. */
		strcpy(uuid, hc->uuid ? : "");

out:
	mutex_unlock(&dm_hash_cells_mutex);

	return r;
}
1675 | 1728 |
include/linux/dm-ioctl.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. | 2 | * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. |
3 | * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This file is released under the LGPL. | 5 | * This file is released under the LGPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_DM_IOCTL_V4_H | 8 | #ifndef _LINUX_DM_IOCTL_V4_H |
9 | #define _LINUX_DM_IOCTL_V4_H | 9 | #define _LINUX_DM_IOCTL_V4_H |
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | 12 | ||
13 | #define DM_DIR "mapper" /* Slashes not supported */ | 13 | #define DM_DIR "mapper" /* Slashes not supported */ |
14 | #define DM_CONTROL_NODE "control" | 14 | #define DM_CONTROL_NODE "control" |
15 | #define DM_MAX_TYPE_NAME 16 | 15 | #define DM_MAX_TYPE_NAME 16 |
16 | #define DM_NAME_LEN 128 | 16 | #define DM_NAME_LEN 128 |
17 | #define DM_UUID_LEN 129 | 17 | #define DM_UUID_LEN 129 |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * A traditional ioctl interface for the device mapper. | 20 | * A traditional ioctl interface for the device mapper. |
21 | * | 21 | * |
22 | * Each device can have two tables associated with it, an | 22 | * Each device can have two tables associated with it, an |
23 | * 'active' table which is the one currently used by io passing | 23 | * 'active' table which is the one currently used by io passing |
24 | * through the device, and an 'inactive' one which is a table | 24 | * through the device, and an 'inactive' one which is a table |
25 | * that is being prepared as a replacement for the 'active' one. | 25 | * that is being prepared as a replacement for the 'active' one. |
26 | * | 26 | * |
27 | * DM_VERSION: | 27 | * DM_VERSION: |
28 | * Just get the version information for the ioctl interface. | 28 | * Just get the version information for the ioctl interface. |
29 | * | 29 | * |
30 | * DM_REMOVE_ALL: | 30 | * DM_REMOVE_ALL: |
31 | * Remove all dm devices, destroy all tables. Only really used | 31 | * Remove all dm devices, destroy all tables. Only really used |
32 | * for debug. | 32 | * for debug. |
33 | * | 33 | * |
34 | * DM_LIST_DEVICES: | 34 | * DM_LIST_DEVICES: |
35 | * Get a list of all the dm device names. | 35 | * Get a list of all the dm device names. |
36 | * | 36 | * |
37 | * DM_DEV_CREATE: | 37 | * DM_DEV_CREATE: |
38 | * Create a new device, neither the 'active' or 'inactive' table | 38 | * Create a new device, neither the 'active' or 'inactive' table |
39 | * slots will be filled. The device will be in suspended state | 39 | * slots will be filled. The device will be in suspended state |
40 | * after creation, however any io to the device will get errored | 40 | * after creation, however any io to the device will get errored |
41 | * since it will be out-of-bounds. | 41 | * since it will be out-of-bounds. |
42 | * | 42 | * |
43 | * DM_DEV_REMOVE: | 43 | * DM_DEV_REMOVE: |
44 | * Remove a device, destroy any tables. | 44 | * Remove a device, destroy any tables. |
45 | * | 45 | * |
46 | * DM_DEV_RENAME: | 46 | * DM_DEV_RENAME: |
47 | * Rename a device. | 47 | * Rename a device or set its uuid if none was previously supplied. |
48 | * | 48 | * |
49 | * DM_SUSPEND: | 49 | * DM_SUSPEND: |
50 | * This performs both suspend and resume, depending which flag is | 50 | * This performs both suspend and resume, depending which flag is |
51 | * passed in. | 51 | * passed in. |
52 | * Suspend: This command will not return until all pending io to | 52 | * Suspend: This command will not return until all pending io to |
53 | * the device has completed. Further io will be deferred until | 53 | * the device has completed. Further io will be deferred until |
54 | * the device is resumed. | 54 | * the device is resumed. |
55 | * Resume: It is no longer an error to issue this command on an | 55 | * Resume: It is no longer an error to issue this command on an |
56 | * unsuspended device. If a table is present in the 'inactive' | 56 | * unsuspended device. If a table is present in the 'inactive' |
57 | * slot, it will be moved to the active slot, then the old table | 57 | * slot, it will be moved to the active slot, then the old table |
58 | * from the active slot will be _destroyed_. Finally the device | 58 | * from the active slot will be _destroyed_. Finally the device |
59 | * is resumed. | 59 | * is resumed. |
60 | * | 60 | * |
61 | * DM_DEV_STATUS: | 61 | * DM_DEV_STATUS: |
62 | * Retrieves the status for the table in the 'active' slot. | 62 | * Retrieves the status for the table in the 'active' slot. |
63 | * | 63 | * |
64 | * DM_DEV_WAIT: | 64 | * DM_DEV_WAIT: |
65 | * Wait for a significant event to occur to the device. This | 65 | * Wait for a significant event to occur to the device. This |
66 | * could either be caused by an event triggered by one of the | 66 | * could either be caused by an event triggered by one of the |
67 | * targets of the table in the 'active' slot, or a table change. | 67 | * targets of the table in the 'active' slot, or a table change. |
68 | * | 68 | * |
69 | * DM_TABLE_LOAD: | 69 | * DM_TABLE_LOAD: |
70 | * Load a table into the 'inactive' slot for the device. The | 70 | * Load a table into the 'inactive' slot for the device. The |
71 | * device does _not_ need to be suspended prior to this command. | 71 | * device does _not_ need to be suspended prior to this command. |
72 | * | 72 | * |
73 | * DM_TABLE_CLEAR: | 73 | * DM_TABLE_CLEAR: |
74 | * Destroy any table in the 'inactive' slot (ie. abort). | 74 | * Destroy any table in the 'inactive' slot (ie. abort). |
75 | * | 75 | * |
76 | * DM_TABLE_DEPS: | 76 | * DM_TABLE_DEPS: |
77 | * Return a set of device dependencies for the 'active' table. | 77 | * Return a set of device dependencies for the 'active' table. |
78 | * | 78 | * |
79 | * DM_TABLE_STATUS: | 79 | * DM_TABLE_STATUS: |
80 | * Return the targets status for the 'active' table. | 80 | * Return the targets status for the 'active' table. |
81 | * | 81 | * |
82 | * DM_TARGET_MSG: | 82 | * DM_TARGET_MSG: |
83 | * Pass a message string to the target at a specific offset of a device. | 83 | * Pass a message string to the target at a specific offset of a device. |
84 | * | 84 | * |
85 | * DM_DEV_SET_GEOMETRY: | 85 | * DM_DEV_SET_GEOMETRY: |
86 | * Set the geometry of a device by passing in a string in this format: | 86 | * Set the geometry of a device by passing in a string in this format: |
87 | * | 87 | * |
88 | * "cylinders heads sectors_per_track start_sector" | 88 | * "cylinders heads sectors_per_track start_sector" |
89 | * | 89 | * |
90 | * Beware that CHS geometry is nearly obsolete and only provided | 90 | * Beware that CHS geometry is nearly obsolete and only provided |
91 | * for compatibility with dm devices that can be booted by a PC | 91 | * for compatibility with dm devices that can be booted by a PC |
92 | * BIOS. See struct hd_geometry for range limits. Also note that | 92 | * BIOS. See struct hd_geometry for range limits. Also note that |
93 | * the geometry is erased if the device size changes. | 93 | * the geometry is erased if the device size changes. |
94 | */ | 94 | */ |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * All ioctl arguments consist of a single chunk of memory, with | 97 | * All ioctl arguments consist of a single chunk of memory, with |
98 | * this structure at the start. If a uuid is specified any | 98 | * this structure at the start. If a uuid is specified any |
99 | * lookup (eg. for a DM_INFO) will be done on that, *not* the | 99 | * lookup (eg. for a DM_INFO) will be done on that, *not* the |
100 | * name. | 100 | * name. |
101 | */ | 101 | */ |
102 | struct dm_ioctl { | 102 | struct dm_ioctl { |
103 | /* | 103 | /* |
104 | * The version number is made up of three parts: | 104 | * The version number is made up of three parts: |
105 | * major - no backward or forward compatibility, | 105 | * major - no backward or forward compatibility, |
106 | * minor - only backwards compatible, | 106 | * minor - only backwards compatible, |
107 | * patch - both backwards and forwards compatible. | 107 | * patch - both backwards and forwards compatible. |
108 | * | 108 | * |
109 | * All clients of the ioctl interface should fill in the | 109 | * All clients of the ioctl interface should fill in the |
110 | * version number of the interface that they were | 110 | * version number of the interface that they were |
111 | * compiled with. | 111 | * compiled with. |
112 | * | 112 | * |
113 | * All recognised ioctl commands (ie. those that don't | 113 | * All recognised ioctl commands (ie. those that don't |
114 | * return -ENOTTY) fill out this field, even if the | 114 | * return -ENOTTY) fill out this field, even if the |
115 | * command failed. | 115 | * command failed. |
116 | */ | 116 | */ |
117 | __u32 version[3]; /* in/out */ | 117 | __u32 version[3]; /* in/out */ |
118 | __u32 data_size; /* total size of data passed in | 118 | __u32 data_size; /* total size of data passed in |
119 | * including this struct */ | 119 | * including this struct */ |
120 | 120 | ||
121 | __u32 data_start; /* offset to start of data | 121 | __u32 data_start; /* offset to start of data |
122 | * relative to start of this struct */ | 122 | * relative to start of this struct */ |
123 | 123 | ||
124 | __u32 target_count; /* in/out */ | 124 | __u32 target_count; /* in/out */ |
125 | __s32 open_count; /* out */ | 125 | __s32 open_count; /* out */ |
126 | __u32 flags; /* in/out */ | 126 | __u32 flags; /* in/out */ |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * event_nr holds either the event number (input and output) or the | 129 | * event_nr holds either the event number (input and output) or the |
130 | * udev cookie value (input only). | 130 | * udev cookie value (input only). |
131 | * The DM_DEV_WAIT ioctl takes an event number as input. | 131 | * The DM_DEV_WAIT ioctl takes an event number as input. |
132 | * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls | 132 | * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls |
133 | * use the field as a cookie to return in the DM_COOKIE | 133 | * use the field as a cookie to return in the DM_COOKIE |
134 | * variable with the uevents they issue. | 134 | * variable with the uevents they issue. |
135 | * For output, the ioctls return the event number, not the cookie. | 135 | * For output, the ioctls return the event number, not the cookie. |
136 | */ | 136 | */ |
137 | __u32 event_nr; /* in/out */ | 137 | __u32 event_nr; /* in/out */ |
138 | __u32 padding; | 138 | __u32 padding; |
139 | 139 | ||
140 | __u64 dev; /* in/out */ | 140 | __u64 dev; /* in/out */ |
141 | 141 | ||
142 | char name[DM_NAME_LEN]; /* device name */ | 142 | char name[DM_NAME_LEN]; /* device name */ |
143 | char uuid[DM_UUID_LEN]; /* unique identifier for | 143 | char uuid[DM_UUID_LEN]; /* unique identifier for |
144 | * the block device */ | 144 | * the block device */ |
145 | char data[7]; /* padding or data */ | 145 | char data[7]; /* padding or data */ |
146 | }; | 146 | }; |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Used to specify tables. These structures appear after the | 149 | * Used to specify tables. These structures appear after the |
150 | * dm_ioctl. | 150 | * dm_ioctl. |
151 | */ | 151 | */ |
152 | struct dm_target_spec { | 152 | struct dm_target_spec { |
153 | __u64 sector_start; | 153 | __u64 sector_start; |
154 | __u64 length; | 154 | __u64 length; |
155 | __s32 status; /* used when reading from kernel only */ | 155 | __s32 status; /* used when reading from kernel only */ |
156 | 156 | ||
157 | /* | 157 | /* |
158 | * Location of the next dm_target_spec. | 158 | * Location of the next dm_target_spec. |
159 | * - When specifying targets on a DM_TABLE_LOAD command, this value is | 159 | * - When specifying targets on a DM_TABLE_LOAD command, this value is |
160 | * the number of bytes from the start of the "current" dm_target_spec | 160 | * the number of bytes from the start of the "current" dm_target_spec |
161 | * to the start of the "next" dm_target_spec. | 161 | * to the start of the "next" dm_target_spec. |
162 | * - When retrieving targets on a DM_TABLE_STATUS command, this value | 162 | * - When retrieving targets on a DM_TABLE_STATUS command, this value |
163 | * is the number of bytes from the start of the first dm_target_spec | 163 | * is the number of bytes from the start of the first dm_target_spec |
164 | * (that follows the dm_ioctl struct) to the start of the "next" | 164 | * (that follows the dm_ioctl struct) to the start of the "next" |
165 | * dm_target_spec. | 165 | * dm_target_spec. |
166 | */ | 166 | */ |
167 | __u32 next; | 167 | __u32 next; |
168 | 168 | ||
169 | char target_type[DM_MAX_TYPE_NAME]; | 169 | char target_type[DM_MAX_TYPE_NAME]; |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * Parameter string starts immediately after this object. | 172 | * Parameter string starts immediately after this object. |
173 | * Be careful to add padding after string to ensure correct | 173 | * Be careful to add padding after string to ensure correct |
174 | * alignment of subsequent dm_target_spec. | 174 | * alignment of subsequent dm_target_spec. |
175 | */ | 175 | */ |
176 | }; | 176 | }; |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Used to retrieve the target dependencies. | 179 | * Used to retrieve the target dependencies. |
180 | */ | 180 | */ |
181 | struct dm_target_deps { | 181 | struct dm_target_deps { |
182 | __u32 count; /* Array size */ | 182 | __u32 count; /* Array size */ |
183 | __u32 padding; /* unused */ | 183 | __u32 padding; /* unused */ |
184 | __u64 dev[0]; /* out */ | 184 | __u64 dev[0]; /* out */ |
185 | }; | 185 | }; |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Used to get a list of all dm devices. | 188 | * Used to get a list of all dm devices. |
189 | */ | 189 | */ |
190 | struct dm_name_list { | 190 | struct dm_name_list { |
191 | __u64 dev; | 191 | __u64 dev; |
192 | __u32 next; /* offset to the next record from | 192 | __u32 next; /* offset to the next record from |
193 | the _start_ of this */ | 193 | the _start_ of this */ |
194 | char name[0]; | 194 | char name[0]; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | /* | 197 | /* |
198 | * Used to retrieve the target versions | 198 | * Used to retrieve the target versions |
199 | */ | 199 | */ |
200 | struct dm_target_versions { | 200 | struct dm_target_versions { |
201 | __u32 next; | 201 | __u32 next; |
202 | __u32 version[3]; | 202 | __u32 version[3]; |
203 | 203 | ||
204 | char name[0]; | 204 | char name[0]; |
205 | }; | 205 | }; |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * Used to pass message to a target | 208 | * Used to pass message to a target |
209 | */ | 209 | */ |
210 | struct dm_target_msg { | 210 | struct dm_target_msg { |
211 | __u64 sector; /* Device sector */ | 211 | __u64 sector; /* Device sector */ |
212 | 212 | ||
213 | char message[0]; | 213 | char message[0]; |
214 | }; | 214 | }; |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * If you change this make sure you make the corresponding change | 217 | * If you change this make sure you make the corresponding change |
218 | * to dm-ioctl.c:lookup_ioctl() | 218 | * to dm-ioctl.c:lookup_ioctl() |
219 | */ | 219 | */ |
220 | enum { | 220 | enum { |
221 | /* Top level cmds */ | 221 | /* Top level cmds */ |
222 | DM_VERSION_CMD = 0, | 222 | DM_VERSION_CMD = 0, |
223 | DM_REMOVE_ALL_CMD, | 223 | DM_REMOVE_ALL_CMD, |
224 | DM_LIST_DEVICES_CMD, | 224 | DM_LIST_DEVICES_CMD, |
225 | 225 | ||
226 | /* device level cmds */ | 226 | /* device level cmds */ |
227 | DM_DEV_CREATE_CMD, | 227 | DM_DEV_CREATE_CMD, |
228 | DM_DEV_REMOVE_CMD, | 228 | DM_DEV_REMOVE_CMD, |
229 | DM_DEV_RENAME_CMD, | 229 | DM_DEV_RENAME_CMD, |
230 | DM_DEV_SUSPEND_CMD, | 230 | DM_DEV_SUSPEND_CMD, |
231 | DM_DEV_STATUS_CMD, | 231 | DM_DEV_STATUS_CMD, |
232 | DM_DEV_WAIT_CMD, | 232 | DM_DEV_WAIT_CMD, |
233 | 233 | ||
234 | /* Table level cmds */ | 234 | /* Table level cmds */ |
235 | DM_TABLE_LOAD_CMD, | 235 | DM_TABLE_LOAD_CMD, |
236 | DM_TABLE_CLEAR_CMD, | 236 | DM_TABLE_CLEAR_CMD, |
237 | DM_TABLE_DEPS_CMD, | 237 | DM_TABLE_DEPS_CMD, |
238 | DM_TABLE_STATUS_CMD, | 238 | DM_TABLE_STATUS_CMD, |
239 | 239 | ||
240 | /* Added later */ | 240 | /* Added later */ |
241 | DM_LIST_VERSIONS_CMD, | 241 | DM_LIST_VERSIONS_CMD, |
242 | DM_TARGET_MSG_CMD, | 242 | DM_TARGET_MSG_CMD, |
243 | DM_DEV_SET_GEOMETRY_CMD | 243 | DM_DEV_SET_GEOMETRY_CMD |
244 | }; | 244 | }; |
245 | 245 | ||
246 | #define DM_IOCTL 0xfd | 246 | #define DM_IOCTL 0xfd |
247 | 247 | ||
248 | #define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) | 248 | #define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) |
249 | #define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) | 249 | #define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) |
250 | #define DM_LIST_DEVICES _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl) | 250 | #define DM_LIST_DEVICES _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl) |
251 | 251 | ||
252 | #define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) | 252 | #define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) |
253 | #define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) | 253 | #define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) |
254 | #define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) | 254 | #define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) |
255 | #define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) | 255 | #define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) |
256 | #define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) | 256 | #define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) |
257 | #define DM_DEV_WAIT _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl) | 257 | #define DM_DEV_WAIT _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl) |
258 | 258 | ||
259 | #define DM_TABLE_LOAD _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl) | 259 | #define DM_TABLE_LOAD _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl) |
260 | #define DM_TABLE_CLEAR _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl) | 260 | #define DM_TABLE_CLEAR _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl) |
261 | #define DM_TABLE_DEPS _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, struct dm_ioctl) | 261 | #define DM_TABLE_DEPS _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, struct dm_ioctl) |
262 | #define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl) | 262 | #define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl) |
263 | 263 | ||
264 | #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) | 264 | #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) |
265 | 265 | ||
266 | #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) | 266 | #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) |
267 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 267 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
268 | 268 | ||
269 | #define DM_VERSION_MAJOR 4 | 269 | #define DM_VERSION_MAJOR 4 |
270 | #define DM_VERSION_MINOR 18 | 270 | #define DM_VERSION_MINOR 19 |
271 | #define DM_VERSION_PATCHLEVEL 0 | 271 | #define DM_VERSION_PATCHLEVEL 0 |
272 | #define DM_VERSION_EXTRA "-ioctl (2010-06-29)" | 272 | #define DM_VERSION_EXTRA "-ioctl (2010-10-14)" |
273 | 273 | ||
274 | /* Status bits */ | 274 | /* Status bits */ |
275 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 275 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
276 | #define DM_SUSPEND_FLAG (1 << 1) /* In/Out */ | 276 | #define DM_SUSPEND_FLAG (1 << 1) /* In/Out */ |
277 | #define DM_PERSISTENT_DEV_FLAG (1 << 3) /* In */ | 277 | #define DM_PERSISTENT_DEV_FLAG (1 << 3) /* In */ |
278 | 278 | ||
279 | /* | 279 | /* |
280 | * Flag passed into ioctl STATUS command to get table information | 280 | * Flag passed into ioctl STATUS command to get table information |
281 | * rather than current status. | 281 | * rather than current status. |
282 | */ | 282 | */ |
283 | #define DM_STATUS_TABLE_FLAG (1 << 4) /* In */ | 283 | #define DM_STATUS_TABLE_FLAG (1 << 4) /* In */ |
284 | 284 | ||
285 | /* | 285 | /* |
286 | * Flags that indicate whether a table is present in either of | 286 | * Flags that indicate whether a table is present in either of |
287 | * the two table slots that a device has. | 287 | * the two table slots that a device has. |
288 | */ | 288 | */ |
289 | #define DM_ACTIVE_PRESENT_FLAG (1 << 5) /* Out */ | 289 | #define DM_ACTIVE_PRESENT_FLAG (1 << 5) /* Out */ |
290 | #define DM_INACTIVE_PRESENT_FLAG (1 << 6) /* Out */ | 290 | #define DM_INACTIVE_PRESENT_FLAG (1 << 6) /* Out */ |
291 | 291 | ||
292 | /* | 292 | /* |
293 | * Indicates that the buffer passed in wasn't big enough for the | 293 | * Indicates that the buffer passed in wasn't big enough for the |
294 | * results. | 294 | * results. |
295 | */ | 295 | */ |
296 | #define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ | 296 | #define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * This flag is now ignored. | 299 | * This flag is now ignored. |
300 | */ | 300 | */ |
301 | #define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ | 301 | #define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * Set this to avoid attempting to freeze any filesystem when suspending. | 304 | * Set this to avoid attempting to freeze any filesystem when suspending. |
305 | */ | 305 | */ |
306 | #define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */ | 306 | #define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */ |
307 | 307 | ||
308 | /* | 308 | /* |
309 | * Set this to suspend without flushing queued ios. | 309 | * Set this to suspend without flushing queued ios. |
310 | */ | 310 | */ |
311 | #define DM_NOFLUSH_FLAG (1 << 11) /* In */ | 311 | #define DM_NOFLUSH_FLAG (1 << 11) /* In */ |
312 | 312 | ||
313 | /* | 313 | /* |
314 | * If set, any table information returned will relate to the inactive | 314 | * If set, any table information returned will relate to the inactive |
315 | * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG | 315 | * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG |
316 | * is set before using the data returned. | 316 | * is set before using the data returned. |
317 | */ | 317 | */ |
318 | #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ | 318 | #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * If set, a uevent was generated for which the caller may need to wait. | 321 | * If set, a uevent was generated for which the caller may need to wait. |
322 | */ | 322 | */ |
323 | #define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ | 323 | #define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ |
324 | |||
325 | /* | ||
326 | * If set, rename changes the uuid not the name. Only permitted | ||
327 | * if no uuid was previously supplied: an existing uuid cannot be changed. | ||
328 | */ | ||
329 | #define DM_UUID_FLAG (1 << 14) /* In */ | ||
324 | 330 | ||
325 | #endif /* _LINUX_DM_IOCTL_H */ | 331 | #endif /* _LINUX_DM_IOCTL_H */ |
326 | 332 |