Commit 759dea204cce9f1fc2a5d00ea25211299fc7a4a0
Committed by
Alasdair G Kergon
1 parent
ba2e19b0f4
Exists in
master
and in
6 other branches
dm ioctl: forbid multiple device specifiers
Exactly one of name, uuid or device must be specified when referencing an existing device. This removes the ambiguity (risking the wrong device being updated) if two conflicting parameters were specified. Previously one parameter got used and any others were ignored silently. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Showing 1 changed file with 6 additions and 0 deletions Inline Diff
drivers/md/dm-ioctl.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. | 2 | * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. |
3 | * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "dm.h" | 8 | #include "dm.h" |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/vmalloc.h> | 11 | #include <linux/vmalloc.h> |
12 | #include <linux/miscdevice.h> | 12 | #include <linux/miscdevice.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/dm-ioctl.h> | 16 | #include <linux/dm-ioctl.h> |
17 | #include <linux/hdreg.h> | 17 | #include <linux/hdreg.h> |
18 | #include <linux/compat.h> | 18 | #include <linux/compat.h> |
19 | 19 | ||
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | 21 | ||
22 | #define DM_MSG_PREFIX "ioctl" | 22 | #define DM_MSG_PREFIX "ioctl" |
23 | #define DM_DRIVER_EMAIL "dm-devel@redhat.com" | 23 | #define DM_DRIVER_EMAIL "dm-devel@redhat.com" |
24 | 24 | ||
25 | /*----------------------------------------------------------------- | 25 | /*----------------------------------------------------------------- |
26 | * The ioctl interface needs to be able to look up devices by | 26 | * The ioctl interface needs to be able to look up devices by |
27 | * name or uuid. | 27 | * name or uuid. |
28 | *---------------------------------------------------------------*/ | 28 | *---------------------------------------------------------------*/ |
/*
 * One cell per mapped device.  A cell is linked into the name hash and,
 * if the device has a uuid, into the uuid hash as well (uuid_list is an
 * empty list head otherwise).
 */
struct hash_cell {
	struct list_head name_list;	/* entry in _name_buckets[hash_str(name)] */
	struct list_head uuid_list;	/* entry in _uuid_buckets[hash_str(uuid)], or empty */

	char *name;			/* kstrdup'd device name; never NULL (see alloc_cell) */
	char *uuid;			/* kstrdup'd uuid; NULL if no uuid was given */
	struct mapped_device *md;	/* the device this cell describes */
	struct dm_table *new_map;	/* inactive table, destroyed with the cell if unused */
};
38 | 38 | ||
/*
 * Iterator state used while writing target-version records into the
 * ioctl result buffer (see list_versions()).
 */
struct vers_iter {
	size_t param_size;			/* total size of the param buffer */
	struct dm_target_versions *vers, *old_vers; /* current and previous records */
	char *end;				/* one past the end of the output buffer */
	uint32_t flags;				/* set to DM_BUFFER_FULL_FLAG on overflow */
};
45 | 45 | ||
46 | 46 | ||
/*
 * Hash-table geometry.  NUM_BUCKETS is a power of two so hash_str()
 * can mask instead of taking a modulus.
 */
#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];	/* devices indexed by name */
static struct list_head _uuid_buckets[NUM_BUCKETS];	/* devices indexed by uuid */

static void dm_hash_remove_all(int keep_open_devices);
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Guards access to both hash tables. | 55 | * Guards access to both hash tables. |
56 | */ | 56 | */ |
57 | static DECLARE_RWSEM(_hash_lock); | 57 | static DECLARE_RWSEM(_hash_lock); |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Protects use of mdptr to obtain hash cell name and uuid from mapped device. | 60 | * Protects use of mdptr to obtain hash cell name and uuid from mapped device. |
61 | */ | 61 | */ |
62 | static DEFINE_MUTEX(dm_hash_cells_mutex); | 62 | static DEFINE_MUTEX(dm_hash_cells_mutex); |
63 | 63 | ||
64 | static void init_buckets(struct list_head *buckets) | 64 | static void init_buckets(struct list_head *buckets) |
65 | { | 65 | { |
66 | unsigned int i; | 66 | unsigned int i; |
67 | 67 | ||
68 | for (i = 0; i < NUM_BUCKETS; i++) | 68 | for (i = 0; i < NUM_BUCKETS; i++) |
69 | INIT_LIST_HEAD(buckets + i); | 69 | INIT_LIST_HEAD(buckets + i); |
70 | } | 70 | } |
71 | 71 | ||
72 | static int dm_hash_init(void) | 72 | static int dm_hash_init(void) |
73 | { | 73 | { |
74 | init_buckets(_name_buckets); | 74 | init_buckets(_name_buckets); |
75 | init_buckets(_uuid_buckets); | 75 | init_buckets(_uuid_buckets); |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
static void dm_hash_exit(void)
{
	/* Module exit: tear down every device, including open ones. */
	dm_hash_remove_all(0);
}
83 | 83 | ||
84 | /*----------------------------------------------------------------- | 84 | /*----------------------------------------------------------------- |
85 | * Hash function: | 85 | * Hash function: |
86 | * We're not really concerned with the str hash function being | 86 | * We're not really concerned with the str hash function being |
87 | * fast since it's only used by the ioctl interface. | 87 | * fast since it's only used by the ioctl interface. |
88 | *---------------------------------------------------------------*/ | 88 | *---------------------------------------------------------------*/ |
89 | static unsigned int hash_str(const char *str) | 89 | static unsigned int hash_str(const char *str) |
90 | { | 90 | { |
91 | const unsigned int hash_mult = 2654435387U; | 91 | const unsigned int hash_mult = 2654435387U; |
92 | unsigned int h = 0; | 92 | unsigned int h = 0; |
93 | 93 | ||
94 | while (*str) | 94 | while (*str) |
95 | h = (h + (unsigned int) *str++) * hash_mult; | 95 | h = (h + (unsigned int) *str++) * hash_mult; |
96 | 96 | ||
97 | return h & MASK_BUCKETS; | 97 | return h & MASK_BUCKETS; |
98 | } | 98 | } |
99 | 99 | ||
100 | /*----------------------------------------------------------------- | 100 | /*----------------------------------------------------------------- |
101 | * Code for looking up a device by name | 101 | * Code for looking up a device by name |
102 | *---------------------------------------------------------------*/ | 102 | *---------------------------------------------------------------*/ |
103 | static struct hash_cell *__get_name_cell(const char *str) | 103 | static struct hash_cell *__get_name_cell(const char *str) |
104 | { | 104 | { |
105 | struct hash_cell *hc; | 105 | struct hash_cell *hc; |
106 | unsigned int h = hash_str(str); | 106 | unsigned int h = hash_str(str); |
107 | 107 | ||
108 | list_for_each_entry (hc, _name_buckets + h, name_list) | 108 | list_for_each_entry (hc, _name_buckets + h, name_list) |
109 | if (!strcmp(hc->name, str)) { | 109 | if (!strcmp(hc->name, str)) { |
110 | dm_get(hc->md); | 110 | dm_get(hc->md); |
111 | return hc; | 111 | return hc; |
112 | } | 112 | } |
113 | 113 | ||
114 | return NULL; | 114 | return NULL; |
115 | } | 115 | } |
116 | 116 | ||
117 | static struct hash_cell *__get_uuid_cell(const char *str) | 117 | static struct hash_cell *__get_uuid_cell(const char *str) |
118 | { | 118 | { |
119 | struct hash_cell *hc; | 119 | struct hash_cell *hc; |
120 | unsigned int h = hash_str(str); | 120 | unsigned int h = hash_str(str); |
121 | 121 | ||
122 | list_for_each_entry (hc, _uuid_buckets + h, uuid_list) | 122 | list_for_each_entry (hc, _uuid_buckets + h, uuid_list) |
123 | if (!strcmp(hc->uuid, str)) { | 123 | if (!strcmp(hc->uuid, str)) { |
124 | dm_get(hc->md); | 124 | dm_get(hc->md); |
125 | return hc; | 125 | return hc; |
126 | } | 126 | } |
127 | 127 | ||
128 | return NULL; | 128 | return NULL; |
129 | } | 129 | } |
130 | 130 | ||
131 | static struct hash_cell *__get_dev_cell(uint64_t dev) | 131 | static struct hash_cell *__get_dev_cell(uint64_t dev) |
132 | { | 132 | { |
133 | struct mapped_device *md; | 133 | struct mapped_device *md; |
134 | struct hash_cell *hc; | 134 | struct hash_cell *hc; |
135 | 135 | ||
136 | md = dm_get_md(huge_decode_dev(dev)); | 136 | md = dm_get_md(huge_decode_dev(dev)); |
137 | if (!md) | 137 | if (!md) |
138 | return NULL; | 138 | return NULL; |
139 | 139 | ||
140 | hc = dm_get_mdptr(md); | 140 | hc = dm_get_mdptr(md); |
141 | if (!hc) { | 141 | if (!hc) { |
142 | dm_put(md); | 142 | dm_put(md); |
143 | return NULL; | 143 | return NULL; |
144 | } | 144 | } |
145 | 145 | ||
146 | return hc; | 146 | return hc; |
147 | } | 147 | } |
148 | 148 | ||
149 | /*----------------------------------------------------------------- | 149 | /*----------------------------------------------------------------- |
150 | * Inserting, removing and renaming a device. | 150 | * Inserting, removing and renaming a device. |
151 | *---------------------------------------------------------------*/ | 151 | *---------------------------------------------------------------*/ |
152 | static struct hash_cell *alloc_cell(const char *name, const char *uuid, | 152 | static struct hash_cell *alloc_cell(const char *name, const char *uuid, |
153 | struct mapped_device *md) | 153 | struct mapped_device *md) |
154 | { | 154 | { |
155 | struct hash_cell *hc; | 155 | struct hash_cell *hc; |
156 | 156 | ||
157 | hc = kmalloc(sizeof(*hc), GFP_KERNEL); | 157 | hc = kmalloc(sizeof(*hc), GFP_KERNEL); |
158 | if (!hc) | 158 | if (!hc) |
159 | return NULL; | 159 | return NULL; |
160 | 160 | ||
161 | hc->name = kstrdup(name, GFP_KERNEL); | 161 | hc->name = kstrdup(name, GFP_KERNEL); |
162 | if (!hc->name) { | 162 | if (!hc->name) { |
163 | kfree(hc); | 163 | kfree(hc); |
164 | return NULL; | 164 | return NULL; |
165 | } | 165 | } |
166 | 166 | ||
167 | if (!uuid) | 167 | if (!uuid) |
168 | hc->uuid = NULL; | 168 | hc->uuid = NULL; |
169 | 169 | ||
170 | else { | 170 | else { |
171 | hc->uuid = kstrdup(uuid, GFP_KERNEL); | 171 | hc->uuid = kstrdup(uuid, GFP_KERNEL); |
172 | if (!hc->uuid) { | 172 | if (!hc->uuid) { |
173 | kfree(hc->name); | 173 | kfree(hc->name); |
174 | kfree(hc); | 174 | kfree(hc); |
175 | return NULL; | 175 | return NULL; |
176 | } | 176 | } |
177 | } | 177 | } |
178 | 178 | ||
179 | INIT_LIST_HEAD(&hc->name_list); | 179 | INIT_LIST_HEAD(&hc->name_list); |
180 | INIT_LIST_HEAD(&hc->uuid_list); | 180 | INIT_LIST_HEAD(&hc->uuid_list); |
181 | hc->md = md; | 181 | hc->md = md; |
182 | hc->new_map = NULL; | 182 | hc->new_map = NULL; |
183 | return hc; | 183 | return hc; |
184 | } | 184 | } |
185 | 185 | ||
186 | static void free_cell(struct hash_cell *hc) | 186 | static void free_cell(struct hash_cell *hc) |
187 | { | 187 | { |
188 | if (hc) { | 188 | if (hc) { |
189 | kfree(hc->name); | 189 | kfree(hc->name); |
190 | kfree(hc->uuid); | 190 | kfree(hc->uuid); |
191 | kfree(hc); | 191 | kfree(hc); |
192 | } | 192 | } |
193 | } | 193 | } |
194 | 194 | ||
195 | /* | 195 | /* |
196 | * The kdev_t and uuid of a device can never change once it is | 196 | * The kdev_t and uuid of a device can never change once it is |
197 | * initially inserted. | 197 | * initially inserted. |
198 | */ | 198 | */ |
/*
 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if
 * the name or uuid is already in use.
 */
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
	struct hash_cell *cell, *hc;

	/*
	 * Allocate the new cells.
	 */
	cell = alloc_cell(name, uuid, md);
	if (!cell)
		return -ENOMEM;

	/*
	 * Insert the cell into both hash tables.
	 */
	down_write(&_hash_lock);
	hc = __get_name_cell(name);
	if (hc) {
		/* Name taken: drop the reference __get_name_cell() took. */
		dm_put(hc->md);
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			/* Uuid clash: undo the name insertion before bailing. */
			list_del(&cell->name_list);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
	}
	dm_get(md);	/* the hash table holds its own reference to md */
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	return 0;

 bad:
	up_write(&_hash_lock);
	free_cell(cell);
	return -EBUSY;
}
244 | 244 | ||
/*
 * Unlink a cell from both hash tables, clear the device's back-pointer,
 * and free everything the cell owns.  Caller must hold _hash_lock for
 * writing.
 */
static void __hash_remove(struct hash_cell *hc)
{
	struct dm_table *table;

	/* remove from the dev hash */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	/* dm_hash_cells_mutex guards mdptr readers (see its declaration) */
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	/* Signal a table event so waiters notice the device went away. */
	table = dm_get_live_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	/* Destroy any inactive table that was never swapped in. */
	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	dm_put(hc->md);		/* drop the hash table's reference */
	free_cell(hc);
}
267 | 267 | ||
/*
 * Remove every device from the hash tables.  If keep_open_devices is
 * set, devices that cannot be locked for deletion are skipped and
 * counted; with it clear everything is destroyed unconditionally
 * (the dm_hash_exit() path).
 */
static void dm_hash_remove_all(int keep_open_devices)
{
	int i, dev_skipped;
	struct hash_cell *hc;
	struct mapped_device *md;

retry:
	dev_skipped = 0;

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
			md = hc->md;
			dm_get(md);

			if (keep_open_devices && dm_lock_for_deletion(md)) {
				/* Still open: leave it in place and count it. */
				dm_put(md);
				dev_skipped++;
				continue;
			}

			__hash_remove(hc);

			/* Drop the lock before destroying the device. */
			up_write(&_hash_lock);

			dm_put(md);
			if (likely(keep_open_devices))
				dm_destroy(md);
			else
				dm_destroy_immediate(md);

			/*
			 * Some mapped devices may be using other mapped
			 * devices, so repeat until we make no further
			 * progress. If a new mapped device is created
			 * here it will also get removed.
			 */
			goto retry;
		}
	}

	up_write(&_hash_lock);

	if (dev_skipped)
		DMWARN("remove_all left %d open device(s)", dev_skipped);
}
315 | 315 | ||
316 | /* | 316 | /* |
317 | * Set the uuid of a hash_cell that isn't already set. | 317 | * Set the uuid of a hash_cell that isn't already set. |
318 | */ | 318 | */ |
/*
 * Set the uuid of a hash_cell that isn't already set, and link the cell
 * into the uuid hash.  The new_uuid string becomes owned by the cell.
 * Caller must hold _hash_lock for writing.
 */
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
{
	/* dm_hash_cells_mutex guards readers obtaining uuid via mdptr */
	mutex_lock(&dm_hash_cells_mutex);
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
}
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Changes the name of a hash_cell and returns the old name for | 329 | * Changes the name of a hash_cell and returns the old name for |
330 | * the caller to free. | 330 | * the caller to free. |
331 | */ | 331 | */ |
/*
 * Changes the name of a hash_cell and returns the old name for
 * the caller to free.  The new_name string becomes owned by the cell.
 * Caller must hold _hash_lock for writing.
 */
static char *__change_cell_name(struct hash_cell *hc, char *new_name)
{
	char *old_name;

	/*
	 * Rename and move the name cell.
	 */
	list_del(&hc->name_list);
	old_name = hc->name;

	/* dm_hash_cells_mutex guards readers obtaining the name via mdptr */
	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);

	/* Re-hash under the new name. */
	list_add(&hc->name_list, _name_buckets + hash_str(new_name));

	return old_name;
}
350 | 350 | ||
/*
 * Rename a device, or set its uuid if DM_UUID_FLAG is set in
 * param->flags.  'new' is the new name/uuid; the device to change is
 * found via param->name.  Returns the mapped device on success
 * (with a reference held — presumably dropped by the caller; verify
 * against callers) or an ERR_PTR on failure.
 */
static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
					    const char *new)
{
	char *new_data, *old_name = NULL;
	struct hash_cell *hc;
	struct dm_table *table;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	/*
	 * duplicate new.
	 */
	new_data = kstrdup(new, GFP_KERNEL);
	if (!new_data)
		return ERR_PTR(-ENOMEM);

	down_write(&_hash_lock);

	/*
	 * Is new free ?
	 */
	if (change_uuid)
		hc = __get_uuid_cell(new);
	else
		hc = __get_name_cell(new);

	if (hc) {
		DMWARN("Unable to change %s on mapped device %s to one that "
		       "already exists: %s",
		       change_uuid ? "uuid" : "name",
		       param->name, new);
		/* drop the reference the lookup took */
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EBUSY);
	}

	/*
	 * Is there such a device as 'old' ?
	 */
	hc = __get_name_cell(param->name);
	if (!hc) {
		DMWARN("Unable to rename non-existent device, %s to %s%s",
		       param->name, change_uuid ? "uuid " : "", new);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Does this device already have a uuid?
	 */
	if (change_uuid && hc->uuid) {
		DMWARN("Unable to change uuid of mapped device %s to %s "
		       "because uuid is already set to %s",
		       param->name, new, hc->uuid);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_data);
		return ERR_PTR(-EINVAL);
	}

	/* new_data ownership passes to the cell on either branch */
	if (change_uuid)
		__set_cell_uuid(hc, new_data);
	else
		old_name = __change_cell_name(hc, new_data);

	/*
	 * Wake up any dm event waiters.
	 */
	table = dm_get_live_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	md = hc->md;
	up_write(&_hash_lock);
	kfree(old_name);	/* NULL (and a no-op) on the uuid path */

	return md;
}
436 | 436 | ||
437 | /*----------------------------------------------------------------- | 437 | /*----------------------------------------------------------------- |
438 | * Implementation of the ioctl commands | 438 | * Implementation of the ioctl commands |
439 | *---------------------------------------------------------------*/ | 439 | *---------------------------------------------------------------*/ |
/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.  param is the (writable) ioctl argument block;
 * param_size is its total size including the payload area.
 */
typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
445 | 445 | ||
446 | static int remove_all(struct dm_ioctl *param, size_t param_size) | 446 | static int remove_all(struct dm_ioctl *param, size_t param_size) |
447 | { | 447 | { |
448 | dm_hash_remove_all(1); | 448 | dm_hash_remove_all(1); |
449 | param->data_size = 0; | 449 | param->data_size = 0; |
450 | return 0; | 450 | return 0; |
451 | } | 451 | } |
452 | 452 | ||
/*
 * Round up the ptr to an 8-byte boundary.
 */
#define ALIGN_MASK 7
static inline void *align_ptr(void *ptr)
{
	size_t v = (size_t) ptr;

	v = (v + ALIGN_MASK) & ~((size_t) ALIGN_MASK);
	return (void *) v;
}
461 | 461 | ||
462 | /* | 462 | /* |
463 | * Retrieves the data payload buffer from an already allocated | 463 | * Retrieves the data payload buffer from an already allocated |
464 | * struct dm_ioctl. | 464 | * struct dm_ioctl. |
465 | */ | 465 | */ |
466 | static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, | 466 | static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, |
467 | size_t *len) | 467 | size_t *len) |
468 | { | 468 | { |
469 | param->data_start = align_ptr(param + 1) - (void *) param; | 469 | param->data_start = align_ptr(param + 1) - (void *) param; |
470 | 470 | ||
471 | if (param->data_start < param_size) | 471 | if (param->data_start < param_size) |
472 | *len = param_size - param->data_start; | 472 | *len = param_size - param->data_start; |
473 | else | 473 | else |
474 | *len = 0; | 474 | *len = 0; |
475 | 475 | ||
476 | return ((void *) param) + param->data_start; | 476 | return ((void *) param) + param->data_start; |
477 | } | 477 | } |
478 | 478 | ||
/*
 * Ioctl handler: write a dm_name_list record for every device in the
 * name hash into the result buffer.  Records are chained by byte
 * offsets in ->next.  If the buffer is too small, DM_BUFFER_FULL_FLAG
 * is set instead and 0 is still returned.
 */
static int list_devices(struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *nl, *old_nl = NULL;

	down_write(&_hash_lock);

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			needed += sizeof(struct dm_name_list);
			needed += strlen(hc->name) + 1;
			needed += ALIGN_MASK;	/* worst-case alignment padding */
		}
	}

	/*
	 * Grab our output buffer.
	 */
	nl = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			if (old_nl)
				/* byte offset from previous record to this one */
				old_nl->next = (uint32_t) ((void *) nl -
							   (void *) old_nl);
			disk = dm_disk(hc->md);
			nl->dev = huge_encode_dev(disk_devt(disk));
			nl->next = 0;	/* terminates the chain unless overwritten */
			strcpy(nl->name, hc->name);

			old_nl = nl;
			/* advance past this record plus its name string */
			nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
		}
	}

 out:
	up_write(&_hash_lock);
	return 0;
}
535 | 535 | ||
536 | static void list_version_get_needed(struct target_type *tt, void *needed_param) | 536 | static void list_version_get_needed(struct target_type *tt, void *needed_param) |
537 | { | 537 | { |
538 | size_t *needed = needed_param; | 538 | size_t *needed = needed_param; |
539 | 539 | ||
540 | *needed += sizeof(struct dm_target_versions); | 540 | *needed += sizeof(struct dm_target_versions); |
541 | *needed += strlen(tt->name); | 541 | *needed += strlen(tt->name); |
542 | *needed += ALIGN_MASK; | 542 | *needed += ALIGN_MASK; |
543 | } | 543 | } |
544 | 544 | ||
/*
 * dm_target_iterate() callback: write one target type's version record
 * into the result buffer, chaining records by byte offsets in ->next.
 * Sets DM_BUFFER_FULL_FLAG in the iterator if the record doesn't fit.
 */
static void list_version_get_info(struct target_type *tt, void *param)
{
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {

		info->flags = DM_BUFFER_FULL_FLAG;
		return;
	}

	if (info->old_vers)
		/* byte offset from the previous record to this one */
		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;	/* terminates the chain unless overwritten */
	strcpy(info->vers->name, tt->name);

	info->old_vers = info->vers;
	/* advance past this record plus its name string */
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}
569 | 569 | ||
/*
 * DM_LIST_VERSIONS: report the name and version triple of every
 * registered target type.  Always returns 0; if the result buffer is
 * too small, DM_BUFFER_FULL_FLAG is set instead of failing.
 */
static int list_versions(struct dm_ioctl *param, size_t param_size)
{
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	dm_target_iterate(list_version_get_needed, &needed);

	/*
	 * Grab our output buffer.
	 */
	vers = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.flags = 0;
	iter_info.end = (char *)vers+len;

	/*
	 * Now loop through filling out the names & versions.
	 * (The fill callback re-checks space itself, in case the
	 * target list changed between the two iterations.)
	 */
	dm_target_iterate(list_version_get_info, &iter_info);
	param->flags |= iter_info.flags;

 out:
	return 0;
}
607 | 607 | ||
/*
 * Reject device names containing a '/'.
 * Returns 0 if the name is acceptable, -EINVAL otherwise.
 */
static int check_name(const char *name)
{
	const char *p;

	for (p = name; *p; p++) {
		if (*p == '/') {
			DMWARN("invalid device name");
			return -EINVAL;
		}
	}

	return 0;
}
617 | 617 | ||
618 | /* | 618 | /* |
619 | * On successful return, the caller must not attempt to acquire | 619 | * On successful return, the caller must not attempt to acquire |
620 | * _hash_lock without first calling dm_table_put, because dm_table_destroy | 620 | * _hash_lock without first calling dm_table_put, because dm_table_destroy |
621 | * waits for this dm_table_put and could be called under this lock. | 621 | * waits for this dm_table_put and could be called under this lock. |
622 | */ | 622 | */ |
static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
{
	struct hash_cell *hc;
	struct dm_table *table = NULL;

	down_read(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		/* The mdptr can be stale once the device is unhashed. */
		DMWARN("device has been removed from the dev hash table.");
		goto out;
	}

	table = hc->new_map;
	if (table)
		dm_table_get(table);	/* counted reference for the caller */

out:
	up_read(&_hash_lock);

	return table;
}
644 | 644 | ||
645 | static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, | 645 | static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, |
646 | struct dm_ioctl *param) | 646 | struct dm_ioctl *param) |
647 | { | 647 | { |
648 | return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? | 648 | return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? |
649 | dm_get_inactive_table(md) : dm_get_live_table(md); | 649 | dm_get_inactive_table(md) : dm_get_live_table(md); |
650 | } | 650 | } |
651 | 651 | ||
652 | /* | 652 | /* |
653 | * Fills in a dm_ioctl structure, ready for sending back to | 653 | * Fills in a dm_ioctl structure, ready for sending back to |
654 | * userland. | 654 | * userland. |
655 | */ | 655 | */ |
static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;

	/* Recompute the status flags from scratch. */
	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG);

	if (dm_suspended_md(md))
		param->flags |= DM_SUSPEND_FLAG;

	param->dev = huge_encode_dev(disk_devt(disk));

	/*
	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for
	 * debugging.
	 */
	param->open_count = dm_open_count(md);

	param->event_nr = dm_get_event_nr(md);
	param->target_count = 0;

	/* Report the live table unless the caller asked for the inactive one. */
	table = dm_get_live_table(md);
	if (table) {
		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
			if (get_disk_ro(disk))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
		}
		dm_table_put(table);

		param->flags |= DM_ACTIVE_PRESENT_FLAG;
	}

	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
		table = dm_get_inactive_table(md);
		if (table) {
			/* Here read-only reflects the table mode, not the disk. */
			if (!(dm_table_get_mode(table) & FMODE_WRITE))
				param->flags |= DM_READONLY_FLAG;
			param->target_count = dm_table_get_num_targets(table);
			dm_table_put(table);
		}
	}
}
701 | 701 | ||
/*
 * DM_DEV_CREATE: create a new mapped device and register it in the
 * name/uuid hash.  The device starts with no table loaded.
 */
static int dev_create(struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	/* Honour a caller-requested fixed minor number. */
	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	/* An empty uuid string means "no uuid". */
	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		dm_put(md);
		dm_destroy(md);
		return r;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);

	dm_put(md);

	return 0;
}
733 | 733 | ||
/*
 * Exactly one of name, uuid or the device number may be used to
 * reference an existing device; supplying more than one is rejected.
 */
/* Caller holds _hash_lock (see find_device(), dev_remove(), do_resume()). */
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
	struct hash_cell *hc = NULL;

	if (*param->uuid) {
		/* uuid lookup: name and dev must not also be supplied */
		if (*param->name || param->dev)
			return NULL;

		hc = __get_uuid_cell(param->uuid);
		if (!hc)
			return NULL;
	} else if (*param->name) {
		/* name lookup: dev must not also be supplied */
		if (param->dev)
			return NULL;

		hc = __get_name_cell(param->name);
		if (!hc)
			return NULL;
	} else if (param->dev) {
		hc = __get_dev_cell(param->dev);
		if (!hc)
			return NULL;
	} else
		return NULL;

	/*
	 * Sneakily write in both the name and the uuid
	 * while we have the cell.
	 */
	strlcpy(param->name, hc->name, sizeof(param->name));
	if (hc->uuid)
		strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
	else
		param->uuid[0] = '\0';

	/* Report whether an inactive table is loaded. */
	if (hc->new_map)
		param->flags |= DM_INACTIVE_PRESENT_FLAG;
	else
		param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	return hc;
}
773 | 779 | ||
/*
 * Look up the mapped device referenced by @param; NULL if not found.
 * NOTE(review): every caller releases the result with dm_put(), so the
 * __get_*_cell() helpers presumably take a reference on hc->md -- confirm.
 */
static struct mapped_device *find_device(struct dm_ioctl *param)
{
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	if (hc)
		md = hc->md;
	up_read(&_hash_lock);

	return md;
}
787 | 793 | ||
/* DM_DEV_REMOVE: unhash and destroy a device that is not open. */
static int dev_remove(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	int r;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);

	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/*
	 * Ensure the device is not open and nothing further can open it.
	 */
	r = dm_lock_for_deletion(md);
	if (r) {
		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);
		dm_put(md);
		return r;
	}

	__hash_remove(hc);
	up_write(&_hash_lock);

	/* Tell udev; note whether it generated the remove event for us. */
	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;

	dm_put(md);
	dm_destroy(md);
	return 0;
}
826 | 832 | ||
827 | /* | 833 | /* |
828 | * Check a string doesn't overrun the chunk of | 834 | * Check a string doesn't overrun the chunk of |
829 | * memory we copied from userland. | 835 | * memory we copied from userland. |
830 | */ | 836 | */ |
/*
 * Verify that @str is NUL-terminated before @end, i.e. that the string
 * does not overrun the buffer copied in from userland.
 * Returns 0 if terminated in bounds, -EINVAL otherwise.
 */
static int invalid_str(char *str, void *end)
{
	char *p;

	for (p = str; (void *) p < end; p++) {
		if (*p == '\0')
			return 0;
	}

	return -EINVAL;
}
839 | 845 | ||
/*
 * DM_DEV_RENAME: rename a device, or (with DM_UUID_FLAG) set its uuid.
 */
static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
	int r;
	char *new_data = (char *) param + param->data_start;
	struct mapped_device *md;
	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;

	/* The new name/uuid must lie within the copied-in buffer and fit. */
	if (new_data < param->data ||
	    invalid_str(new_data, (void *) param + param_size) ||
	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
		DMWARN("Invalid new mapped device name or uuid string supplied.");
		return -EINVAL;
	}

	/* Only plain names are subject to check_name()'s restrictions. */
	if (!change_uuid) {
		r = check_name(new_data);
		if (r)
			return r;
	}

	md = dm_hash_rename(param, new_data);
	if (IS_ERR(md))
		return PTR_ERR(md);

	__dev_status(md, param);
	dm_put(md);

	return 0;
}
869 | 875 | ||
870 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) | 876 | static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) |
871 | { | 877 | { |
872 | int r = -EINVAL, x; | 878 | int r = -EINVAL, x; |
873 | struct mapped_device *md; | 879 | struct mapped_device *md; |
874 | struct hd_geometry geometry; | 880 | struct hd_geometry geometry; |
875 | unsigned long indata[4]; | 881 | unsigned long indata[4]; |
876 | char *geostr = (char *) param + param->data_start; | 882 | char *geostr = (char *) param + param->data_start; |
877 | 883 | ||
878 | md = find_device(param); | 884 | md = find_device(param); |
879 | if (!md) | 885 | if (!md) |
880 | return -ENXIO; | 886 | return -ENXIO; |
881 | 887 | ||
882 | if (geostr < param->data || | 888 | if (geostr < param->data || |
883 | invalid_str(geostr, (void *) param + param_size)) { | 889 | invalid_str(geostr, (void *) param + param_size)) { |
884 | DMWARN("Invalid geometry supplied."); | 890 | DMWARN("Invalid geometry supplied."); |
885 | goto out; | 891 | goto out; |
886 | } | 892 | } |
887 | 893 | ||
888 | x = sscanf(geostr, "%lu %lu %lu %lu", indata, | 894 | x = sscanf(geostr, "%lu %lu %lu %lu", indata, |
889 | indata + 1, indata + 2, indata + 3); | 895 | indata + 1, indata + 2, indata + 3); |
890 | 896 | ||
891 | if (x != 4) { | 897 | if (x != 4) { |
892 | DMWARN("Unable to interpret geometry settings."); | 898 | DMWARN("Unable to interpret geometry settings."); |
893 | goto out; | 899 | goto out; |
894 | } | 900 | } |
895 | 901 | ||
896 | if (indata[0] > 65535 || indata[1] > 255 || | 902 | if (indata[0] > 65535 || indata[1] > 255 || |
897 | indata[2] > 255 || indata[3] > ULONG_MAX) { | 903 | indata[2] > 255 || indata[3] > ULONG_MAX) { |
898 | DMWARN("Geometry exceeds range limits."); | 904 | DMWARN("Geometry exceeds range limits."); |
899 | goto out; | 905 | goto out; |
900 | } | 906 | } |
901 | 907 | ||
902 | geometry.cylinders = indata[0]; | 908 | geometry.cylinders = indata[0]; |
903 | geometry.heads = indata[1]; | 909 | geometry.heads = indata[1]; |
904 | geometry.sectors = indata[2]; | 910 | geometry.sectors = indata[2]; |
905 | geometry.start = indata[3]; | 911 | geometry.start = indata[3]; |
906 | 912 | ||
907 | r = dm_set_geometry(md, &geometry); | 913 | r = dm_set_geometry(md, &geometry); |
908 | 914 | ||
909 | param->data_size = 0; | 915 | param->data_size = 0; |
910 | 916 | ||
911 | out: | 917 | out: |
912 | dm_put(md); | 918 | dm_put(md); |
913 | return r; | 919 | return r; |
914 | } | 920 | } |
915 | 921 | ||
/* Suspend the device (if not already suspended), then report status. */
static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/* Translate the ioctl flags into dm_suspend() flags. */
	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	/* Already-suspended devices just get their status reported. */
	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, suspend_flags);
		if (r)
			goto out;
	}

	__dev_status(md, param);

out:
	dm_put(md);

	return r;
}
944 | 950 | ||
/*
 * Resume a device: if a new (inactive) table has been loaded, swap it
 * in first -- suspending the device if necessary -- then resume.
 */
static int do_resume(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map, *old_map = NULL;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/* Take ownership of the inactive table before dropping the lock. */
	new_map = hc->new_map;
	hc->new_map = NULL;
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */
	if (new_map) {
		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended_md(md))
			dm_suspend(md, suspend_flags);

		old_map = dm_swap_table(md, new_map);
		if (IS_ERR(old_map)) {
			dm_table_destroy(new_map);
			dm_put(md);
			return PTR_ERR(old_map);
		}

		/* The disk's read-only state follows the new table's mode. */
		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);
	}

	if (dm_suspended_md(md)) {
		r = dm_resume(md);
		if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
			param->flags |= DM_UEVENT_GENERATED_FLAG;
	}

	/* Destroy the displaced table outside all locks. */
	if (old_map)
		dm_table_destroy(old_map);

	if (!r)
		__dev_status(md, param);

	dm_put(md);
	return r;
}
1008 | 1014 | ||
1009 | /* | 1015 | /* |
1010 | * Set or unset the suspension state of a device. | 1016 | * Set or unset the suspension state of a device. |
1011 | * If the device already is in the requested state we just return its status. | 1017 | * If the device already is in the requested state we just return its status. |
1012 | */ | 1018 | */ |
1013 | static int dev_suspend(struct dm_ioctl *param, size_t param_size) | 1019 | static int dev_suspend(struct dm_ioctl *param, size_t param_size) |
1014 | { | 1020 | { |
1015 | if (param->flags & DM_SUSPEND_FLAG) | 1021 | if (param->flags & DM_SUSPEND_FLAG) |
1016 | return do_suspend(param); | 1022 | return do_suspend(param); |
1017 | 1023 | ||
1018 | return do_resume(param); | 1024 | return do_resume(param); |
1019 | } | 1025 | } |
1020 | 1026 | ||
1021 | /* | 1027 | /* |
1022 | * Copies device info back to user space, used by | 1028 | * Copies device info back to user space, used by |
1023 | * the create and info ioctls. | 1029 | * the create and info ioctls. |
1024 | */ | 1030 | */ |
1025 | static int dev_status(struct dm_ioctl *param, size_t param_size) | 1031 | static int dev_status(struct dm_ioctl *param, size_t param_size) |
1026 | { | 1032 | { |
1027 | struct mapped_device *md; | 1033 | struct mapped_device *md; |
1028 | 1034 | ||
1029 | md = find_device(param); | 1035 | md = find_device(param); |
1030 | if (!md) | 1036 | if (!md) |
1031 | return -ENXIO; | 1037 | return -ENXIO; |
1032 | 1038 | ||
1033 | __dev_status(md, param); | 1039 | __dev_status(md, param); |
1034 | dm_put(md); | 1040 | dm_put(md); |
1035 | 1041 | ||
1036 | return 0; | 1042 | return 0; |
1037 | } | 1043 | } |
1038 | 1044 | ||
1039 | /* | 1045 | /* |
1040 | * Build up the status struct for each target | 1046 | * Build up the status struct for each target |
1041 | */ | 1047 | */ |
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
{
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	status_type_t type;
	size_t remaining, len, used = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	/* The same code serves both DM_TABLE_STATUS modes. */
	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);

		/* Room for the fixed-size spec header? */
		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		spec = (struct dm_target_spec *) outptr;

		spec->status = 0;
		spec->sector_start = ti->begin;
		spec->length = ti->len;
		/*
		 * NOTE(review): strncpy() leaves target_type unterminated
		 * if the type name exactly fills the field -- presumably
		 * target type names are shorter than that; confirm.
		 */
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type));

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (ti->type->status(ti, type, outptr, remaining)) {
				param->flags |= DM_BUFFER_FULL_FLAG;
				break;
			}
		} else
			outptr[0] = '\0';

		outptr += strlen(outptr) + 1;
		used = param->data_start + (outptr - outbuf);

		/* Record the aligned offset of the next spec. */
		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;
	}

	if (used)
		param->data_size = used;

	param->target_count = num_targets;
}
1105 | 1111 | ||
1106 | /* | 1112 | /* |
1107 | * Wait for a device to report an event | 1113 | * Wait for a device to report an event |
1108 | */ | 1114 | */ |
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
	int r = 0;
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/*
	 * Wait for a notification event
	 */
	if (dm_wait_event(md, param->event_nr)) {
		/* Interrupted: let userspace restart the wait. */
		r = -ERESTARTSYS;
		goto out;
	}

	/*
	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	 */
	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param);
	if (table) {
		retrieve_status(table, param, param_size);
		dm_table_put(table);
	}

out:
	dm_put(md);

	return r;
}
1145 | 1151 | ||
1146 | static inline fmode_t get_mode(struct dm_ioctl *param) | 1152 | static inline fmode_t get_mode(struct dm_ioctl *param) |
1147 | { | 1153 | { |
1148 | fmode_t mode = FMODE_READ | FMODE_WRITE; | 1154 | fmode_t mode = FMODE_READ | FMODE_WRITE; |
1149 | 1155 | ||
1150 | if (param->flags & DM_READONLY_FLAG) | 1156 | if (param->flags & DM_READONLY_FLAG) |
1151 | mode = FMODE_READ; | 1157 | mode = FMODE_READ; |
1152 | 1158 | ||
1153 | return mode; | 1159 | return mode; |
1154 | } | 1160 | } |
1155 | 1161 | ||
/*
 * Advance to the next dm_target_spec in the parameter block.  'next' is
 * the byte offset of the following spec relative to 'last'.  Sets *spec
 * and *target_params (the string immediately after the spec header).
 * Returns -EINVAL if the offset points backwards into the previous spec,
 * or if the parameter string is not NUL-terminated before 'end'.
 *
 * NOTE(review): there is no explicit overflow check on 'last + next';
 * presumably 'next' is bounded by the copied-in data_size — confirm
 * against the callers before relying on it.
 */
static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
{
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	if (*spec < (last + 1))
		return -EINVAL;

	return invalid_str(*target_params, end);
}
1167 | 1173 | ||
/*
 * Walk the chain of dm_target_spec structures in the ioctl parameter
 * block and add each one as a target of 'table', then complete the
 * table.  Returns 0 on success or a negative errno.
 */
static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	int r;
	unsigned int i = 0;
	/* The first spec sits at data_start bytes past the header. */
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");
		return -EINVAL;
	}

	for (i = 0; i < param->target_count; i++) {

		r = next_target(spec, next, end, &spec, &target_params);
		if (r) {
			DMWARN("unable to find target");
			return r;
		}

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
		if (r) {
			DMWARN("error adding target to table");
			return r;
		}

		/* Each spec records the offset of its successor. */
		next = spec->next;
	}

	/* Validate the assembled table and build its index. */
	return dm_table_complete(table);
}
1205 | 1211 | ||
/*
 * Build a new table from the ioctl parameters and stage it as the
 * device's inactive table (hc->new_map); it only becomes live on a
 * subsequent resume.  Also fixes the device's type on the first load.
 */
static int table_load(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct dm_table *t;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = dm_table_create(&t, get_mode(param), param->target_count, md);
	if (r)
		goto out;

	r = populate_table(t, param, param_size);
	if (r) {
		dm_table_destroy(t);
		goto out;
	}

	/* Protect md->type and md->queue against concurrent table loads. */
	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		/* Initial table load: acquire type of table. */
		dm_set_md_type(md, dm_table_get_type(t));
	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		DMWARN("can't change device type after initial table load.");
		dm_table_destroy(t);
		dm_unlock_md_type(md);
		r = -EINVAL;
		goto out;
	}

	/* setup md->queue to reflect md's type (may block) */
	r = dm_setup_md_queue(md);
	if (r) {
		DMWARN("unable to set up device queue for new table.");
		dm_table_destroy(t);
		dm_unlock_md_type(md);
		goto out;
	}
	dm_unlock_md_type(md);

	/* stage inactive table */
	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		/* Device was removed while we were building the table. */
		DMWARN("device has been removed from the dev hash table.");
		dm_table_destroy(t);
		up_write(&_hash_lock);
		r = -ENXIO;
		goto out;
	}

	/* Replace any previously staged (but never resumed) table. */
	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	hc->new_map = t;
	up_write(&_hash_lock);

	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	__dev_status(md, param);

out:
	dm_put(md);

	return r;
}
1274 | 1280 | ||
/*
 * Discard the device's staged inactive table (if any) without touching
 * the live table, and clear DM_INACTIVE_PRESENT_FLAG in the reply.
 */
static int table_clear(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	if (hc->new_map) {
		dm_table_destroy(hc->new_map);
		hc->new_map = NULL;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(hc->md, param);
	/*
	 * Cache md before dropping the lock: hc may be freed once
	 * _hash_lock is released, but our md reference keeps md alive.
	 */
	md = hc->md;
	up_write(&_hash_lock);
	dm_put(md);

	return 0;
}
1303 | 1309 | ||
/*
 * Retrieves a list of devices used by a particular dm device.
 */
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	/*
	 * Count the devices.
	 */
	list_for_each (tmp, dm_table_get_devices(table))
		count++;

	/*
	 * Check we have enough space.
	 */
	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
	if (len < needed) {
		/* Tell userspace to retry with a bigger buffer. */
		param->flags |= DM_BUFFER_FULL_FLAG;
		return;
	}

	/*
	 * Fill in the devices.
	 */
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);

	param->data_size = param->data_start + needed;
}
1343 | 1349 | ||
/*
 * DM_TABLE_DEPS: report the device's status plus the list of underlying
 * devices referenced by its live (or, if requested, inactive) table.
 */
static int table_deps(struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param);
	if (table) {
		retrieve_deps(table, param, param_size);
		dm_table_put(table);
	}

	dm_put(md);

	return 0;
}
1365 | 1371 | ||
/*
 * Return the status of a device as a text string for each
 * target.
 */
static int table_status(struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	table = dm_get_live_or_inactive_table(md, param);
	if (table) {
		retrieve_status(table, param, param_size);
		dm_table_put(table);
	}

	dm_put(md);

	return 0;
}
1391 | 1397 | ||
1392 | /* | 1398 | /* |
1393 | * Pass a message to the target that's at the supplied device offset. | 1399 | * Pass a message to the target that's at the supplied device offset. |
1394 | */ | 1400 | */ |
1395 | static int target_message(struct dm_ioctl *param, size_t param_size) | 1401 | static int target_message(struct dm_ioctl *param, size_t param_size) |
1396 | { | 1402 | { |
1397 | int r, argc; | 1403 | int r, argc; |
1398 | char **argv; | 1404 | char **argv; |
1399 | struct mapped_device *md; | 1405 | struct mapped_device *md; |
1400 | struct dm_table *table; | 1406 | struct dm_table *table; |
1401 | struct dm_target *ti; | 1407 | struct dm_target *ti; |
1402 | struct dm_target_msg *tmsg = (void *) param + param->data_start; | 1408 | struct dm_target_msg *tmsg = (void *) param + param->data_start; |
1403 | 1409 | ||
1404 | md = find_device(param); | 1410 | md = find_device(param); |
1405 | if (!md) | 1411 | if (!md) |
1406 | return -ENXIO; | 1412 | return -ENXIO; |
1407 | 1413 | ||
1408 | if (tmsg < (struct dm_target_msg *) param->data || | 1414 | if (tmsg < (struct dm_target_msg *) param->data || |
1409 | invalid_str(tmsg->message, (void *) param + param_size)) { | 1415 | invalid_str(tmsg->message, (void *) param + param_size)) { |
1410 | DMWARN("Invalid target message parameters."); | 1416 | DMWARN("Invalid target message parameters."); |
1411 | r = -EINVAL; | 1417 | r = -EINVAL; |
1412 | goto out; | 1418 | goto out; |
1413 | } | 1419 | } |
1414 | 1420 | ||
1415 | r = dm_split_args(&argc, &argv, tmsg->message); | 1421 | r = dm_split_args(&argc, &argv, tmsg->message); |
1416 | if (r) { | 1422 | if (r) { |
1417 | DMWARN("Failed to split target message parameters"); | 1423 | DMWARN("Failed to split target message parameters"); |
1418 | goto out; | 1424 | goto out; |
1419 | } | 1425 | } |
1420 | 1426 | ||
1421 | if (!argc) { | 1427 | if (!argc) { |
1422 | DMWARN("Empty message received."); | 1428 | DMWARN("Empty message received."); |
1423 | goto out; | 1429 | goto out; |
1424 | } | 1430 | } |
1425 | 1431 | ||
1426 | table = dm_get_live_table(md); | 1432 | table = dm_get_live_table(md); |
1427 | if (!table) | 1433 | if (!table) |
1428 | goto out_argv; | 1434 | goto out_argv; |
1429 | 1435 | ||
1430 | if (dm_deleting_md(md)) { | 1436 | if (dm_deleting_md(md)) { |
1431 | r = -ENXIO; | 1437 | r = -ENXIO; |
1432 | goto out_table; | 1438 | goto out_table; |
1433 | } | 1439 | } |
1434 | 1440 | ||
1435 | ti = dm_table_find_target(table, tmsg->sector); | 1441 | ti = dm_table_find_target(table, tmsg->sector); |
1436 | if (!dm_target_is_valid(ti)) { | 1442 | if (!dm_target_is_valid(ti)) { |
1437 | DMWARN("Target message sector outside device."); | 1443 | DMWARN("Target message sector outside device."); |
1438 | r = -EINVAL; | 1444 | r = -EINVAL; |
1439 | } else if (ti->type->message) | 1445 | } else if (ti->type->message) |
1440 | r = ti->type->message(ti, argc, argv); | 1446 | r = ti->type->message(ti, argc, argv); |
1441 | else { | 1447 | else { |
1442 | DMWARN("Target type does not support messages"); | 1448 | DMWARN("Target type does not support messages"); |
1443 | r = -EINVAL; | 1449 | r = -EINVAL; |
1444 | } | 1450 | } |
1445 | 1451 | ||
1446 | out_table: | 1452 | out_table: |
1447 | dm_table_put(table); | 1453 | dm_table_put(table); |
1448 | out_argv: | 1454 | out_argv: |
1449 | kfree(argv); | 1455 | kfree(argv); |
1450 | out: | 1456 | out: |
1451 | param->data_size = 0; | 1457 | param->data_size = 0; |
1452 | dm_put(md); | 1458 | dm_put(md); |
1453 | return r; | 1459 | return r; |
1454 | } | 1460 | } |
1455 | 1461 | ||
/*-----------------------------------------------------------------
 * Implementation of open/close/ioctl on the special char
 * device.
 *---------------------------------------------------------------*/
static ioctl_fn lookup_ioctl(unsigned int cmd)
{
	/*
	 * Dispatch table indexed by the ioctl command number (_IOC_NR),
	 * so entries must stay in command-number order.
	 */
	static struct {
		int cmd;
		ioctl_fn fn;
	} _ioctls[] = {
		{DM_VERSION_CMD, NULL},	/* version is dealt with elsewhere */
		{DM_REMOVE_ALL_CMD, remove_all},
		{DM_LIST_DEVICES_CMD, list_devices},

		{DM_DEV_CREATE_CMD, dev_create},
		{DM_DEV_REMOVE_CMD, dev_remove},
		{DM_DEV_RENAME_CMD, dev_rename},
		{DM_DEV_SUSPEND_CMD, dev_suspend},
		{DM_DEV_STATUS_CMD, dev_status},
		{DM_DEV_WAIT_CMD, dev_wait},

		{DM_TABLE_LOAD_CMD, table_load},
		{DM_TABLE_CLEAR_CMD, table_clear},
		{DM_TABLE_DEPS_CMD, table_deps},
		{DM_TABLE_STATUS_CMD, table_status},

		{DM_LIST_VERSIONS_CMD, list_versions},

		{DM_TARGET_MSG_CMD, target_message},
		{DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
	};

	return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
}
1490 | 1496 | ||
/*
 * As well as checking the version compatibility this always
 * copies the kernel interface version out.
 */
static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
{
	uint32_t version[3];
	int r = 0;

	if (copy_from_user(version, user->version, sizeof(version)))
		return -EFAULT;

	/*
	 * Major must match exactly; the kernel's minor must be at least
	 * the one userspace asked for (minor revisions are additive).
	 */
	if ((DM_VERSION_MAJOR != version[0]) ||
	    (DM_VERSION_MINOR < version[1])) {
		DMWARN("ioctl interface mismatch: "
		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
		       DM_VERSION_PATCHLEVEL,
		       version[0], version[1], version[2], cmd);
		r = -EINVAL;
	}

	/*
	 * Fill in the kernel version.
	 */
	version[0] = DM_VERSION_MAJOR;
	version[1] = DM_VERSION_MINOR;
	version[2] = DM_VERSION_PATCHLEVEL;
	if (copy_to_user(user->version, version, sizeof(version)))
		return -EFAULT;

	return r;
}
1524 | 1530 | ||
/*
 * Copy the whole variable-sized dm_ioctl parameter block from userspace
 * into a fresh vmalloc'd buffer (*param).  If the caller set
 * DM_SECURE_DATA_FLAG, the userspace copy is wiped after the copy (and
 * the kernel copy is wiped on any failure path) so secrets such as
 * crypt keys do not linger.  Caller must vfree(*param).
 *
 * NOTE(review): tmp.data_size is only bounded below (>= header size);
 * the upper bound is whatever vmalloc will grant — confirm this is
 * acceptable since the ioctl is already CAP_SYS_ADMIN-only.
 */
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
{
	struct dm_ioctl tmp, *dmi;
	int secure_data;

	/* Read just the fixed header first to learn the full size. */
	if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
		return -EFAULT;

	if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
		return -EINVAL;

	secure_data = tmp.flags & DM_SECURE_DATA_FLAG;

	dmi = vmalloc(tmp.data_size);
	if (!dmi) {
		/* Still honour the wipe request even though we failed. */
		if (secure_data && clear_user(user, tmp.data_size))
			return -EFAULT;
		return -ENOMEM;
	}

	if (copy_from_user(dmi, user, tmp.data_size))
		goto bad;

	/* Wipe the user buffer so we do not return it to userspace */
	if (secure_data && clear_user(user, tmp.data_size))
		goto bad;

	*param = dmi;
	return 0;

bad:
	/* Scrub the kernel copy before freeing it. */
	if (secure_data)
		memset(dmi, 0, tmp.data_size);
	vfree(dmi);
	return -EFAULT;
}
1561 | 1567 | ||
1562 | static int validate_params(uint cmd, struct dm_ioctl *param) | 1568 | static int validate_params(uint cmd, struct dm_ioctl *param) |
1563 | { | 1569 | { |
1564 | /* Always clear this flag */ | 1570 | /* Always clear this flag */ |
1565 | param->flags &= ~DM_BUFFER_FULL_FLAG; | 1571 | param->flags &= ~DM_BUFFER_FULL_FLAG; |
1566 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; | 1572 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; |
1567 | param->flags &= ~DM_SECURE_DATA_FLAG; | 1573 | param->flags &= ~DM_SECURE_DATA_FLAG; |
1568 | 1574 | ||
1569 | /* Ignores parameters */ | 1575 | /* Ignores parameters */ |
1570 | if (cmd == DM_REMOVE_ALL_CMD || | 1576 | if (cmd == DM_REMOVE_ALL_CMD || |
1571 | cmd == DM_LIST_DEVICES_CMD || | 1577 | cmd == DM_LIST_DEVICES_CMD || |
1572 | cmd == DM_LIST_VERSIONS_CMD) | 1578 | cmd == DM_LIST_VERSIONS_CMD) |
1573 | return 0; | 1579 | return 0; |
1574 | 1580 | ||
1575 | if ((cmd == DM_DEV_CREATE_CMD)) { | 1581 | if ((cmd == DM_DEV_CREATE_CMD)) { |
1576 | if (!*param->name) { | 1582 | if (!*param->name) { |
1577 | DMWARN("name not supplied when creating device"); | 1583 | DMWARN("name not supplied when creating device"); |
1578 | return -EINVAL; | 1584 | return -EINVAL; |
1579 | } | 1585 | } |
1580 | } else if ((*param->uuid && *param->name)) { | 1586 | } else if ((*param->uuid && *param->name)) { |
1581 | DMWARN("only supply one of name or uuid, cmd(%u)", cmd); | 1587 | DMWARN("only supply one of name or uuid, cmd(%u)", cmd); |
1582 | return -EINVAL; | 1588 | return -EINVAL; |
1583 | } | 1589 | } |
1584 | 1590 | ||
1585 | /* Ensure strings are terminated */ | 1591 | /* Ensure strings are terminated */ |
1586 | param->name[DM_NAME_LEN - 1] = '\0'; | 1592 | param->name[DM_NAME_LEN - 1] = '\0'; |
1587 | param->uuid[DM_UUID_LEN - 1] = '\0'; | 1593 | param->uuid[DM_UUID_LEN - 1] = '\0'; |
1588 | 1594 | ||
1589 | return 0; | 1595 | return 0; |
1590 | } | 1596 | } |
1591 | 1597 | ||
/*
 * Common ioctl entry point: check capability and version, copy the
 * parameter block in, validate it, dispatch to the per-command handler,
 * copy the result back out, and wipe buffers if the caller asked for
 * secure data handling.
 */
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	int wipe_buffer;
	unsigned int cmd;
	struct dm_ioctl *uninitialized_var(param);
	ioctl_fn fn = NULL;
	size_t input_param_size;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (_IOC_TYPE(command) != DM_IOCTL)
		return -ENOTTY;

	cmd = _IOC_NR(command);

	/*
	 * Check the interface version passed in.  This also
	 * writes out the kernel's interface version.
	 */
	r = check_version(cmd, user);
	if (r)
		return r;

	/*
	 * Nothing more to do for the version command.
	 */
	if (cmd == DM_VERSION_CMD)
		return 0;

	fn = lookup_ioctl(cmd);
	if (!fn) {
		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
		return -ENOTTY;
	}

	/*
	 * Trying to avoid low memory issues when a device is
	 * suspended.
	 */
	current->flags |= PF_MEMALLOC;

	/*
	 * Copy the parameters into kernel space.
	 */
	r = copy_params(user, &param);

	current->flags &= ~PF_MEMALLOC;

	if (r)
		return r;

	/*
	 * Remember the original size and the secure flag now:
	 * validate_params() clears DM_SECURE_DATA_FLAG and the handler
	 * may overwrite param->data_size with the result size.
	 */
	input_param_size = param->data_size;
	wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;

	r = validate_params(cmd, param);
	if (r)
		goto out;

	param->data_size = sizeof(*param);
	r = fn(param, input_param_size);

	/*
	 * Copy the results back to userland.
	 */
	if (!r && copy_to_user(user, param, param->data_size))
		r = -EFAULT;

out:
	/* Scrub the kernel copy before freeing if secure data was used. */
	if (wipe_buffer)
		memset(param, 0, input_param_size);

	vfree(param);
	return r;
}
1669 | 1675 | ||
/* unlocked_ioctl entry point: forward to the common handler. */
static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
}
1674 | 1680 | ||
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point: the dm_ioctl layout is compat-clean, so
 * only the user pointer needs converting via compat_ptr().
 */
static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
}
#else
#define dm_compat_ctl_ioctl NULL
#endif
1683 | 1689 | ||
1684 | static const struct file_operations _ctl_fops = { | 1690 | static const struct file_operations _ctl_fops = { |
1685 | .open = nonseekable_open, | 1691 | .open = nonseekable_open, |
1686 | .unlocked_ioctl = dm_ctl_ioctl, | 1692 | .unlocked_ioctl = dm_ctl_ioctl, |
1687 | .compat_ioctl = dm_compat_ctl_ioctl, | 1693 | .compat_ioctl = dm_compat_ctl_ioctl, |
1688 | .owner = THIS_MODULE, | 1694 | .owner = THIS_MODULE, |
1689 | .llseek = noop_llseek, | 1695 | .llseek = noop_llseek, |
1690 | }; | 1696 | }; |
1691 | 1697 | ||
1692 | static struct miscdevice _dm_misc = { | 1698 | static struct miscdevice _dm_misc = { |
1693 | .minor = MAPPER_CTRL_MINOR, | 1699 | .minor = MAPPER_CTRL_MINOR, |
1694 | .name = DM_NAME, | 1700 | .name = DM_NAME, |
1695 | .nodename = DM_DIR "/" DM_CONTROL_NODE, | 1701 | .nodename = DM_DIR "/" DM_CONTROL_NODE, |
1696 | .fops = &_ctl_fops | 1702 | .fops = &_ctl_fops |
1697 | }; | 1703 | }; |
1698 | 1704 | ||
/* Allow the module to be auto-loaded by minor number or by device node name. */
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
1701 | 1707 | ||
1702 | /* | 1708 | /* |
1703 | * Create misc character device and link to DM_DIR/control. | 1709 | * Create misc character device and link to DM_DIR/control. |
1704 | */ | 1710 | */ |
1705 | int __init dm_interface_init(void) | 1711 | int __init dm_interface_init(void) |
1706 | { | 1712 | { |
1707 | int r; | 1713 | int r; |
1708 | 1714 | ||
1709 | r = dm_hash_init(); | 1715 | r = dm_hash_init(); |
1710 | if (r) | 1716 | if (r) |
1711 | return r; | 1717 | return r; |
1712 | 1718 | ||
1713 | r = misc_register(&_dm_misc); | 1719 | r = misc_register(&_dm_misc); |
1714 | if (r) { | 1720 | if (r) { |
1715 | DMERR("misc_register failed for control device"); | 1721 | DMERR("misc_register failed for control device"); |
1716 | dm_hash_exit(); | 1722 | dm_hash_exit(); |
1717 | return r; | 1723 | return r; |
1718 | } | 1724 | } |
1719 | 1725 | ||
1720 | DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, | 1726 | DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, |
1721 | DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, | 1727 | DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, |
1722 | DM_DRIVER_EMAIL); | 1728 | DM_DRIVER_EMAIL); |
1723 | return 0; | 1729 | return 0; |
1724 | } | 1730 | } |
1725 | 1731 | ||
1726 | void dm_interface_exit(void) | 1732 | void dm_interface_exit(void) |
1727 | { | 1733 | { |
1728 | if (misc_deregister(&_dm_misc) < 0) | 1734 | if (misc_deregister(&_dm_misc) < 0) |
1729 | DMERR("misc_deregister failed for control device"); | 1735 | DMERR("misc_deregister failed for control device"); |
1730 | 1736 | ||
1731 | dm_hash_exit(); | 1737 | dm_hash_exit(); |
1732 | } | 1738 | } |
1733 | 1739 | ||
1734 | /** | 1740 | /** |
1735 | * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers | 1741 | * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers |
1736 | * @md: Pointer to mapped_device | 1742 | * @md: Pointer to mapped_device |
1737 | * @name: Buffer (size DM_NAME_LEN) for name | 1743 | * @name: Buffer (size DM_NAME_LEN) for name |
1738 | * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined | 1744 | * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined |
1739 | */ | 1745 | */ |
1740 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) | 1746 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) |
1741 | { | 1747 | { |
1742 | int r = 0; | 1748 | int r = 0; |
1743 | struct hash_cell *hc; | 1749 | struct hash_cell *hc; |
1744 | 1750 | ||
1745 | if (!md) | 1751 | if (!md) |
1746 | return -ENXIO; | 1752 | return -ENXIO; |
1747 | 1753 | ||
1748 | mutex_lock(&dm_hash_cells_mutex); | 1754 | mutex_lock(&dm_hash_cells_mutex); |
1749 | hc = dm_get_mdptr(md); | 1755 | hc = dm_get_mdptr(md); |
1750 | if (!hc || hc->md != md) { | 1756 | if (!hc || hc->md != md) { |
1751 | r = -ENXIO; | 1757 | r = -ENXIO; |
1752 | goto out; | 1758 | goto out; |
1753 | } | 1759 | } |
1754 | 1760 | ||
1755 | if (name) | 1761 | if (name) |
1756 | strcpy(name, hc->name); | 1762 | strcpy(name, hc->name); |
1757 | if (uuid) | 1763 | if (uuid) |
1758 | strcpy(uuid, hc->uuid ? : ""); | 1764 | strcpy(uuid, hc->uuid ? : ""); |
1759 | 1765 | ||
1760 | out: | 1766 | out: |
1761 | mutex_unlock(&dm_hash_cells_mutex); | 1767 | mutex_unlock(&dm_hash_cells_mutex); |
1762 | 1768 | ||
1763 | return r; | 1769 | return r; |
1764 | } | 1770 | } |
1765 | 1771 |