Commit 22a8578fca5a47e643bb4f70c232d0ec84db9e4e

Authored by Ezequiel Garcia
Committed by Artem Bityutskiy
Parent: 9329c5eb5b

mtd: mtd_blkdevs: Replace request handler kthread with a workqueue

Replacing the kthread with a workqueue makes the code a bit clearer.
It also brings a slight reduction in code size (figures are for x86):
Before:
   text	   data	    bss	    dec	    hex	filename
   3248	     36	      0	   3284	    cd4	drivers/mtd/mtd_blkdevs.o

After:
   text	   data	    bss	    dec	    hex	filename
   3150	     36	      0	   3186	    c72	drivers/mtd/mtd_blkdevs.o

Due to the lack of real hardware, tests have been performed in an emulated
environment, with mtdswap and mtdblock on top of nandsim devices.
Some testing on real hardware should be done before merging this patch.

Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>

Showing 2 changed files with 18 additions and 33 deletions
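The core of the patch is a standard kthread-to-workqueue conversion: the per-device thread that slept waiting for requests is replaced by a work item, queued from the block request function whenever there is work to do, and torn down with destroy_workqueue() (which runs any pending work) on device removal. The following is a minimal sketch of that pattern only; the names (demo_dev, demo_work_fn, etc.) are illustrative and are not the driver's actual symbols.

/* Sketch of the kthread -> workqueue pattern applied by this patch.
 * All identifiers here are hypothetical, for illustration only.
 */
#include <linux/workqueue.h>
#include <linux/errno.h>

struct demo_dev {
	struct workqueue_struct *wq;	/* replaces struct task_struct *thread */
	struct work_struct work;	/* queued instead of waking the thread */
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, work);

	/* Drain all pending requests here, then simply return; the work
	 * item is queued again the next time the request function runs,
	 * so no sleep/wake loop is needed. */
}

static int demo_add(struct demo_dev *dev)
{
	/* One workqueue per device, named like the old kthread was. */
	dev->wq = alloc_workqueue("demo%d", 0, 0, 0);
	if (!dev->wq)
		return -ENOMEM;
	INIT_WORK(&dev->work, demo_work_fn);
	return 0;
}

/* Called from the block request function instead of wake_up_process(). */
static void demo_kick(struct demo_dev *dev)
{
	queue_work(dev->wq, &dev->work);
}

static void demo_remove(struct demo_dev *dev)
{
	/* Flushes and runs any pending work, then frees the workqueue;
	 * this takes the place of kthread_stop(). */
	destroy_workqueue(dev->wq);
}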

drivers/mtd/mtd_blkdevs.c
@@ -32,7 +32,6 @@
 #include <linux/hdreg.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/kthread.h>
 #include <asm/uaccess.h>
 
 #include "mtdcore.h"
@@ -122,69 +121,57 @@
 int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
 {
-	if (kthread_should_stop())
-		return 1;
-
 	return dev->bg_stop;
 }
 EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
 
-static int mtd_blktrans_thread(void *arg)
+static void mtd_blktrans_work(struct work_struct *work)
 {
-	struct mtd_blktrans_dev *dev = arg;
+	struct mtd_blktrans_dev *dev =
+		container_of(work, struct mtd_blktrans_dev, work);
 	struct mtd_blktrans_ops *tr = dev->tr;
 	struct request_queue *rq = dev->rq;
 	struct request *req = NULL;
 	int background_done = 0;
 
 	spin_lock_irq(rq->queue_lock);
 
-	while (!kthread_should_stop()) {
+	while (1) {
 		int res;
 
 		dev->bg_stop = false;
 		if (!req && !(req = blk_fetch_request(rq))) {
 			if (tr->background && !background_done) {
 				spin_unlock_irq(rq->queue_lock);
 				mutex_lock(&dev->lock);
 				tr->background(dev);
 				mutex_unlock(&dev->lock);
 				spin_lock_irq(rq->queue_lock);
 				/*
 				 * Do background processing just once per idle
 				 * period.
 				 */
 				background_done = !dev->bg_stop;
 				continue;
 			}
-			set_current_state(TASK_INTERRUPTIBLE);
-
-			if (kthread_should_stop())
-				set_current_state(TASK_RUNNING);
-
-			spin_unlock_irq(rq->queue_lock);
-			schedule();
-			spin_lock_irq(rq->queue_lock);
-			continue;
+			break;
 		}
 
 		spin_unlock_irq(rq->queue_lock);
 
 		mutex_lock(&dev->lock);
 		res = do_blktrans_request(dev->tr, dev, req);
 		mutex_unlock(&dev->lock);
 
 		spin_lock_irq(rq->queue_lock);
 
 		if (!__blk_end_request_cur(req, res))
 			req = NULL;
 
 		background_done = 0;
 	}
 
 	if (req)
 		__blk_end_request_all(req, -EIO);
 
 	spin_unlock_irq(rq->queue_lock);
-
-	return 0;
 }
@@ -199,8 +186,6 @@
 	if (!dev)
 		while ((req = blk_fetch_request(rq)) != NULL)
 			__blk_end_request_all(req, -ENODEV);
-	else {
-		dev->bg_stop = true;
-		wake_up_process(dev->thread);
-	}
+	else
+		queue_work(dev->wq, &dev->work);
 }
@@ -437,14 +422,13 @@
 
 	gd->queue = new->rq;
 
-	/* Create processing thread */
-	/* TODO: workqueue ? */
-	new->thread = kthread_run(mtd_blktrans_thread, new,
-			"%s%d", tr->name, new->mtd->index);
-	if (IS_ERR(new->thread)) {
-		ret = PTR_ERR(new->thread);
+	/* Create processing workqueue */
+	new->wq = alloc_workqueue("%s%d", 0, 0,
+			tr->name, new->mtd->index);
+	if (!new->wq)
 		goto error4;
-	}
+	INIT_WORK(&new->work, mtd_blktrans_work);
+
 	gd->driverfs_dev = &new->mtd->dev;
 
 	if (new->readonly)
@@ -484,9 +468,8 @@
 	/* Stop new requests to arrive */
 	del_gendisk(old->disk);
 
-
-	/* Stop the thread */
-	kthread_stop(old->thread);
+	/* Stop workqueue. This will perform any pending request. */
+	destroy_workqueue(old->wq);
 
 	/* Kill current requests */
 	spin_lock_irqsave(&old->queue_lock, flags);
include/linux/mtd/blktrans.h
@@ -23,6 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/kref.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
 
 struct hd_geometry;
 struct mtd_info;
@@ -43,7 +44,8 @@
 	struct kref ref;
 	struct gendisk *disk;
 	struct attribute_group *disk_attributes;
-	struct task_struct *thread;
+	struct workqueue_struct *wq;
+	struct work_struct work;
 	struct request_queue *rq;
 	spinlock_t queue_lock;
 	void *priv;