Blame view
drivers/block/brd.c
13.1 KB
9db5579be rewrite rd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
/* * Ram backed block device driver. * * Copyright (C) 2007 Nick Piggin * Copyright (C) 2007 Novell Inc. * * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright * of their respective owners. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/highmem.h> |
2a48fc0ab block: autoconver... |
18 |
#include <linux/mutex.h> |
9db5579be rewrite rd |
19 |
#include <linux/radix-tree.h> |
ff01bb483 fs: move code out... |
20 |
#include <linux/fs.h> |
5a0e3ad6a include cleanup: ... |
21 |
#include <linux/slab.h> |
34c0fd540 mm, dax, pmem: in... |
22 23 24 |
#ifdef CONFIG_BLK_DEV_RAM_DAX #include <linux/pfn_t.h> #endif |
9db5579be rewrite rd |
25 |
|
7c0f6ba68 Replace <asm/uacc... |
26 |
#include <linux/uaccess.h> |
9db5579be rewrite rd |
27 28 29 30 31 32 33 34 35 36 37 38 39 40 |
#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;	/* device index; minor = brd_number * max_part */

	struct request_queue	*brd_queue;	/* bio-based queue, no request scheduling */
	struct gendisk		*brd_disk;
	struct list_head	brd_list;	/* link in global brd_devices list */

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;	/* serializes inserts into brd_pages */
	struct radix_tree_root	brd_pages;	/* sparse page store, indexed by page offset */
};

/*
 * Look up and return a brd's page for a given sector.
 *
 * NOTE(review): brd_mutex is not referenced anywhere in the code visible
 * here — presumably it serializes an open/ioctl path defined elsewhere in
 * the file; confirm against the full source before removing.
 */
static DEFINE_MUTEX(brd_mutex);
9db5579be rewrite rd |
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 |
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 *
 * Returns NULL only on allocation failure; may sleep (GFP_NOIO).
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/*
		 * Insert failed: presumably a concurrent inserter won the
		 * race for this index (the preload above should rule out
		 * -ENOMEM).  Drop our page and use the one already there.
		 */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 *
 * Pre-allocates the (at most two) backing pages a write of n bytes at
 * sector will touch, so that copy_to_brd itself never has to sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		/* write crosses a page boundary: make sure the second page exists too */
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 * Backing pages must already exist (see copy_to_brd_setup).
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		/* second page of a boundary-crossing write */
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 * Sectors never written read back as zeroes (no backing page -> memset 0).
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		/* second page of a boundary-crossing read */
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio: copy len bytes at (page, off) to or from
 * the brd at sector.  Returns 0 or a negative errno (-ENOSPC when a write
 * cannot allocate backing store).
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		/* may sleep; must happen before the atomic kmap below */
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		/* we just wrote into the caller's page: flush after the copy */
		flush_dcache_page(page);
	} else {
		/* we are about to read the caller's page: flush before the copy */
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}
dece16353 block: change ->m... |
284 |
/*
 * Bio-based entry point: service each segment of the bio synchronously
 * against the radix-tree backing store, then complete the bio.  Any
 * failure (out-of-range access or -ENOSPC from page allocation) fails
 * the whole bio.
 */
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	/* reject I/O extending past the end of the disk */
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}
a72132c31 brd: add support ... |
311 |
/*
 * ->rw_page implementation: synchronously read or write one full page,
 * then signal completion via page_endio().  Returns brd_do_bvec()'s
 * result (0 or negative errno).
 */
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err;

	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}
a7a97fc9f brd: rename XIP t... |
319 |
#ifdef CONFIG_BLK_DEV_RAM_DAX
/*
 * DAX ->direct_access: hand back a kernel virtual address and pfn for the
 * page backing @sector, allocating it on demand.  Only ever maps one page
 * at a time (returns PAGE_SIZE regardless of @size).  Requires lowmem
 * pages — see the gfp_flags logic in brd_insert_page().
 */
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif
83d5cde47 const: make block... |
339 |
/* Block device operations; I/O itself goes through brd_make_request(). */
static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.direct_access =	brd_direct_access,	/* NULL unless CONFIG_BLK_DEV_RAM_DAX */
};

/*
 * And now the modules code and kernel interface.
 */
937af5ecd brd: Fix all part... |
348 |
/* Number of devices to create at load time (more can appear on demand). */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

/* Non-static: also settable via the "ramdisk_size=" boot option below. */
unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

/* Minor numbers reserved per device (partition support spacing). */
static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
9db5579be rewrite rd |
362 363 364 365 366 367 368 369 |
#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);	/* protects brd_devices */

/*
 * Allocate and initialize one brd device (queue + gendisk), but do not
 * register its disk yet.  Returns NULL on any allocation failure, with
 * everything unwound.
 */
static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless this call
	 *  is harmless)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
#endif
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);	/* rd_size is in KiB, capacity in 512-byte sectors */

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

/* Tear down everything brd_alloc() built, plus the backing pages. */
static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

/*
 * Find the brd with number i, creating and registering it on demand.
 * Caller must hold brd_devices_mutex.  *new tells the caller whether a
 * fresh device was (attempted to be) created.
 */
static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

/* Unregister and free one device; caller handles list locking. */
static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

/*
 * blk_register_region() probe callback: instantiate the device backing
 * dev_t @dev on first access (see the comment in brd_init() below).
 */
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that there is an access dev node.
	 *
	 * (1) if rd_nr is specified, create that many upfront. else
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
	 * (2) User can further extend brd devices by create dev node themselves
	 *     and have kernel automatically instantiate actual device
	 *     on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;	/* guard against a nonsensical max_part=0 module option */

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);