Commit 9c191f701ce9f9bc604e88a5dc69cd943daa5d3b

Authored by T Makphaibulchoke
Committed by Theodore Ts'o
1 parent 1f3e55fe02

ext4: each filesystem creates and uses its own mb_cache

This patch adds new interfaces to create and destroy a cache,
ext4_xattr_create_cache() and ext4_xattr_destroy_cache(), and removes
the cache creation and destruction calls from ext4_init_xattr() and
ext4_exit_xattr() in fs/ext4/xattr.c.
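
The new helpers are thin wrappers around the existing mb_cache API. Condensed
from the fs/ext4/xattr.c and fs/ext4/xattr.h hunks below (all names are taken
from the patch itself), they amount to:

    #define HASH_BUCKET_BITS 10

    /* Create a per-filesystem xattr cache; 'name' labels the mb_cache. */
    struct mb_cache *ext4_xattr_create_cache(char *name)
    {
            return mb_cache_create(name, HASH_BUCKET_BITS);
    }

    /* Tear the cache down again; safe to call with a NULL cache. */
    void ext4_xattr_destroy_cache(struct mb_cache *cache)
    {
            if (cache)
                    mb_cache_destroy(cache);
    }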

fs/ext4/super.c has been changed so that when a filesystem is mounted,
a cache is allocated and attached to its ext4_sb_info structure.
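
A rough sketch of the resulting per-mount cache lifetime, condensed from the
super.c hunks below. The *_sketch function names are illustrative only and the
error handling is simplified; the fields and calls are the ones added by the
patch, which places this logic in ext4's mount and unmount paths:

    /* Mount: each filesystem gets its own cache, named after the device. */
    static int ext4_mount_cache_sketch(struct super_block *sb)
    {
            struct ext4_sb_info *sbi = EXT4_SB(sb);

            sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
            if (!sbi->s_mb_cache) {
                    ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
                    return -ENOMEM;  /* the patch jumps to its mount error path */
            }
            return 0;
    }

    /* Unmount: the cache is torn down together with the superblock. */
    static void ext4_umount_cache_sketch(struct ext4_sb_info *sbi)
    {
            if (sbi->s_mb_cache) {
                    ext4_xattr_destroy_cache(sbi->s_mb_cache);
                    sbi->s_mb_cache = NULL;
            }
    }

fs/ext4/xattr.c then reaches the per-filesystem cache through the new
EXT4_GET_MB_CACHE(inode) macro instead of the old global ext4_xattr_cache.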

fs/mbcache.c has been changed so that a single slab allocator is
created and shared by all mb_cache structures.
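
Since there can now be many mb_cache instances (one per mounted filesystem),
the entry slab is created lazily on the first mb_cache_create() call, shared by
every cache, and released once the last cache on mb_cache_list is destroyed.
A condensed (non-standalone) excerpt of the two fs/mbcache.c hunks below:

    static struct kmem_cache *mb_cache_kmem_cache;

    /* In mb_cache_create(): allocate the entry slab only once, then share it. */
    if (!mb_cache_kmem_cache) {
            mb_cache_kmem_cache = kmem_cache_create(name,
                            sizeof(struct mb_cache_entry), 0,
                            SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
            if (!mb_cache_kmem_cache)
                    goto fail2;     /* existing error path in mb_cache_create() */
    }
    cache->c_entry_cache = mb_cache_kmem_cache;

    /* In mb_cache_destroy(): drop the slab once no caches remain on the list. */
    if (list_empty(&mb_cache_list)) {
            kmem_cache_destroy(mb_cache_kmem_cache);
            mb_cache_kmem_cache = NULL;
    }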

Signed-off-by: T. Makphaibulchoke <tmac@hp.com>

Showing 5 changed files with 62 additions and 39 deletions

fs/ext4/ext4.h
... ... @@ -1329,6 +1329,7 @@
1329 1329 struct list_head s_es_lru;
1330 1330 unsigned long s_es_last_sorted;
1331 1331 struct percpu_counter s_extent_cache_cnt;
  1332 + struct mb_cache *s_mb_cache;
1332 1333 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
1333 1334  
1334 1335 /* Ratelimit ext4 messages. */

fs/ext4/super.c
... ... @@ -59,6 +59,7 @@
59 59 static struct ext4_lazy_init *ext4_li_info;
60 60 static struct mutex ext4_li_mtx;
61 61 static struct ext4_features *ext4_feat;
  62 +static int ext4_mballoc_ready;
62 63  
63 64 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
64 65 unsigned long journal_devnum);
... ... @@ -845,6 +846,10 @@
845 846 invalidate_bdev(sbi->journal_bdev);
846 847 ext4_blkdev_remove(sbi);
847 848 }
  849 + if (sbi->s_mb_cache) {
  850 + ext4_xattr_destroy_cache(sbi->s_mb_cache);
  851 + sbi->s_mb_cache = NULL;
  852 + }
848 853 if (sbi->s_mmp_tsk)
849 854 kthread_stop(sbi->s_mmp_tsk);
850 855 sb->s_fs_info = NULL;
... ... @@ -4010,6 +4015,14 @@
4010 4015 percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
4011 4016  
4012 4017 no_journal:
  4018 + if (ext4_mballoc_ready) {
  4019 + sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
  4020 + if (!sbi->s_mb_cache) {
  4021 + ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
  4022 + goto failed_mount_wq;
  4023 + }
  4024 + }
  4025 +
4013 4026 /*
4014 4027 * Get the # of file system overhead blocks from the
4015 4028 * superblock if present.
4016 4029  
... ... @@ -5519,11 +5532,9 @@
5519 5532  
5520 5533 err = ext4_init_mballoc();
5521 5534 if (err)
5522   - goto out3;
5523   -
5524   - err = ext4_init_xattr();
5525   - if (err)
5526 5535 goto out2;
  5536 + else
  5537 + ext4_mballoc_ready = 1;
5527 5538 err = init_inodecache();
5528 5539 if (err)
5529 5540 goto out1;
5530 5541  
... ... @@ -5539,10 +5550,9 @@
5539 5550 unregister_as_ext3();
5540 5551 destroy_inodecache();
5541 5552 out1:
5542   - ext4_exit_xattr();
5543   -out2:
  5553 + ext4_mballoc_ready = 0;
5544 5554 ext4_exit_mballoc();
5545   -out3:
  5555 +out2:
5546 5556 ext4_exit_feat_adverts();
5547 5557 out4:
5548 5558 if (ext4_proc_root)
... ... @@ -5565,7 +5575,6 @@
5565 5575 unregister_as_ext3();
5566 5576 unregister_filesystem(&ext4_fs_type);
5567 5577 destroy_inodecache();
5568   - ext4_exit_xattr();
5569 5578 ext4_exit_mballoc();
5570 5579 ext4_exit_feat_adverts();
5571 5580 remove_proc_entry("fs/ext4", NULL);

fs/ext4/xattr.c
... ... @@ -81,7 +81,7 @@
81 81 # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
82 82 #endif
83 83  
84   -static void ext4_xattr_cache_insert(struct buffer_head *);
  84 +static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
85 85 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
86 86 struct ext4_xattr_header *,
87 87 struct mb_cache_entry **);
... ... @@ -90,8 +90,6 @@
90 90 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
91 91 size_t buffer_size);
92 92  
93   -static struct mb_cache *ext4_xattr_cache;
94   -
95 93 static const struct xattr_handler *ext4_xattr_handler_map[] = {
96 94 [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
97 95 #ifdef CONFIG_EXT4_FS_POSIX_ACL
... ... @@ -117,6 +115,9 @@
117 115 NULL
118 116 };
119 117  
  118 +#define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \
  119 + inode->i_sb->s_fs_info)->s_mb_cache)
  120 +
120 121 static __le32 ext4_xattr_block_csum(struct inode *inode,
121 122 sector_t block_nr,
122 123 struct ext4_xattr_header *hdr)
... ... @@ -265,6 +266,7 @@
265 266 struct ext4_xattr_entry *entry;
266 267 size_t size;
267 268 int error;
  269 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
268 270  
269 271 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
270 272 name_index, name, buffer, (long)buffer_size);
... ... @@ -286,7 +288,7 @@
286 288 error = -EIO;
287 289 goto cleanup;
288 290 }
289   - ext4_xattr_cache_insert(bh);
  291 + ext4_xattr_cache_insert(ext4_mb_cache, bh);
290 292 entry = BFIRST(bh);
291 293 error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
292 294 if (error == -EIO)
... ... @@ -409,6 +411,7 @@
409 411 struct inode *inode = dentry->d_inode;
410 412 struct buffer_head *bh = NULL;
411 413 int error;
  414 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
412 415  
413 416 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
414 417 buffer, (long)buffer_size);
... ... @@ -430,7 +433,7 @@
430 433 error = -EIO;
431 434 goto cleanup;
432 435 }
433   - ext4_xattr_cache_insert(bh);
  436 + ext4_xattr_cache_insert(ext4_mb_cache, bh);
434 437 error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
435 438  
436 439 cleanup:
437 440  
... ... @@ -526,8 +529,9 @@
526 529 {
527 530 struct mb_cache_entry *ce = NULL;
528 531 int error = 0;
  532 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
529 533  
530   - ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
  534 + ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
531 535 error = ext4_journal_get_write_access(handle, bh);
532 536 if (error)
533 537 goto out;
534 538  
... ... @@ -746,13 +750,14 @@
746 750 struct ext4_xattr_search *s = &bs->s;
747 751 struct mb_cache_entry *ce = NULL;
748 752 int error = 0;
  753 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
749 754  
750 755 #define header(x) ((struct ext4_xattr_header *)(x))
751 756  
752 757 if (i->value && i->value_len > sb->s_blocksize)
753 758 return -ENOSPC;
754 759 if (s->base) {
755   - ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
  760 + ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
756 761 bs->bh->b_blocknr);
757 762 error = ext4_journal_get_write_access(handle, bs->bh);
758 763 if (error)
... ... @@ -770,7 +775,8 @@
770 775 if (!IS_LAST_ENTRY(s->first))
771 776 ext4_xattr_rehash(header(s->base),
772 777 s->here);
773   - ext4_xattr_cache_insert(bs->bh);
  778 + ext4_xattr_cache_insert(ext4_mb_cache,
  779 + bs->bh);
774 780 }
775 781 unlock_buffer(bs->bh);
776 782 if (error == -EIO)
... ... @@ -906,7 +912,7 @@
906 912 memcpy(new_bh->b_data, s->base, new_bh->b_size);
907 913 set_buffer_uptodate(new_bh);
908 914 unlock_buffer(new_bh);
909   - ext4_xattr_cache_insert(new_bh);
  915 + ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
910 916 error = ext4_handle_dirty_xattr_block(handle,
911 917 inode, new_bh);
912 918 if (error)
913 919  
... ... @@ -1495,13 +1501,13 @@
1495 1501 * Returns 0, or a negative error number on failure.
1496 1502 */
1497 1503 static void
1498   -ext4_xattr_cache_insert(struct buffer_head *bh)
  1504 +ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
1499 1505 {
1500 1506 __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
1501 1507 struct mb_cache_entry *ce;
1502 1508 int error;
1503 1509  
1504   - ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
  1510 + ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
1505 1511 if (!ce) {
1506 1512 ea_bdebug(bh, "out of memory");
1507 1513 return;
1508 1514  
... ... @@ -1573,12 +1579,13 @@
1573 1579 {
1574 1580 __u32 hash = le32_to_cpu(header->h_hash);
1575 1581 struct mb_cache_entry *ce;
  1582 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
1576 1583  
1577 1584 if (!header->h_hash)
1578 1585 return NULL; /* never share */
1579 1586 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
1580 1587 again:
1581   - ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
  1588 + ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
1582 1589 hash);
1583 1590 while (ce) {
1584 1591 struct buffer_head *bh;
1585 1592  
1586 1593  
1587 1594  
... ... @@ -1676,20 +1683,17 @@
1676 1683  
1677 1684 #undef BLOCK_HASH_SHIFT
1678 1685  
1679   -int __init
1680   -ext4_init_xattr(void)
  1686 +#define HASH_BUCKET_BITS 10
  1687 +
  1688 +struct mb_cache *
  1689 +ext4_xattr_create_cache(char *name)
1681 1690 {
1682   - ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
1683   - if (!ext4_xattr_cache)
1684   - return -ENOMEM;
1685   - return 0;
  1691 + return mb_cache_create(name, HASH_BUCKET_BITS);
1686 1692 }
1687 1693  
1688   -void
1689   -ext4_exit_xattr(void)
  1694 +void ext4_xattr_destroy_cache(struct mb_cache *cache)
1690 1695 {
1691   - if (ext4_xattr_cache)
1692   - mb_cache_destroy(ext4_xattr_cache);
1693   - ext4_xattr_cache = NULL;
  1696 + if (cache)
  1697 + mb_cache_destroy(cache);
1694 1698 }

fs/ext4/xattr.h
... ... @@ -110,9 +110,6 @@
110 110 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
111 111 struct ext4_inode *raw_inode, handle_t *handle);
112 112  
113   -extern int __init ext4_init_xattr(void);
114   -extern void ext4_exit_xattr(void);
115   -
116 113 extern const struct xattr_handler *ext4_xattr_handlers[];
117 114  
118 115 extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
... ... @@ -123,6 +120,9 @@
123 120 extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
124 121 struct ext4_xattr_info *i,
125 122 struct ext4_xattr_ibody_find *is);
  123 +
  124 +extern struct mb_cache *ext4_xattr_create_cache(char *name);
  125 +extern void ext4_xattr_destroy_cache(struct mb_cache *);
126 126  
127 127 #ifdef CONFIG_EXT4_FS_SECURITY
128 128 extern int ext4_init_security(handle_t *handle, struct inode *inode,

fs/mbcache.c
... ... @@ -99,6 +99,7 @@
99 99  
100 100 static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
101 101 static struct blockgroup_lock *mb_cache_bg_lock;
  102 +static struct kmem_cache *mb_cache_kmem_cache;
102 103  
103 104 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
104 105 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
... ... @@ -351,11 +352,14 @@
351 352 goto fail;
352 353 for (n=0; n<bucket_count; n++)
353 354 INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
354   - cache->c_entry_cache = kmem_cache_create(name,
355   - sizeof(struct mb_cache_entry), 0,
356   - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
357   - if (!cache->c_entry_cache)
358   - goto fail2;
  355 + if (!mb_cache_kmem_cache) {
  356 + mb_cache_kmem_cache = kmem_cache_create(name,
  357 + sizeof(struct mb_cache_entry), 0,
  358 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
  359 + if (!mb_cache_kmem_cache)
  360 + goto fail2;
  361 + }
  362 + cache->c_entry_cache = mb_cache_kmem_cache;
359 363  
360 364 /*
361 365 * Set an upper limit on the number of cache entries so that the hash
... ... @@ -476,6 +480,10 @@
476 480 atomic_read(&cache->c_entry_count));
477 481 }
478 482  
  483 + if (list_empty(&mb_cache_list)) {
  484 + kmem_cache_destroy(mb_cache_kmem_cache);
  485 + mb_cache_kmem_cache = NULL;
  486 + }
479 487 kfree(cache->c_index_hash);
480 488 kfree(cache->c_block_hash);
481 489 kfree(cache);