Commit 6451e041c8d39daf39c71eefe839641c2093713e

Authored by Jaegeuk Kim
1 parent 953e6cc6bc

f2fs: add infra for ino management

This patch renames the orphan-related data structures so that they can be
used for globally managed inode numbers.
Later, we can use this facility for managing any list of inode numbers.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Showing 4 changed files with 55 additions and 44 deletions Side-by-side Diff

fs/f2fs/checkpoint.c
... ... @@ -22,7 +22,7 @@
22 22 #include "segment.h"
23 23 #include <trace/events/f2fs.h>
24 24  
25   -static struct kmem_cache *orphan_entry_slab;
  25 +static struct kmem_cache *ino_entry_slab;
26 26 static struct kmem_cache *inode_entry_slab;
27 27  
28 28 /*
29 29  
30 30  
31 31  
32 32  
... ... @@ -282,19 +282,18 @@
282 282 .set_page_dirty = f2fs_set_meta_page_dirty,
283 283 };
284 284  
285   -static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino)
  285 +static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
286 286 {
287   - struct list_head *head;
288   - struct orphan_inode_entry *new, *e;
  287 + struct ino_entry *new, *e;
289 288  
290   - new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
  289 + new = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
291 290 new->ino = ino;
292 291  
293   - spin_lock(&sbi->orphan_inode_lock);
294   - list_for_each_entry(e, &sbi->orphan_inode_list, list) {
  292 + spin_lock(&sbi->ino_lock[type]);
  293 + list_for_each_entry(e, &sbi->ino_list[type], list) {
295 294 if (e->ino == ino) {
296   - spin_unlock(&sbi->orphan_inode_lock);
297   - kmem_cache_free(orphan_entry_slab, new);
  295 + spin_unlock(&sbi->ino_lock[type]);
  296 + kmem_cache_free(ino_entry_slab, new);
298 297 return;
299 298 }
300 299 if (e->ino > ino)
301 300  
302 301  
303 302  
304 303  
305 304  
306 305  
307 306  
308 307  
309 308  
310 309  
311 310  
... ... @@ -303,58 +302,58 @@
303 302  
304 303 /* add new entry into list which is sorted by inode number */
305 304 list_add_tail(&new->list, &e->list);
306   - spin_unlock(&sbi->orphan_inode_lock);
  305 + spin_unlock(&sbi->ino_lock[type]);
307 306 }
308 307  
309   -static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino)
  308 +static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
310 309 {
311   - struct orphan_inode_entry *e;
  310 + struct ino_entry *e;
312 311  
313   - spin_lock(&sbi->orphan_inode_lock);
314   - list_for_each_entry(e, &sbi->orphan_inode_list, list) {
  312 + spin_lock(&sbi->ino_lock[type]);
  313 + list_for_each_entry(e, &sbi->ino_list[type], list) {
315 314 if (e->ino == ino) {
316 315 list_del(&e->list);
317 316 sbi->n_orphans--;
318   - spin_unlock(&sbi->orphan_inode_lock);
319   - kmem_cache_free(orphan_entry_slab, e);
  317 + spin_unlock(&sbi->ino_lock[type]);
  318 + kmem_cache_free(ino_entry_slab, e);
320 319 return;
321 320 }
322 321 }
323   - spin_unlock(&sbi->orphan_inode_lock);
  322 + spin_unlock(&sbi->ino_lock[type]);
324 323 }
325 324  
326 325 int acquire_orphan_inode(struct f2fs_sb_info *sbi)
327 326 {
328 327 int err = 0;
329 328  
330   - spin_lock(&sbi->orphan_inode_lock);
  329 + spin_lock(&sbi->ino_lock[ORPHAN_INO]);
331 330 if (unlikely(sbi->n_orphans >= sbi->max_orphans))
332 331 err = -ENOSPC;
333 332 else
334 333 sbi->n_orphans++;
335   - spin_unlock(&sbi->orphan_inode_lock);
  334 + spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
336 335  
337 336 return err;
338 337 }
339 338  
340 339 void release_orphan_inode(struct f2fs_sb_info *sbi)
341 340 {
342   - spin_lock(&sbi->orphan_inode_lock);
  341 + spin_lock(&sbi->ino_lock[ORPHAN_INO]);
343 342 f2fs_bug_on(sbi->n_orphans == 0);
344 343 sbi->n_orphans--;
345   - spin_unlock(&sbi->orphan_inode_lock);
  344 + spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
346 345 }
347 346  
348 347 void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
349 348 {
350 349 /* add new orphan entry into list which is sorted by inode number */
351   - __add_ino_entry(sbi, ino);
  350 + __add_ino_entry(sbi, ino, ORPHAN_INO);
352 351 }
353 352  
354 353 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
355 354 {
356 355 /* remove orphan entry from orphan list */
357   - __remove_ino_entry(sbi, ino);
  356 + __remove_ino_entry(sbi, ino, ORPHAN_INO);
358 357 }
359 358  
360 359 static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
361 360  
... ... @@ -408,14 +407,14 @@
408 407 unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
409 408 (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
410 409 struct page *page = NULL;
411   - struct orphan_inode_entry *orphan = NULL;
  410 + struct ino_entry *orphan = NULL;
412 411  
413 412 for (index = 0; index < orphan_blocks; index++)
414 413 grab_meta_page(sbi, start_blk + index);
415 414  
416 415 index = 1;
417   - spin_lock(&sbi->orphan_inode_lock);
418   - head = &sbi->orphan_inode_list;
  416 + spin_lock(&sbi->ino_lock[ORPHAN_INO]);
  417 + head = &sbi->ino_list[ORPHAN_INO];
419 418  
 420 419 /* loop for each orphan inode entry and write them in Journal block */
421 420 list_for_each_entry(orphan, head, list) {
... ... @@ -455,7 +454,7 @@
455 454 f2fs_put_page(page, 1);
456 455 }
457 456  
458   - spin_unlock(&sbi->orphan_inode_lock);
  457 + spin_unlock(&sbi->ino_lock[ORPHAN_INO]);
459 458 }
460 459  
461 460 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
462 461  
463 462  
464 463  
465 464  
... ... @@ -939,31 +938,36 @@
939 938 trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
940 939 }
941 940  
942   -void init_orphan_info(struct f2fs_sb_info *sbi)
  941 +void init_ino_entry_info(struct f2fs_sb_info *sbi)
943 942 {
944   - spin_lock_init(&sbi->orphan_inode_lock);
945   - INIT_LIST_HEAD(&sbi->orphan_inode_list);
946   - sbi->n_orphans = 0;
  943 + int i;
  944 +
  945 + for (i = 0; i < MAX_INO_ENTRY; i++) {
  946 + spin_lock_init(&sbi->ino_lock[i]);
  947 + INIT_LIST_HEAD(&sbi->ino_list[i]);
  948 + }
  949 +
947 950 /*
948 951 * considering 512 blocks in a segment 8 blocks are needed for cp
949 952 * and log segment summaries. Remaining blocks are used to keep
950 953 * orphan entries with the limitation one reserved segment
951 954 * for cp pack we can have max 1020*504 orphan entries
952 955 */
  956 + sbi->n_orphans = 0;
953 957 sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
954 958 * F2FS_ORPHANS_PER_BLOCK;
955 959 }
956 960  
957 961 int __init create_checkpoint_caches(void)
958 962 {
959   - orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
960   - sizeof(struct orphan_inode_entry));
961   - if (!orphan_entry_slab)
  963 + ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
  964 + sizeof(struct ino_entry));
  965 + if (!ino_entry_slab)
962 966 return -ENOMEM;
963 967 inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
964 968 sizeof(struct dir_inode_entry));
965 969 if (!inode_entry_slab) {
966   - kmem_cache_destroy(orphan_entry_slab);
  970 + kmem_cache_destroy(ino_entry_slab);
967 971 return -ENOMEM;
968 972 }
969 973 return 0;
... ... @@ -971,7 +975,7 @@
971 975  
972 976 void destroy_checkpoint_caches(void)
973 977 {
974   - kmem_cache_destroy(orphan_entry_slab);
  978 + kmem_cache_destroy(ino_entry_slab);
975 979 kmem_cache_destroy(inode_entry_slab);
976 980 }
... ... @@ -167,7 +167,7 @@
167 167 si->cache_mem += npages << PAGE_CACHE_SHIFT;
168 168 npages = META_MAPPING(sbi)->nrpages;
169 169 si->cache_mem += npages << PAGE_CACHE_SHIFT;
170   - si->cache_mem += sbi->n_orphans * sizeof(struct orphan_inode_entry);
  170 + si->cache_mem += sbi->n_orphans * sizeof(struct ino_entry);
171 171 si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry);
172 172 }
173 173  
... ... @@ -100,8 +100,13 @@
100 100 META_SSA
101 101 };
102 102  
103   -/* for the list of orphan inodes */
104   -struct orphan_inode_entry {
  103 +/* for the list of ino */
  104 +enum {
  105 + ORPHAN_INO, /* for orphan ino list */
  106 + MAX_INO_ENTRY, /* max. list */
  107 +};
  108 +
  109 +struct ino_entry {
105 110 struct list_head list; /* list head */
106 111 nid_t ino; /* inode number */
107 112 };
... ... @@ -450,9 +455,11 @@
450 455 bool por_doing; /* recovery is doing or not */
451 456 wait_queue_head_t cp_wait;
452 457  
453   - /* for orphan inode management */
454   - struct list_head orphan_inode_list; /* orphan inode list */
455   - spinlock_t orphan_inode_lock; /* for orphan inode list */
  458 + /* for inode management */
  459 + spinlock_t ino_lock[MAX_INO_ENTRY]; /* for ino entry lock */
  460 + struct list_head ino_list[MAX_INO_ENTRY]; /* inode list head */
  461 +
  462 + /* for orphan inode, use 0'th array */
456 463 unsigned int n_orphans; /* # of orphan inodes */
457 464 unsigned int max_orphans; /* max orphan inodes */
458 465  
... ... @@ -1255,7 +1262,7 @@
1255 1262 void remove_dirty_dir_inode(struct inode *);
1256 1263 void sync_dirty_dir_inodes(struct f2fs_sb_info *);
1257 1264 void write_checkpoint(struct f2fs_sb_info *, bool);
1258   -void init_orphan_info(struct f2fs_sb_info *);
  1265 +void init_ino_entry_info(struct f2fs_sb_info *);
1259 1266 int __init create_checkpoint_caches(void);
1260 1267 void destroy_checkpoint_caches(void);
1261 1268  
... ... @@ -1003,7 +1003,7 @@
1003 1003 INIT_LIST_HEAD(&sbi->dir_inode_list);
1004 1004 spin_lock_init(&sbi->dir_inode_lock);
1005 1005  
1006   - init_orphan_info(sbi);
  1006 + init_ino_entry_info(sbi);
1007 1007  
1008 1008 /* setup f2fs internal modules */
1009 1009 err = build_segment_manager(sbi);