Commit de5604231ce4bc8db1bc1dcd27d8540cbedf1518

Authored by Nick Piggin
Committed by Linus Torvalds
1 parent 489b24f2cb

mm: percpu-vmap fix RCU list walking

RCU list walking of the per-cpu vmap cache was broken.  It did not use
RCU primitives, and also the union of free_list and rcu_head is
obviously wrong (because free_list is indeed the list we are RCU
walking).

While we are there, remove a couple of unused fields from an earlier
iteration.

These APIs aren't actually used anywhere, because of problems with the
XFS conversion.  Christoph has now verified that the problems are solved
with these patches.  Also it is an exported interface, so I think it
will be good to be merged now (and Christoph wants to get the XFS
changes into their local tree).

Cc: stable@kernel.org
Cc: linux-mm@kvack.org
Tested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Nick Piggin <npiggin@suse.de>
---
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 6 additions and 14 deletions (side-by-side diff)

... ... @@ -667,8 +667,6 @@
667 667 struct vmap_block_queue {
668 668 spinlock_t lock;
669 669 struct list_head free;
670   - struct list_head dirty;
671   - unsigned int nr_dirty;
672 670 };
673 671  
674 672 struct vmap_block {
... ... @@ -678,10 +676,8 @@
678 676 unsigned long free, dirty;
679 677 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
680 678 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
681   - union {
682   - struct list_head free_list;
683   - struct rcu_head rcu_head;
684   - };
  679 + struct list_head free_list;
  680 + struct rcu_head rcu_head;
685 681 };
686 682  
687 683 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
... ... @@ -757,7 +753,7 @@
757 753 vbq = &get_cpu_var(vmap_block_queue);
758 754 vb->vbq = vbq;
759 755 spin_lock(&vbq->lock);
760   - list_add(&vb->free_list, &vbq->free);
  756 + list_add_rcu(&vb->free_list, &vbq->free);
761 757 spin_unlock(&vbq->lock);
762 758 put_cpu_var(vmap_block_queue);
763 759  
... ... @@ -776,8 +772,6 @@
776 772 struct vmap_block *tmp;
777 773 unsigned long vb_idx;
778 774  
779   - BUG_ON(!list_empty(&vb->free_list));
780   -
781 775 vb_idx = addr_to_vb_idx(vb->va->va_start);
782 776 spin_lock(&vmap_block_tree_lock);
783 777 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
... ... @@ -816,7 +810,7 @@
816 810 vb->free -= 1UL << order;
817 811 if (vb->free == 0) {
818 812 spin_lock(&vbq->lock);
819   - list_del_init(&vb->free_list);
  813 + list_del_rcu(&vb->free_list);
820 814 spin_unlock(&vbq->lock);
821 815 }
822 816 spin_unlock(&vb->lock);
823 817  
... ... @@ -860,11 +854,11 @@
860 854 BUG_ON(!vb);
861 855  
862 856 spin_lock(&vb->lock);
863   - bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
  857 + BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
864 858  
865 859 vb->dirty += 1UL << order;
866 860 if (vb->dirty == VMAP_BBMAP_BITS) {
867   - BUG_ON(vb->free || !list_empty(&vb->free_list));
  861 + BUG_ON(vb->free);
868 862 spin_unlock(&vb->lock);
869 863 free_vmap_block(vb);
870 864 } else
... ... @@ -1033,8 +1027,6 @@
1033 1027 vbq = &per_cpu(vmap_block_queue, i);
1034 1028 spin_lock_init(&vbq->lock);
1035 1029 INIT_LIST_HEAD(&vbq->free);
1036   - INIT_LIST_HEAD(&vbq->dirty);
1037   - vbq->nr_dirty = 0;
1038 1030 }
1039 1031  
1040 1032 /* Import existing vmlist entries. */