Commit 8869b8f6e09a1b49bf915eb03f663f2e4e8fbcd4

Authored by Hugh Dickins
Committed by Linus Torvalds
1 parent 8289546e57

memcg: memcontrol whitespace cleanups

Sorry, before getting down to more important changes, I'd like to do some
cleanup in memcontrol.c.  This patch doesn't change the code generated, but
cleans up whitespace, moves up a double declaration, removes an unused enum,
removes void returns, removes misleading comments, that kind of thing.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
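
For readers unfamiliar with this kind of patch, the cleanups described above look roughly like the toy C example below (illustrative only, not code from memcontrol.c): a pointless temporary is collapsed into a direct return, and a redundant trailing "return;" is dropped from a void function, with no change to the generated code.

#include <stdio.h>

/*
 * Illustrative toy code, not from memcontrol.c.
 * Before: a temporary "ret" that adds nothing, and a redundant
 * trailing "return;" at the end of a void function.
 */
static int double_it_before(int key)
{
        int ret = 0;
        ret = key * 2;
        return ret;
}

static void report_before(int key)
{
        printf("%d\n", double_it_before(key));
        return;
}

/* After: the same behaviour (and generated code), with less clutter. */
static int double_it_after(int key)
{
        return key * 2;
}

static void report_after(int key)
{
        printf("%d\n", double_it_after(key));
}

int main(void)
{
        report_before(3);
        report_after(3);
        return 0;
}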

Showing 1 changed file with 28 additions and 66 deletions

... ... @@ -137,6 +137,7 @@
137 137 */
138 138 struct mem_cgroup_stat stat;
139 139 };
  140 +static struct mem_cgroup init_mem_cgroup;
140 141  
141 142 /*
142 143 * We use the lower bit of the page->page_cgroup pointer as a bit spin
... ... @@ -162,7 +163,7 @@
162 163 struct mem_cgroup *mem_cgroup;
163 164 atomic_t ref_cnt; /* Helpful when pages move b/w */
164 165 /* mapped and cached states */
165   - int flags;
  166 + int flags;
166 167 };
167 168 #define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
168 169 #define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */
169 170  
... ... @@ -177,20 +178,11 @@
177 178 return page_zonenum(pc->page);
178 179 }
179 180  
180   -enum {
181   - MEM_CGROUP_TYPE_UNSPEC = 0,
182   - MEM_CGROUP_TYPE_MAPPED,
183   - MEM_CGROUP_TYPE_CACHED,
184   - MEM_CGROUP_TYPE_ALL,
185   - MEM_CGROUP_TYPE_MAX,
186   -};
187   -
188 181 enum charge_type {
189 182 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
190 183 MEM_CGROUP_CHARGE_TYPE_MAPPED,
191 184 };
192 185  
193   -
194 186 /*
195 187 * Always modified under lru lock. Then, not necessary to preempt_disable()
196 188 */
... ... @@ -199,11 +191,10 @@
199 191 {
200 192 int val = (charge)? 1 : -1;
201 193 struct mem_cgroup_stat *stat = &mem->stat;
202   - VM_BUG_ON(!irqs_disabled());
203 194  
  195 + VM_BUG_ON(!irqs_disabled());
204 196 if (flags & PAGE_CGROUP_FLAG_CACHE)
205   - __mem_cgroup_stat_add_safe(stat,
206   - MEM_CGROUP_STAT_CACHE, val);
  197 + __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
207 198 else
208 199 __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
209 200 }
... ... @@ -240,8 +231,6 @@
240 231 return total;
241 232 }
242 233  
243   -static struct mem_cgroup init_mem_cgroup;
244   -
245 234 static inline
246 235 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
247 236 {
... ... @@ -273,8 +262,7 @@
273 262  
274 263 static inline int page_cgroup_locked(struct page *page)
275 264 {
276   - return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
277   - &page->page_cgroup);
  265 + return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
278 266 }
279 267  
280 268 static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
... ... @@ -285,8 +273,7 @@
285 273  
286 274 struct page_cgroup *page_get_page_cgroup(struct page *page)
287 275 {
288   - return (struct page_cgroup *)
289   - (page->page_cgroup & ~PAGE_CGROUP_LOCK);
  276 + return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
290 277 }
291 278  
292 279 static void __always_inline lock_page_cgroup(struct page *page)
... ... @@ -308,7 +295,6 @@
308 295 * A can can detect failure of clearing by following
309 296 * clear_page_cgroup(page, pc) == pc
310 297 */
311   -
312 298 static struct page_cgroup *clear_page_cgroup(struct page *page,
313 299 struct page_cgroup *pc)
314 300 {
... ... @@ -417,6 +403,7 @@
417 403 rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
418 404 return (int)((rss * 100L) / total);
419 405 }
  406 +
420 407 /*
421 408 * This function is called from vmscan.c. In page reclaiming loop. balance
422 409 * between active and inactive list is calculated. For memory controller
... ... @@ -480,7 +467,6 @@
480 467 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
481 468  
482 469 nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
483   -
484 470 return (nr_inactive >> priority);
485 471 }
486 472  
487 473  
... ... @@ -601,16 +587,11 @@
601 587 rcu_read_lock();
602 588 mem = rcu_dereference(mm->mem_cgroup);
603 589 /*
604   - * For every charge from the cgroup, increment reference
605   - * count
  590 + * For every charge from the cgroup, increment reference count
606 591 */
607 592 css_get(&mem->css);
608 593 rcu_read_unlock();
609 594  
610   - /*
611   - * If we created the page_cgroup, we should free it on exceeding
612   - * the cgroup limit.
613   - */
614 595 while (res_counter_charge(&mem->res, PAGE_SIZE)) {
615 596 if (!(gfp_mask & __GFP_WAIT))
616 597 goto out;
... ... @@ -619,12 +600,12 @@
619 600 continue;
620 601  
621 602 /*
622   - * try_to_free_mem_cgroup_pages() might not give us a full
623   - * picture of reclaim. Some pages are reclaimed and might be
624   - * moved to swap cache or just unmapped from the cgroup.
625   - * Check the limit again to see if the reclaim reduced the
626   - * current usage of the cgroup before giving up
627   - */
  603 + * try_to_free_mem_cgroup_pages() might not give us a full
  604 + * picture of reclaim. Some pages are reclaimed and might be
  605 + * moved to swap cache or just unmapped from the cgroup.
  606 + * Check the limit again to see if the reclaim reduced the
  607 + * current usage of the cgroup before giving up
  608 + */
628 609 if (res_counter_check_under_limit(&mem->res))
629 610 continue;
630 611  
... ... @@ -660,7 +641,6 @@
660 641  
661 642 mz = page_cgroup_zoneinfo(pc);
662 643 spin_lock_irqsave(&mz->lru_lock, flags);
663   - /* Update statistics vector */
664 644 __mem_cgroup_add_list(pc);
665 645 spin_unlock_irqrestore(&mz->lru_lock, flags);
666 646  
... ... @@ -673,26 +653,19 @@
673 653 return -ENOMEM;
674 654 }
675 655  
676   -int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
677   - gfp_t gfp_mask)
  656 +int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
678 657 {
679 658 return mem_cgroup_charge_common(page, mm, gfp_mask,
680   - MEM_CGROUP_CHARGE_TYPE_MAPPED);
  659 + MEM_CGROUP_CHARGE_TYPE_MAPPED);
681 660 }
682 661  
683   -/*
684   - * See if the cached pages should be charged at all?
685   - */
686 662 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
687 663 gfp_t gfp_mask)
688 664 {
689   - int ret = 0;
690 665 if (!mm)
691 666 mm = &init_mm;
692   -
693   - ret = mem_cgroup_charge_common(page, mm, gfp_mask,
  667 + return mem_cgroup_charge_common(page, mm, gfp_mask,
694 668 MEM_CGROUP_CHARGE_TYPE_CACHE);
695   - return ret;
696 669 }
697 670  
698 671 /*
699 672  
... ... @@ -742,11 +715,11 @@
742 715 * Returns non-zero if a page (under migration) has valid page_cgroup member.
743 716 * Refcnt of page_cgroup is incremented.
744 717 */
745   -
746 718 int mem_cgroup_prepare_migration(struct page *page)
747 719 {
748 720 struct page_cgroup *pc;
749 721 int ret = 0;
  722 +
750 723 lock_page_cgroup(page);
751 724 pc = page_get_page_cgroup(page);
752 725 if (pc && atomic_inc_not_zero(&pc->ref_cnt))
... ... @@ -759,28 +732,30 @@
759 732 {
760 733 mem_cgroup_uncharge_page(page);
761 734 }
  735 +
762 736 /*
763   - * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
  737 + * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
764 738 * And no race with uncharge() routines because page_cgroup for *page*
765 739 * has extra one reference by mem_cgroup_prepare_migration.
766 740 */
767   -
768 741 void mem_cgroup_page_migration(struct page *page, struct page *newpage)
769 742 {
770 743 struct page_cgroup *pc;
771 744 struct mem_cgroup *mem;
772 745 unsigned long flags;
773 746 struct mem_cgroup_per_zone *mz;
  747 +
774 748 retry:
775 749 pc = page_get_page_cgroup(page);
776 750 if (!pc)
777 751 return;
  752 +
778 753 mem = pc->mem_cgroup;
779 754 mz = page_cgroup_zoneinfo(pc);
780 755 if (clear_page_cgroup(page, pc) != pc)
781 756 goto retry;
782   - spin_lock_irqsave(&mz->lru_lock, flags);
783 757  
  758 + spin_lock_irqsave(&mz->lru_lock, flags);
784 759 __mem_cgroup_remove_list(pc);
785 760 spin_unlock_irqrestore(&mz->lru_lock, flags);
786 761  
... ... @@ -793,7 +768,6 @@
793 768 spin_lock_irqsave(&mz->lru_lock, flags);
794 769 __mem_cgroup_add_list(pc);
795 770 spin_unlock_irqrestore(&mz->lru_lock, flags);
796   - return;
797 771 }
798 772  
799 773 /*
... ... @@ -802,8 +776,7 @@
802 776 * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
803 777 */
804 778 #define FORCE_UNCHARGE_BATCH (128)
805   -static void
806   -mem_cgroup_force_empty_list(struct mem_cgroup *mem,
  779 +static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
807 780 struct mem_cgroup_per_zone *mz,
808 781 int active)
809 782 {
... ... @@ -837,27 +810,27 @@
837 810 } else /* being uncharged ? ...do relax */
838 811 break;
839 812 }
  813 +
840 814 spin_unlock_irqrestore(&mz->lru_lock, flags);
841 815 if (!list_empty(list)) {
842 816 cond_resched();
843 817 goto retry;
844 818 }
845   - return;
846 819 }
847 820  
848 821 /*
849 822 * make mem_cgroup's charge to be 0 if there is no task.
850 823 * This enables deleting this mem_cgroup.
851 824 */
852   -
853 825 int mem_cgroup_force_empty(struct mem_cgroup *mem)
854 826 {
855 827 int ret = -EBUSY;
856 828 int node, zid;
  829 +
857 830 css_get(&mem->css);
858 831 /*
859 832 * page reclaim code (kswapd etc..) will move pages between
860   -` * active_list <-> inactive_list while we don't take a lock.
  833 + * active_list <-> inactive_list while we don't take a lock.
861 834 * So, we have to do loop here until all lists are empty.
862 835 */
863 836 while (mem->res.usage > 0) {
... ... @@ -879,8 +852,6 @@
879 852 return ret;
880 853 }
881 854  
882   -
883   -
884 855 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
885 856 {
886 857 *tmp = memparse(buf, &buf);
... ... @@ -918,8 +889,7 @@
918 889 size_t nbytes, loff_t *ppos)
919 890 {
920 891 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
921   - int ret;
922   - ret = mem_cgroup_force_empty(mem);
  892 + int ret = mem_cgroup_force_empty(mem);
923 893 if (!ret)
924 894 ret = nbytes;
925 895 return ret;
... ... @@ -928,7 +898,6 @@
928 898 /*
929 899 * Note: This should be removed if cgroup supports write-only file.
930 900 */
931   -
932 901 static ssize_t mem_force_empty_read(struct cgroup *cont,
933 902 struct cftype *cft,
934 903 struct file *file, char __user *userbuf,
... ... @@ -937,7 +906,6 @@
937 906 return -EINVAL;
938 907 }
939 908  
940   -
941 909 static const struct mem_cgroup_stat_desc {
942 910 const char *msg;
943 911 u64 unit;
... ... @@ -990,8 +958,6 @@
990 958 return single_open(file, mem_control_stat_show, cont);
991 959 }
992 960  
993   -
994   -
995 961 static struct cftype mem_cgroup_files[] = {
996 962 {
997 963 .name = "usage_in_bytes",
... ... @@ -1057,9 +1023,6 @@
1057 1023 kfree(mem->info.nodeinfo[node]);
1058 1024 }
1059 1025  
1060   -
1061   -static struct mem_cgroup init_mem_cgroup;
1062   -
1063 1026 static struct cgroup_subsys_state *
1064 1027 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1065 1028 {
... ... @@ -1149,7 +1112,6 @@
1149 1112  
1150 1113 out:
1151 1114 mmput(mm);
1152   - return;
1153 1115 }
1154 1116  
1155 1117 struct cgroup_subsys mem_cgroup_subsys = {
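
The charge entry points touched by the diff follow a simple shape once tidied: two thin wrappers delegate to one common helper, selected by an enum charge type, with the cache path falling back to init_mm when no mm is supplied. Below is a standalone C sketch of that shape, using stub types and simplified names rather than the real kernel definitions:

#include <stdio.h>

enum charge_type {
        CHARGE_TYPE_CACHE = 0,
        CHARGE_TYPE_MAPPED,
};

struct page { int dummy; };
struct mm_struct { int dummy; };

static struct mm_struct init_mm;        /* stub stand-in for the kernel's init_mm */

/* One common helper holds the shared logic; callers only pick the type. */
static int charge_common(struct page *page, struct mm_struct *mm,
                         enum charge_type ctype)
{
        (void)page;
        (void)mm;
        printf("charging as %s\n",
               ctype == CHARGE_TYPE_CACHE ? "cache" : "mapped");
        return 0;
}

/* Wrapper for mapped pages: forwards straight to the helper. */
static int charge_mapped(struct page *page, struct mm_struct *mm)
{
        return charge_common(page, mm, CHARGE_TYPE_MAPPED);
}

/* Wrapper for page-cache pages: substitute a default mm, then forward. */
static int charge_cache(struct page *page, struct mm_struct *mm)
{
        if (!mm)
                mm = &init_mm;
        return charge_common(page, mm, CHARGE_TYPE_CACHE);
}

int main(void)
{
        struct page p = { 0 };

        charge_mapped(&p, NULL);
        charge_cache(&p, NULL);         /* NULL mm falls back to init_mm */
        return 0;
}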