Commit ee99c71c59f897436ec65debb99372b3146f9985

Authored by KOSAKI Motohiro
Committed by Linus Torvalds
1 parent a6dc60f897

mm: introduce for_each_populated_zone() macro

Impact: cleanup

In almost all cases, for_each_zone() is used together with populated_zone(),
because most functions don't need information about memoryless nodes.
Therefore, for_each_populated_zone() helps simplify the code.

This patch has no functional change.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 27 additions and 48 deletions Side-by-side Diff

include/linux/mmzone.h
... ... @@ -806,6 +806,14 @@
806 806 zone; \
807 807 zone = next_zone(zone))
808 808  
  809 +#define for_each_populated_zone(zone) \
  810 + for (zone = (first_online_pgdat())->node_zones; \
  811 + zone; \
  812 + zone = next_zone(zone)) \
  813 + if (!populated_zone(zone)) \
  814 + ; /* do nothing */ \
  815 + else
  816 +
809 817 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
810 818 {
811 819 return zoneref->zone;
kernel/power/snapshot.c
... ... @@ -321,13 +321,10 @@
321 321  
322 322 INIT_LIST_HEAD(list);
323 323  
324   - for_each_zone(zone) {
  324 + for_each_populated_zone(zone) {
325 325 unsigned long zone_start, zone_end;
326 326 struct mem_extent *ext, *cur, *aux;
327 327  
328   - if (!populated_zone(zone))
329   - continue;
330   -
331 328 zone_start = zone->zone_start_pfn;
332 329 zone_end = zone->zone_start_pfn + zone->spanned_pages;
333 330  
... ... @@ -804,8 +801,8 @@
804 801 struct zone *zone;
805 802 unsigned int cnt = 0;
806 803  
807   - for_each_zone(zone)
808   - if (populated_zone(zone) && is_highmem(zone))
  804 + for_each_populated_zone(zone)
  805 + if (is_highmem(zone))
809 806 cnt += zone_page_state(zone, NR_FREE_PAGES);
810 807  
811 808 return cnt;
kernel/power/swsusp.c
... ... @@ -229,17 +229,16 @@
229 229 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
230 230 tmp = size;
231 231 size += highmem_size;
232   - for_each_zone (zone)
233   - if (populated_zone(zone)) {
234   - tmp += snapshot_additional_pages(zone);
235   - if (is_highmem(zone)) {
236   - highmem_size -=
  232 + for_each_populated_zone(zone) {
  233 + tmp += snapshot_additional_pages(zone);
  234 + if (is_highmem(zone)) {
  235 + highmem_size -=
237 236 zone_page_state(zone, NR_FREE_PAGES);
238   - } else {
239   - tmp -= zone_page_state(zone, NR_FREE_PAGES);
240   - tmp += zone->lowmem_reserve[ZONE_NORMAL];
241   - }
  237 + } else {
  238 + tmp -= zone_page_state(zone, NR_FREE_PAGES);
  239 + tmp += zone->lowmem_reserve[ZONE_NORMAL];
242 240 }
  241 + }
243 242  
244 243 if (highmem_size < 0)
245 244 highmem_size = 0;
... ... @@ -922,13 +922,10 @@
922 922 unsigned long flags;
923 923 struct zone *zone;
924 924  
925   - for_each_zone(zone) {
  925 + for_each_populated_zone(zone) {
926 926 struct per_cpu_pageset *pset;
927 927 struct per_cpu_pages *pcp;
928 928  
929   - if (!populated_zone(zone))
930   - continue;
931   -
932 929 pset = zone_pcp(zone, cpu);
933 930  
934 931 pcp = &pset->pcp;
... ... @@ -1879,10 +1876,7 @@
1879 1876 int cpu;
1880 1877 struct zone *zone;
1881 1878  
1882   - for_each_zone(zone) {
1883   - if (!populated_zone(zone))
1884   - continue;
1885   -
  1879 + for_each_populated_zone(zone) {
1886 1880 show_node(zone);
1887 1881 printk("%s per-cpu:\n", zone->name);
1888 1882  
1889 1883  
... ... @@ -1922,12 +1916,9 @@
1922 1916 global_page_state(NR_PAGETABLE),
1923 1917 global_page_state(NR_BOUNCE));
1924 1918  
1925   - for_each_zone(zone) {
  1919 + for_each_populated_zone(zone) {
1926 1920 int i;
1927 1921  
1928   - if (!populated_zone(zone))
1929   - continue;
1930   -
1931 1922 show_node(zone);
1932 1923 printk("%s"
1933 1924 " free:%lukB"
1934 1925  
... ... @@ -1967,12 +1958,9 @@
1967 1958 printk("\n");
1968 1959 }
1969 1960  
1970   - for_each_zone(zone) {
  1961 + for_each_populated_zone(zone) {
1971 1962 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1972 1963  
1973   - if (!populated_zone(zone))
1974   - continue;
1975   -
1976 1964 show_node(zone);
1977 1965 printk("%s: ", zone->name);
1978 1966  
... ... @@ -2784,11 +2772,7 @@
2784 2772  
2785 2773 node_set_state(node, N_CPU); /* this node has a cpu */
2786 2774  
2787   - for_each_zone(zone) {
2788   -
2789   - if (!populated_zone(zone))
2790   - continue;
2791   -
  2775 + for_each_populated_zone(zone) {
2792 2776 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2793 2777 GFP_KERNEL, node);
2794 2778 if (!zone_pcp(zone, cpu))
... ... @@ -2061,11 +2061,9 @@
2061 2061 struct zone *zone;
2062 2062 unsigned long ret = 0;
2063 2063  
2064   - for_each_zone(zone) {
  2064 + for_each_populated_zone(zone) {
2065 2065 enum lru_list l;
2066 2066  
2067   - if (!populated_zone(zone))
2068   - continue;
2069 2067 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
2070 2068 continue;
2071 2069  
... ... @@ -135,11 +135,7 @@
135 135 int cpu;
136 136 int threshold;
137 137  
138   - for_each_zone(zone) {
139   -
140   - if (!zone->present_pages)
141   - continue;
142   -
  138 + for_each_populated_zone(zone) {
143 139 threshold = calculate_threshold(zone);
144 140  
145 141 for_each_online_cpu(cpu)
146 142  
... ... @@ -301,11 +297,8 @@
301 297 int i;
302 298 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
303 299  
304   - for_each_zone(zone) {
  300 + for_each_populated_zone(zone) {
305 301 struct per_cpu_pageset *p;
306   -
307   - if (!populated_zone(zone))
308   - continue;
309 302  
310 303 p = zone_pcp(zone, cpu);
311 304