Commit c33bc315fd921b1179a1d3df5756e0da6fb73944
Committed by
Linus Torvalds
1 parent
674470d979
Exists in
master
and in
20 other branches
mm: use zone_end_pfn() instead of zone_start_pfn+spanned_pages
Use "zone_end_pfn()" instead of "zone->zone_start_pfn + zone->spanned_pages". Simplify the code, no functional change.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Cody P Schafer <cody@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 2 changed files with 10 additions and 9 deletions (side-by-side diff).
kernel/power/snapshot.c
... | ... | @@ -352,7 +352,7 @@ |
352 | 352 | struct mem_extent *ext, *cur, *aux; |
353 | 353 | |
354 | 354 | zone_start = zone->zone_start_pfn; |
355 | - zone_end = zone->zone_start_pfn + zone->spanned_pages; | |
355 | + zone_end = zone_end_pfn(zone); | |
356 | 356 | |
357 | 357 | list_for_each_entry(ext, list, hook) |
358 | 358 | if (zone_start <= ext->end) |
... | ... | @@ -884,7 +884,7 @@ |
884 | 884 | continue; |
885 | 885 | |
886 | 886 | mark_free_pages(zone); |
887 | - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
887 | + max_zone_pfn = zone_end_pfn(zone); | |
888 | 888 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
889 | 889 | if (saveable_highmem_page(zone, pfn)) |
890 | 890 | n++; |
... | ... | @@ -948,7 +948,7 @@ |
948 | 948 | continue; |
949 | 949 | |
950 | 950 | mark_free_pages(zone); |
951 | - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
951 | + max_zone_pfn = zone_end_pfn(zone); | |
952 | 952 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
953 | 953 | if (saveable_page(zone, pfn)) |
954 | 954 | n++; |
... | ... | @@ -1041,7 +1041,7 @@ |
1041 | 1041 | unsigned long max_zone_pfn; |
1042 | 1042 | |
1043 | 1043 | mark_free_pages(zone); |
1044 | - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
1044 | + max_zone_pfn = zone_end_pfn(zone); | |
1045 | 1045 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1046 | 1046 | if (page_is_saveable(zone, pfn)) |
1047 | 1047 | memory_bm_set_bit(orig_bm, pfn); |
... | ... | @@ -1093,7 +1093,7 @@ |
1093 | 1093 | unsigned long pfn, max_zone_pfn; |
1094 | 1094 | |
1095 | 1095 | for_each_populated_zone(zone) { |
1096 | - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
1096 | + max_zone_pfn = zone_end_pfn(zone); | |
1097 | 1097 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1098 | 1098 | if (pfn_valid(pfn)) { |
1099 | 1099 | struct page *page = pfn_to_page(pfn); |
... | ... | @@ -1755,7 +1755,7 @@ |
1755 | 1755 | |
1756 | 1756 | /* Clear page flags */ |
1757 | 1757 | for_each_populated_zone(zone) { |
1758 | - max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
1758 | + max_zone_pfn = zone_end_pfn(zone); | |
1759 | 1759 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1760 | 1760 | if (pfn_valid(pfn)) |
1761 | 1761 | swsusp_unset_page_free(pfn_to_page(pfn)); |
mm/memory_hotplug.c
... | ... | @@ -229,7 +229,7 @@ |
229 | 229 | |
230 | 230 | zone_span_writelock(zone); |
231 | 231 | |
232 | - old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
232 | + old_zone_end_pfn = zone_end_pfn(zone); | |
233 | 233 | if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn) |
234 | 234 | zone->zone_start_pfn = start_pfn; |
235 | 235 | |
... | ... | @@ -514,8 +514,9 @@ |
514 | 514 | static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, |
515 | 515 | unsigned long end_pfn) |
516 | 516 | { |
517 | - unsigned long zone_start_pfn = zone->zone_start_pfn; | |
518 | - unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
517 | + unsigned long zone_start_pfn = zone->zone_start_pfn; | |
518 | + unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ | |
519 | + unsigned long zone_end_pfn = z; | |
519 | 520 | unsigned long pfn; |
520 | 521 | struct mem_section *ms; |
521 | 522 | int nid = zone_to_nid(zone); |