Commit bc357f431c836c6631751e3ef7dfe7882394ad67
Committed by: Linus Torvalds
Parent: 2e30abd173
mm: cma: remove watermark hacks
Commits 2139cbe627b8 ("cma: fix counting of isolated pages") and d95ea5d18e69 ("cma: fix watermark checking") introduced a reliable method of free page accounting when memory is being allocated from CMA regions, so the workaround introduced earlier by commit 49f223a9cd96 ("mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks") can finally be removed.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
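For context, the "reliable method of free page accounting" the message leans on works roughly as sketched below. This is an editor's paraphrase of commit d95ea5d18e69, not code from this diff: free pages sitting in MIGRATE_CMA pageblocks are tracked in the NR_FREE_CMA_PAGES vmstat counter and are discounted whenever the caller is not allowed to allocate from CMA, which keeps watermark checks honest without any artificial bump.

/*
 * Editor's sketch, not part of this diff: the core of the accounting
 * that replaces the watermark hack (paraphrased from commit
 * d95ea5d18e69). The function name is hypothetical; ALLOC_CMA,
 * zone_page_state() and NR_FREE_CMA_PAGES are identifiers that commit
 * actually introduced or uses.
 */
static bool cma_aware_watermark_ok(struct zone *z, unsigned long mark,
				   int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	/* Allocations that may not use CMA must not count CMA free pages. */
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);

	/* Simplified: the real check also honours lowmem_reserve and order. */
	return free_pages > mark;
}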
Showing 2 changed files with 0 additions and 67 deletions
include/linux/mmzone.h
@@ -63,10 +63,8 @@
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
-# define cma_wmark_pages(zone)	zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
-# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -382,13 +380,6 @@
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
-#endif
-#ifdef CONFIG_CMA
-	/*
-	 * CMA needs to increase watermark levels during the allocation
-	 * process to make sure that the system is not starved.
-	 */
-	unsigned long min_cma_pages;
 #endif
 	struct free_area free_area[MAX_ORDER];
 
mm/page_alloc.c
@@ -5218,10 +5218,6 @@
 	zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
 	zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
-	zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
-	zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
-	zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
-
 	setup_zone_migrate_reserve(zone);
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
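Note on the kept lines: with the cma_wmark_pages() bump gone, WMARK_LOW and WMARK_HIGH are derived purely from the min watermark, at min + tmp/4 and min + tmp/2 respectively. For a zone where tmp comes to 4096 pages, that gives low = 5120 and high = 6144 pages; CMA no longer inflates any of the three levels.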
@@ -5766,54 +5762,6 @@
 	return ret > 0 ? 0 : ret;
 }
 
-/*
- * Update zone's cma pages counter used for watermark level calculation.
- */
-static inline void __update_cma_watermarks(struct zone *zone, int count)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	zone->min_cma_pages += count;
-	spin_unlock_irqrestore(&zone->lock, flags);
-	setup_per_zone_wmarks();
-}
-
-/*
- * Trigger memory pressure bump to reclaim some pages in order to be able to
- * allocate 'count' pages in single page units. Does similar work as
- * __alloc_pages_slowpath() function.
- */
-static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
-{
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
-	int did_some_progress = 0;
-	int order = 1;
-
-	/*
-	 * Increase level of watermarks to force kswapd do his job
-	 * to stabilise at new watermark level.
-	 */
-	__update_cma_watermarks(zone, count);
-
-	/* Obey watermarks as if the page was being allocated */
-	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
-		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
-
-		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-						      NULL);
-		if (!did_some_progress) {
-			/* Exhausted what can be done so it's blamo time */
-			out_of_memory(zonelist, gfp_mask, order, NULL, false);
-		}
-	}
-
-	/* Restore original watermark levels. */
-	__update_cma_watermarks(zone, -count);
-
-	return count;
-}
-
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
@@ -5837,7 +5785,6 @@
 int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
-	struct zone *zone = page_zone(pfn_to_page(start));
 	unsigned long outer_start, outer_end;
 	int ret = 0, order;
 
@@ -5922,11 +5869,6 @@
 		goto done;
 	}
 
-	/*
-	 * Reclaim enough pages to make sure that contiguous allocation
-	 * will not starve the system.
-	 */
-	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
 
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);
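With the hack gone, the local zone variable and the reclaim bump disappear from alloc_contig_range(): isolation, migration, and the CMA-aware free page accounting do the whole job. For orientation, a minimal caller sketch in the style of drivers/base/dma-contiguous.c; it assumes the PFN range was reserved and its pageblocks marked MIGRATE_CMA at boot, and the sketch_* names are hypothetical while alloc_contig_range() and free_contig_range() are the real entry points touched above.

/*
 * Editor's sketch, not part of this commit: allocate and release
 * `count` contiguous pages from a region set up as MIGRATE_CMA,
 * the way dma-contiguous does.
 */
static struct page *sketch_cma_alloc(unsigned long base_pfn,
				     unsigned long count)
{
	/* Isolate, migrate out, and claim the PFN range. */
	if (alloc_contig_range(base_pfn, base_pfn + count, MIGRATE_CMA))
		return NULL;
	return pfn_to_page(base_pfn);
}

static void sketch_cma_release(struct page *page, unsigned long count)
{
	free_contig_range(page_to_pfn(page), count);
}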