mm/cma.c

  // SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Contiguous Memory Allocator
   *
   * Copyright (c) 2010-2011 by Samsung Electronics.
   * Copyright IBM Corporation, 2013
   * Copyright LG Electronics Inc., 2014
   * Written by:
   *	Marek Szyprowski <m.szyprowski@samsung.com>
   *	Michal Nazarewicz <mina86@mina86.com>
   *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
   */
  
  #define pr_fmt(fmt) "cma: " fmt
  
  #ifdef CONFIG_CMA_DEBUG
  #ifndef DEBUG
  #  define DEBUG
  #endif
  #endif
  #define CREATE_TRACE_POINTS
  
  #include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/sizes.h>
  #include <linux/slab.h>
  #include <linux/log2.h>
  #include <linux/cma.h>
  #include <linux/highmem.h>
  #include <linux/io.h>
  #include <linux/kmemleak.h>
  #include <trace/events/cma.h>

  #include "cma.h"
  
  struct cma cma_areas[MAX_CMA_AREAS];
  unsigned cma_area_count;
  static DEFINE_MUTEX(cma_mutex);
  phys_addr_t cma_get_base(const struct cma *cma)
  {
  	return PFN_PHYS(cma->base_pfn);
  }
  unsigned long cma_get_size(const struct cma *cma)
  {
  	return cma->count << PAGE_SHIFT;
  }
  const char *cma_get_name(const struct cma *cma)
  {
  	return cma->name ? cma->name : "(undefined)";
  }
  EXPORT_SYMBOL_GPL(cma_get_name);

  static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
  					     unsigned int align_order)
  {
  	if (align_order <= cma->order_per_bit)
  		return 0;
  	return (1UL << (align_order - cma->order_per_bit)) - 1;
  }
  /*
   * Find the offset of the base PFN from the specified align_order.
   * The value returned is represented in order_per_bit units.
   */
  static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
  					       unsigned int align_order)
  {
  	return (cma->base_pfn & ((1UL << align_order) - 1))
  		>> cma->order_per_bit;
  }
  static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
  					      unsigned long pages)
  {
  	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
  }
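
  /*
   * Worked example (illustrative only, not part of the original file): with
   * order_per_bit = 1, each bitmap bit covers two pages.  For a request with
   * align_order = 3 (an 8-page alignment),
   *
   *	cma_bitmap_aligned_mask()  returns (1UL << (3 - 1)) - 1 = 3, and
   *	cma_bitmap_pages_to_bits() maps 8 pages to 8 >> 1 = 4 bits.
   *
   * For a hypothetical area whose base_pfn is 0x12345, the base sits
   * (0x12345 & 7) >> 1 = 2 bits into its 8-page alignment block, which is
   * what cma_bitmap_aligned_offset() returns.
   */
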
  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
  			     unsigned int count)
  {
  	unsigned long bitmap_no, bitmap_count;
  
  	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  
  	mutex_lock(&cma->lock);
  	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
  	mutex_unlock(&cma->lock);
  }
  static void __init cma_activate_area(struct cma *cma)
  {
  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
  	unsigned i = cma->count >> pageblock_order;
  	struct zone *zone;
  	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
  	if (!cma->bitmap)
  		goto out_error;

  	WARN_ON_ONCE(!pfn_valid(pfn));
  	zone = page_zone(pfn_to_page(pfn));
  	do {
  		unsigned j;
  
  		base_pfn = pfn;
  		for (j = pageblock_nr_pages; j; --j, pfn++) {
  			WARN_ON_ONCE(!pfn_valid(pfn));
  			/*
  			 * alloc_contig_range requires the pfn range
  			 * specified to be in the same zone. Make this
  			 * simple by forcing the entire CMA resv range
  			 * to be in the same zone.
  			 */
  			if (page_zone(pfn_to_page(pfn)) != zone)
  				goto not_in_zone;
  		}
  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
  	} while (--i);
  
  	mutex_init(&cma->lock);
  
  #ifdef CONFIG_CMA_DEBUGFS
  	INIT_HLIST_HEAD(&cma->mem_head);
  	spin_lock_init(&cma->mem_head_lock);
  #endif
  	return;

  not_in_zone:
  	bitmap_free(cma->bitmap);
  out_error:
  	cma->count = 0;
  	pr_err("CMA area %s could not be activated
  ", cma->name);
  	return;
  }
  
  static int __init cma_init_reserved_areas(void)
  {
  	int i;
  	for (i = 0; i < cma_area_count; i++)
  		cma_activate_area(&cma_areas[i]);
  
  	return 0;
  }
  core_initcall(cma_init_reserved_areas);
  
  /**
   * cma_init_reserved_mem() - create custom contiguous area from reserved memory
   * @base: Base address of the reserved area
   * @size: Size of the reserved area (in bytes).
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @name: The name of the area. If this parameter is NULL, the name of
   *        the area will be set to "cmaN", where N is a running counter of
   *        used areas.
   * @res_cma: Pointer to store the created cma region.
   *
   * This function creates a custom contiguous area from already reserved memory.
   */
  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  				 unsigned int order_per_bit,
  				 const char *name,
  				 struct cma **res_cma)
  {
  	struct cma *cma;
  	phys_addr_t alignment;
  
  	/* Sanity checks */
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
  		pr_err("Not enough slots for CMA reserved regions!
  ");
  		return -ENOSPC;
  	}
  
  	if (!size || !memblock_is_region_reserved(base, size))
  		return -EINVAL;
  	/* ensure minimal alignment required by mm core */
  	alignment = PAGE_SIZE <<
  			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
  
  	/* alignment should be aligned with order_per_bit */
  	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  
  	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
  		return -EINVAL;
  
  	/*
  	 * Each reserved area must be initialised later, when more kernel
  	 * subsystems (like slab allocator) are available.
  	 */
  	cma = &cma_areas[cma_area_count];
  	if (name) {
  		cma->name = name;
  	} else {
  		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
  		if (!cma->name)
  			return -ENOMEM;
  	}
  	cma->base_pfn = PFN_DOWN(base);
  	cma->count = size >> PAGE_SHIFT;
  	cma->order_per_bit = order_per_bit;
  	*res_cma = cma;
  	cma_area_count++;
  	totalcma_pages += (size / PAGE_SIZE);
  
  	return 0;
  }
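
  /*
   * Usage sketch (illustrative only, not part of this file): a caller that
   * has already reserved a physical range via memblock could hand it over to
   * CMA like this; rmem_base, rmem_size and my_cma are hypothetical names.
   *
   *	static struct cma *my_cma;
   *
   *	static int __init my_region_init(phys_addr_t rmem_base,
   *					 phys_addr_t rmem_size)
   *	{
   *		return cma_init_reserved_mem(rmem_base, rmem_size, 0,
   *					     "my-region", &my_cma);
   *	}
   */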
  
  /**
   * cma_declare_contiguous() - reserve custom contiguous area
   * @base: Base address of the reserved area (optional, use 0 for any)
   * @size: Size of the reserved area (in bytes).
   * @limit: End address of the reserved memory (optional, 0 for any).
   * @alignment: Alignment for the CMA area, should be a power of 2 or zero
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @fixed: hint about where to place the reserved area
   * @name: The name of the area. See function cma_init_reserved_mem()
   * @res_cma: Pointer to store the created cma region.
   *
   * This function reserves memory from the early allocator. It should be
   * called by arch specific code once the early allocator (memblock or bootmem)
   * has been activated and all other subsystems have already allocated/reserved
   * memory. This function allows the creation of custom reserved areas.
   *
   * If @fixed is true, reserve a contiguous area at exactly @base.  If false,
   * reserve in the range from @base to @limit.
   */
  int __init cma_declare_contiguous(phys_addr_t base,
  			phys_addr_t size, phys_addr_t limit,
  			phys_addr_t alignment, unsigned int order_per_bit,
  			bool fixed, const char *name, struct cma **res_cma)
  {
  	phys_addr_t memblock_end = memblock_end_of_DRAM();
  	phys_addr_t highmem_start;
  	int ret = 0;
  	/*
  	 * We can't use __pa(high_memory) directly, since high_memory
  	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
  	 * complain. Find the boundary by adding one to the last valid
  	 * address.
  	 */
  	highmem_start = __pa(high_memory - 1) + 1;
  	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)
  ",
  		__func__, &size, &base, &limit, &alignment);
  
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
  		pr_err("Not enough slots for CMA reserved regions!
  ");
  		return -ENOSPC;
  	}
  
  	if (!size)
  		return -EINVAL;
  
  	if (alignment && !is_power_of_2(alignment))
  		return -EINVAL;
  
  	/*
  	 * Sanitise input arguments.
  	 * Pages at both ends of the CMA area could be merged into adjacent
  	 * unmovable migratetype pages by the page allocator's buddy
  	 * algorithm. In that case, you could not get contiguous memory,
  	 * which is not what we want.
  	 */
  	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
  			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
  	if (fixed && base & (alignment - 1)) {
  		ret = -EINVAL;
  		pr_err("Region at %pa must be aligned to %pa bytes
  ",
  			&base, &alignment);
  		goto err;
  	}
  	base = ALIGN(base, alignment);
  	size = ALIGN(size, alignment);
  	limit &= ~(alignment - 1);
  	if (!base)
  		fixed = false;
  	/* size should be aligned with order_per_bit */
  	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  	/*
  	 * If allocating at a fixed base, the requested region must not cross
  	 * the low/high memory boundary.
  	 */
  	if (fixed && base < highmem_start && base + size > highmem_start) {
  		ret = -EINVAL;
  		pr_err("Region at %pa defined on low/high memory boundary (%pa)
  ",
  			&base, &highmem_start);
  		goto err;
  	}
  	/*
  	 * If the limit is unspecified or above the memblock end, its effective
  	 * value will be the memblock end. Set it explicitly to simplify further
  	 * checks.
  	 */
  	if (limit == 0 || limit > memblock_end)
  		limit = memblock_end;
  	if (base + size > limit) {
  		ret = -EINVAL;
  		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)
  ",
  			&size, &base, &limit);
  		goto err;
  	}
  	/* Reserve memory */
  	if (fixed) {
  		if (memblock_is_region_reserved(base, size) ||
  		    memblock_reserve(base, size) < 0) {
  			ret = -EBUSY;
  			goto err;
  		}
  	} else {
  		phys_addr_t addr = 0;
  
  		/*
  		 * All pages in the reserved area must come from the same zone.
  		 * If the requested region crosses the low/high memory boundary,
  		 * try allocating from high memory first and fall back to low
  		 * memory in case of failure.
  		 */
  		if (base < highmem_start && limit > highmem_start) {
  			addr = memblock_phys_alloc_range(size, alignment,
  							 highmem_start, limit);
  			limit = highmem_start;
  		}
  		if (!addr) {
  			addr = memblock_phys_alloc_range(size, alignment, base,
  							 limit);
  			if (!addr) {
  				ret = -ENOMEM;
  				goto err;
  			}
  		}

  		/*
  		 * kmemleak scans/reads tracked objects for pointers to other
  		 * objects but this address isn't mapped and accessible
  		 */
  		kmemleak_ignore_phys(addr);
  		base = addr;
  	}
  	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
  	if (ret)
  		goto free_mem;

  	pr_info("Reserved %ld MiB at %pa
  ", (unsigned long)size / SZ_1M,
  		&base);
  	return 0;
  free_mem:
  	memblock_free(base, size);
  err:
  	pr_err("Failed to reserve %ld MiB
  ", (unsigned long)size / SZ_1M);
  	return ret;
  }
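
  /*
   * Usage sketch (illustrative only, not part of this file): early arch setup
   * code could ask for a 16 MiB region anywhere in memory (base, limit and
   * alignment all left to the allocator); my_cma is a hypothetical pointer.
   *
   *	struct cma *my_cma;
   *	int ret;
   *
   *	ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
   *				     "my-region", &my_cma);
   *	if (ret)
   *		pr_warn("my-region: reservation failed (%d)\n", ret);
   */
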
  #ifdef CONFIG_CMA_DEBUG
  static void cma_debug_show_areas(struct cma *cma)
  {
  	unsigned long next_zero_bit, next_set_bit, nr_zero;
  	unsigned long start = 0;
  	unsigned long nr_part, nr_total = 0;
  	unsigned long nbits = cma_bitmap_maxno(cma);
  
  	mutex_lock(&cma->lock);
  	pr_info("number of available pages: ");
  	for (;;) {
  		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
  		if (next_zero_bit >= nbits)
  			break;
  		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
  		nr_zero = next_set_bit - next_zero_bit;
  		nr_part = nr_zero << cma->order_per_bit;
  		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
  			next_zero_bit);
  		nr_total += nr_part;
  		start = next_zero_bit + nr_zero;
  	}
  	pr_cont("=> %lu free of %lu total pages
  ", nr_total, cma->count);
  	mutex_unlock(&cma->lock);
  }
  #else
  static inline void cma_debug_show_areas(struct cma *cma) { }
  #endif
  /**
   * cma_alloc() - allocate pages from contiguous area
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @count: Requested number of pages.
   * @align: Requested alignment of pages (in PAGE_SIZE order).
   * @no_warn: Avoid printing message about failed allocation
   *
   * This function allocates part of the contiguous memory from the
   * specified contiguous memory area.
   */
  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
  		       bool no_warn)
  {
  	unsigned long mask, offset;
  	unsigned long pfn = -1;
  	unsigned long start = 0;
  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
  	size_t i;
  	struct page *page = NULL;
  	int ret = -ENOMEM;
  
  	if (!cma || !cma->count)
  		return NULL;
  	pr_debug("%s(cma %p, count %zu, align %d)
  ", __func__, (void *)cma,
  		 count, align);
  
  	if (!count)
  		return NULL;
  
  	mask = cma_bitmap_aligned_mask(cma, align);
  	offset = cma_bitmap_aligned_offset(cma, align);
  	bitmap_maxno = cma_bitmap_maxno(cma);
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  	if (bitmap_count > bitmap_maxno)
  		return NULL;
  	for (;;) {
  		mutex_lock(&cma->lock);
  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
  				bitmap_maxno, start, bitmap_count, mask,
  				offset);
  		if (bitmap_no >= bitmap_maxno) {
  			mutex_unlock(&cma->lock);
  			break;
  		}
  		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
  		/*
  		 * It's safe to drop the lock here. We've marked this region for
  		 * our exclusive use. If the migration fails we will take the
  		 * lock again and unmark it.
  		 */
  		mutex_unlock(&cma->lock);
  
  		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
  		mutex_lock(&cma_mutex);
  		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
  				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
  		mutex_unlock(&cma_mutex);
  		if (ret == 0) {
  			page = pfn_to_page(pfn);
  			break;
  		}

  		cma_clear_bitmap(cma, pfn, count);
  		if (ret != -EBUSY)
  			break;
  		pr_debug("%s(): memory range at %p is busy, retrying
  ",
  			 __func__, pfn_to_page(pfn));
  		/* try again with a bit different memory target */
  		start = bitmap_no + mask + 1;
  	}
  	trace_cma_alloc(pfn, page, count, align);

  	/*
  	 * CMA can allocate multiple page blocks, which results in different
  	 * blocks being marked with different tags. Reset the tags to ignore
  	 * those page blocks.
  	 */
  	if (page) {
  		for (i = 0; i < count; i++)
  			page_kasan_tag_reset(page + i);
  	}
  	if (ret && !no_warn) {
  		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d
  ",
  			__func__, count, ret);
  		cma_debug_show_areas(cma);
  	}
  	pr_debug("%s(): returned %p
  ", __func__, page);
  	return page;
  }
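
  /*
   * Usage sketch (illustrative only, not part of this file): a driver holding
   * a region pointer (my_cma, hypothetical) pairs cma_alloc() with
   * cma_release() once its buffer of nr_pages pages is no longer needed.
   *
   *	struct page *page;
   *
   *	page = cma_alloc(my_cma, nr_pages, 0, false);
   *	if (!page)
   *		return -ENOMEM;
   *	...
   *	cma_release(my_cma, page, nr_pages);
   */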
  
  /**
   * cma_release() - release allocated pages
   * @cma:   Contiguous memory region for which the allocation was performed.
   * @pages: Allocated pages.
   * @count: Number of allocated pages.
   *
   * This function releases memory allocated by cma_alloc().
   * It returns false when the provided pages do not belong to the contiguous
   * area and true otherwise.
   */
  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
  {
  	unsigned long pfn;
  
  	if (!cma || !pages)
  		return false;
  
  	pr_debug("%s(page %p)
  ", __func__, (void *)pages);
  
  	pfn = page_to_pfn(pages);
  
  	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
  		return false;
  
  	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
  
  	free_contig_range(pfn, count);
  	cma_clear_bitmap(cma, pfn, count);
  	trace_cma_release(pfn, pages, count);
  
  	return true;
  }
  
  int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = it(&cma_areas[i], data);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(cma_for_each_area);
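
  /*
   * Usage sketch (illustrative only, not part of this file): the callback
   * passed to cma_for_each_area() is invoked for every registered area and
   * stops the walk by returning a non-zero value.
   *
   *	static int my_show_area(struct cma *cma, void *data)
   *	{
   *		pr_info("%s: %lu pages\n", cma_get_name(cma),
   *			cma_get_size(cma) >> PAGE_SHIFT);
   *		return 0;	// keep iterating
   *	}
   *
   *	cma_for_each_area(my_show_area, NULL);
   */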