mm/cma.c
  // SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Contiguous Memory Allocator
   *
   * Copyright (c) 2010-2011 by Samsung Electronics.
   * Copyright IBM Corporation, 2013
   * Copyright LG Electronics Inc., 2014
   * Written by:
   *	Marek Szyprowski <m.szyprowski@samsung.com>
   *	Michal Nazarewicz <mina86@mina86.com>
   *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
   */
  
  #define pr_fmt(fmt) "cma: " fmt
  
  #ifdef CONFIG_CMA_DEBUG
  #ifndef DEBUG
  #  define DEBUG
  #endif
  #endif
  #define CREATE_TRACE_POINTS
  
  #include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/sizes.h>
  #include <linux/slab.h>
  #include <linux/log2.h>
  #include <linux/cma.h>
  #include <linux/highmem.h>
  #include <linux/io.h>
  #include <linux/kmemleak.h>
  #include <trace/events/cma.h>

  #include "cma.h"
  
  struct cma cma_areas[MAX_CMA_AREAS];
  unsigned cma_area_count;
  static DEFINE_MUTEX(cma_mutex);
  phys_addr_t cma_get_base(const struct cma *cma)
  {
  	return PFN_PHYS(cma->base_pfn);
  }
  unsigned long cma_get_size(const struct cma *cma)
  {
  	return cma->count << PAGE_SHIFT;
  }
  const char *cma_get_name(const struct cma *cma)
  {
  	return cma->name ? cma->name : "(undefined)";
  }
  static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
  					     unsigned int align_order)
  {
  	if (align_order <= cma->order_per_bit)
  		return 0;
  	return (1UL << (align_order - cma->order_per_bit)) - 1;
  }
  /*
   * Find the offset of the base PFN from the specified align_order.
   * The value returned is represented in order_per_bits.
   */
  static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
  					       unsigned int align_order)
  {
  	return (cma->base_pfn & ((1UL << align_order) - 1))
  		>> cma->order_per_bit;
  }
  static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
  					      unsigned long pages)
  {
  	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
  }
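
  /*
   * Worked example (illustrative, not part of the original file): with the
   * hypothetical values cma->base_pfn = 0x81f00, cma->order_per_bit = 0 and
   * align_order = 9, the helpers above give
   *
   *	cma_bitmap_aligned_mask()          -> (1UL << 9) - 1  = 0x1ff
   *	cma_bitmap_aligned_offset()        -> 0x81f00 & 0x1ff = 0x100
   *	cma_bitmap_pages_to_bits(cma, 33)  -> ALIGN(33, 1)    = 33
   *
   * The mask and offset are later fed to bitmap_find_next_zero_area_off() in
   * cma_alloc() so that the PFN corresponding to the chosen bitmap slot is a
   * multiple of 2^align_order.
   */
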
  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
  			     unsigned int count)
  {
  	unsigned long bitmap_no, bitmap_count;
  
  	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  
  	mutex_lock(&cma->lock);
  	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
  	mutex_unlock(&cma->lock);
  }
  
  static int __init cma_activate_area(struct cma *cma)
  {
  	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
  	unsigned i = cma->count >> pageblock_order;
  	struct zone *zone;
  
  	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
  	if (!cma->bitmap) {
  		cma->count = 0;
  		return -ENOMEM;
  	}

  	WARN_ON_ONCE(!pfn_valid(pfn));
  	zone = page_zone(pfn_to_page(pfn));
  	do {
  		unsigned j;
  
  		base_pfn = pfn;
  		for (j = pageblock_nr_pages; j; --j, pfn++) {
  			WARN_ON_ONCE(!pfn_valid(pfn));
  			/*
  			 * alloc_contig_range requires the pfn range
  			 * specified to be in the same zone. Make this
  			 * simple by forcing the entire CMA resv range
  			 * to be in the same zone.
  			 */
  			if (page_zone(pfn_to_page(pfn)) != zone)
  				goto not_in_zone;
  		}
  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
  	} while (--i);
  
  	mutex_init(&cma->lock);
  
  #ifdef CONFIG_CMA_DEBUGFS
  	INIT_HLIST_HEAD(&cma->mem_head);
  	spin_lock_init(&cma->mem_head_lock);
  #endif
  	return 0;
  not_in_zone:
  	pr_err("CMA area %s could not be activated
  ", cma->name);
  	kfree(cma->bitmap);
  	cma->count = 0;
  	return -EINVAL;
  }
  
  static int __init cma_init_reserved_areas(void)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = cma_activate_area(&cma_areas[i]);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
  core_initcall(cma_init_reserved_areas);
  
  /**
   * cma_init_reserved_mem() - create custom contiguous area from reserved memory
   * @base: Base address of the reserved area
   * @size: Size of the reserved area (in bytes).
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @name: The name of the area. If this parameter is NULL, the name of
   *        the area will be set to "cmaN", where N is a running counter of
   *        used areas.
   * @res_cma: Pointer to store the created cma region.
   *
   * This function creates custom contiguous area from already reserved memory.
   */
  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  				 unsigned int order_per_bit,
  				 const char *name,
  				 struct cma **res_cma)
  {
  	struct cma *cma;
  	phys_addr_t alignment;
  
  	/* Sanity checks */
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
  		pr_err("Not enough slots for CMA reserved regions!
  ");
  		return -ENOSPC;
  	}
  
  	if (!size || !memblock_is_region_reserved(base, size))
  		return -EINVAL;
  	/* ensure minimal alignment required by mm core */
  	alignment = PAGE_SIZE <<
  			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
  
  	/* alignment should be aligned with order_per_bit */
  	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  
  	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
  		return -EINVAL;
  
  	/*
  	 * Each reserved area must be initialised later, when more kernel
  	 * subsystems (like the slab allocator) are available.
  	 */
  	cma = &cma_areas[cma_area_count];
  	if (name) {
  		cma->name = name;
  	} else {
  		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
  		if (!cma->name)
  			return -ENOMEM;
  	}
  	cma->base_pfn = PFN_DOWN(base);
  	cma->count = size >> PAGE_SHIFT;
  	cma->order_per_bit = order_per_bit;
  	*res_cma = cma;
  	cma_area_count++;
  	totalcma_pages += (size / PAGE_SIZE);
  
  	return 0;
  }
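
  /*
   * Usage sketch (illustrative, not part of this file): an early-boot caller
   * that has already reserved a region through memblock can hand it over to
   * CMA. The base/size values and the "example" name below are hypothetical.
   *
   *	struct cma *cma;
   *	int err;
   *
   *	err = memblock_reserve(base, size);
   *	if (!err)
   *		err = cma_init_reserved_mem(base, size, 0, "example", &cma);
   */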
  
  /**
   * cma_declare_contiguous() - reserve custom contiguous area
   * @base: Base address of the reserved area (optional, use 0 for any).
   * @size: Size of the reserved area (in bytes).
   * @limit: End address of the reserved memory (optional, 0 for any).
   * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @fixed: hint about where to place the reserved area
   * @name: The name of the area. See function cma_init_reserved_mem()
   * @res_cma: Pointer to store the created cma region.
   *
   * This function reserves memory from the early allocator. It should be
   * called by arch-specific code once the early allocator (memblock or bootmem)
   * has been activated and all other subsystems have already allocated/reserved
   * memory. This function allows the creation of custom reserved areas.
   *
   * If @fixed is true, reserve contiguous area at exactly @base.  If false,
   * reserve in range from @base to @limit.
   */
  int __init cma_declare_contiguous(phys_addr_t base,
  			phys_addr_t size, phys_addr_t limit,
  			phys_addr_t alignment, unsigned int order_per_bit,
  			bool fixed, const char *name, struct cma **res_cma)
  {
  	phys_addr_t memblock_end = memblock_end_of_DRAM();
  	phys_addr_t highmem_start;
  	int ret = 0;
  	/*
  	 * We can't use __pa(high_memory) directly, since high_memory
  	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
  	 * complain. Find the boundary by adding one to the last valid
  	 * address.
  	 */
  	highmem_start = __pa(high_memory - 1) + 1;
  	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)
  ",
  		__func__, &size, &base, &limit, &alignment);
  
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
  		pr_err("Not enough slots for CMA reserved regions!
  ");
  		return -ENOSPC;
  	}
  
  	if (!size)
  		return -EINVAL;
  
  	if (alignment && !is_power_of_2(alignment))
  		return -EINVAL;
  
  	/*
  	 * Sanitise input arguments.
  	 * Pages at both ends of the CMA area could be merged into adjacent
  	 * unmovable migratetype pages by the page allocator's buddy algorithm.
  	 * In that case you could not get contiguous memory, which is not what
  	 * we want.
  	 */
  	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
  			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
  	if (fixed && base & (alignment - 1)) {
  		ret = -EINVAL;
  		pr_err("Region at %pa must be aligned to %pa bytes
  ",
  			&base, &alignment);
  		goto err;
  	}
  	base = ALIGN(base, alignment);
  	size = ALIGN(size, alignment);
  	limit &= ~(alignment - 1);
  	if (!base)
  		fixed = false;
  	/* size should be aligned with order_per_bit */
  	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  	/*
  	 * If allocating at a fixed base the request region must not cross the
  	 * low/high memory boundary.
  	 */
  	if (fixed && base < highmem_start && base + size > highmem_start) {
  		ret = -EINVAL;
  		pr_err("Region at %pa defined on low/high memory boundary (%pa)
  ",
  			&base, &highmem_start);
  		goto err;
  	}
  	/*
  	 * If the limit is unspecified or above the memblock end, its effective
  	 * value will be the memblock end. Set it explicitly to simplify further
  	 * checks.
  	 */
  	if (limit == 0 || limit > memblock_end)
  		limit = memblock_end;
  	if (base + size > limit) {
  		ret = -EINVAL;
  		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)
  ",
  			&size, &base, &limit);
  		goto err;
  	}
  	/* Reserve memory */
  	if (fixed) {
  		if (memblock_is_region_reserved(base, size) ||
  		    memblock_reserve(base, size) < 0) {
  			ret = -EBUSY;
  			goto err;
  		}
  	} else {
  		phys_addr_t addr = 0;
  
  		/*
  		 * All pages in the reserved area must come from the same zone.
  		 * If the requested region crosses the low/high memory boundary,
  		 * try allocating from high memory first and fall back to low
  		 * memory in case of failure.
  		 */
  		if (base < highmem_start && limit > highmem_start) {
  			addr = memblock_phys_alloc_range(size, alignment,
  							 highmem_start, limit);
  			limit = highmem_start;
  		}
  		if (!addr) {
  			addr = memblock_phys_alloc_range(size, alignment, base,
  							 limit);
  			if (!addr) {
  				ret = -ENOMEM;
  				goto err;
  			}
  		}

  		/*
  		 * kmemleak scans/reads tracked objects for pointers to other
  		 * objects but this address isn't mapped and accessible
  		 */
  		kmemleak_ignore_phys(addr);
  		base = addr;
  	}
  	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
  	if (ret)
  		goto free_mem;

  	pr_info("Reserved %ld MiB at %pa
  ", (unsigned long)size / SZ_1M,
  		&base);
  	return 0;
  free_mem:
  	memblock_free(base, size);
  err:
  	pr_err("Failed to reserve %ld MiB
  ", (unsigned long)size / SZ_1M);
  	return ret;
  }
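
  /*
   * Usage sketch (illustrative, not part of this file): arch setup code can
   * ask for a 64 MiB area anywhere below the memblock end, letting CMA pick
   * the base and placement. The "example" name is hypothetical.
   *
   *	struct cma *cma;
   *	int err;
   *
   *	err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, "example",
   *				     &cma);
   *	if (err)
   *		pr_warn("example CMA reservation failed: %d\n", err);
   */
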
  #ifdef CONFIG_CMA_DEBUG
  static void cma_debug_show_areas(struct cma *cma)
  {
  	unsigned long next_zero_bit, next_set_bit, nr_zero;
  	unsigned long start = 0;
  	unsigned long nr_part, nr_total = 0;
  	unsigned long nbits = cma_bitmap_maxno(cma);
  
  	mutex_lock(&cma->lock);
  	pr_info("number of available pages: ");
  	for (;;) {
  		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
  		if (next_zero_bit >= nbits)
  			break;
  		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
  		nr_zero = next_set_bit - next_zero_bit;
  		nr_part = nr_zero << cma->order_per_bit;
  		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
  			next_zero_bit);
  		nr_total += nr_part;
  		start = next_zero_bit + nr_zero;
  	}
  	pr_cont("=> %lu free of %lu total pages
  ", nr_total, cma->count);
  	mutex_unlock(&cma->lock);
  }
  #else
  static inline void cma_debug_show_areas(struct cma *cma) { }
  #endif
  /**
   * cma_alloc() - allocate pages from contiguous area
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @count: Requested number of pages.
   * @align: Requested alignment of pages (in PAGE_SIZE order).
   * @no_warn: Avoid printing message about failed allocation
   *
   * This function allocates part of the contiguous memory from the
   * specified contiguous memory area.
   */
  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
  		       bool no_warn)
  {
  	unsigned long mask, offset;
  	unsigned long pfn = -1;
  	unsigned long start = 0;
  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
  	size_t i;
  	struct page *page = NULL;
  	int ret = -ENOMEM;
  
  	if (!cma || !cma->count)
  		return NULL;
  	pr_debug("%s(cma %p, count %zu, align %d)
  ", __func__, (void *)cma,
  		 count, align);
  
  	if (!count)
  		return NULL;
  
  	mask = cma_bitmap_aligned_mask(cma, align);
  	offset = cma_bitmap_aligned_offset(cma, align);
  	bitmap_maxno = cma_bitmap_maxno(cma);
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  	if (bitmap_count > bitmap_maxno)
  		return NULL;
  	for (;;) {
  		mutex_lock(&cma->lock);
  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
  				bitmap_maxno, start, bitmap_count, mask,
  				offset);
  		if (bitmap_no >= bitmap_maxno) {
  			mutex_unlock(&cma->lock);
  			break;
  		}
  		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
  		/*
  		 * It's safe to drop the lock here. We've marked this region for
  		 * our exclusive use. If the migration fails we will take the
  		 * lock again and unmark it.
  		 */
  		mutex_unlock(&cma->lock);
  
  		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
  		mutex_lock(&cma_mutex);
  		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
  				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
  		mutex_unlock(&cma_mutex);
  		if (ret == 0) {
  			page = pfn_to_page(pfn);
  			break;
  		}

  		cma_clear_bitmap(cma, pfn, count);
  		if (ret != -EBUSY)
  			break;
  		pr_debug("%s(): memory range at %p is busy, retrying
  ",
  			 __func__, pfn_to_page(pfn));
  		/* try again with a bit different memory target */
  		start = bitmap_no + mask + 1;
  	}
  	trace_cma_alloc(pfn, page, count, align);

  	/*
  	 * CMA can allocate multiple page blocks, which results in different
  	 * blocks being marked with different tags. Reset the tags to ignore
  	 * those page blocks.
  	 */
  	if (page) {
  		for (i = 0; i < count; i++)
  			page_kasan_tag_reset(page + i);
  	}
  	if (ret && !no_warn) {
  		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d
  ",
  			__func__, count, ret);
  		cma_debug_show_areas(cma);
  	}
  	pr_debug("%s(): returned %p
  ", __func__, page);
  	return page;
  }
  
  /**
   * cma_release() - release allocated pages
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @pages: Allocated pages.
   * @count: Number of allocated pages.
   *
   * This function releases memory allocated by cma_alloc().
   * It returns false when the provided pages do not belong to the contiguous
   * area and true otherwise.
   */
  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
  {
  	unsigned long pfn;
  
  	if (!cma || !pages)
  		return false;
  
  	pr_debug("%s(page %p)
  ", __func__, (void *)pages);
  
  	pfn = page_to_pfn(pages);
  
  	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
  		return false;
  
  	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
  
  	free_contig_range(pfn, count);
  	cma_clear_bitmap(cma, pfn, count);
  	trace_cma_release(pfn, pages, count);
  
  	return true;
  }
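
  /*
   * Usage sketch (illustrative, not part of this file): a driver holding a
   * struct cma pointer (the hypothetical "my_cma") allocates a 1 MiB buffer
   * and releases it again. The align argument of 0 requests no alignment
   * beyond the area's order_per_bit granularity.
   *
   *	unsigned long nr_pages = SZ_1M >> PAGE_SHIFT;
   *	struct page *pages;
   *
   *	pages = cma_alloc(my_cma, nr_pages, 0, false);
   *	if (pages) {
   *		... use page_to_phys(pages) or map the pages ...
   *		cma_release(my_cma, pages, nr_pages);
   *	}
   */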
  
  int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = it(&cma_areas[i], data);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
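
  /*
   * Usage sketch (illustrative, not part of this file): a hypothetical
   * callback that logs every registered area; returning a non-zero value
   * from the callback stops the walk and is propagated to the caller.
   *
   *	static int dump_one_cma(struct cma *cma, void *data)
   *	{
   *		phys_addr_t base = cma_get_base(cma);
   *
   *		pr_info("%s: base %pa, size %lu bytes\n",
   *			cma_get_name(cma), &base, cma_get_size(cma));
   *		return 0;
   *	}
   *
   *	cma_for_each_area(dump_one_cma, NULL);
   */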