  /*
   * Contiguous Memory Allocator
   *
   * Copyright (c) 2010-2011 by Samsung Electronics.
   * Copyright IBM Corporation, 2013
   * Copyright LG Electronics Inc., 2014
   * Written by:
   *	Marek Szyprowski <m.szyprowski@samsung.com>
   *	Michal Nazarewicz <mina86@mina86.com>
   *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License as
   * published by the Free Software Foundation; either version 2 of the
   * License or (at your option) any later version of the license.
   */
  
  #define pr_fmt(fmt) "cma: " fmt
  
  #ifdef CONFIG_CMA_DEBUG
  #ifndef DEBUG
  #  define DEBUG
  #endif
  #endif
  #define CREATE_TRACE_POINTS
  
  #include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/sizes.h>
  #include <linux/slab.h>
  #include <linux/log2.h>
  #include <linux/cma.h>
  #include <linux/highmem.h>
  #include <linux/io.h>
  #include <linux/kmemleak.h>
  #include <trace/events/cma.h>

  #include "cma.h"
  
  struct cma cma_areas[MAX_CMA_AREAS];
  unsigned cma_area_count;
  static DEFINE_MUTEX(cma_mutex);
  phys_addr_t cma_get_base(const struct cma *cma)
  {
  	return PFN_PHYS(cma->base_pfn);
  }
  unsigned long cma_get_size(const struct cma *cma)
  {
  	return cma->count << PAGE_SHIFT;
  }
  const char *cma_get_name(const struct cma *cma)
  {
  	return cma->name ? cma->name : "(undefined)";
  }
  static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
  					     unsigned int align_order)
  {
  	if (align_order <= cma->order_per_bit)
  		return 0;
  	return (1UL << (align_order - cma->order_per_bit)) - 1;
  }
  /*
   * Find the offset of the base PFN from the specified align_order.
   * The value returned is represented in order_per_bits.
   */
  static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
  					       unsigned int align_order)
  {
  	return (cma->base_pfn & ((1UL << align_order) - 1))
  		>> cma->order_per_bit;
  }
  static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
  					      unsigned long pages)
  {
  	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
  }
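
   /*
    * Worked example of the bitmap arithmetic above (illustrative numbers):
    * with order_per_bit = 2 each bitmap bit covers 4 pages, so a request for
    * 5 pages needs cma_bitmap_pages_to_bits() = ALIGN(5, 4) >> 2 = 2 bits,
    * and a request aligned to order 3 searches the bitmap with
    * cma_bitmap_aligned_mask() = (1 << (3 - 2)) - 1 = 1 as its mask.
    */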
  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
  			     unsigned int count)
  {
  	unsigned long bitmap_no, bitmap_count;
  
  	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  
  	mutex_lock(&cma->lock);
  	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
  	mutex_unlock(&cma->lock);
  }
  
  static int __init cma_activate_area(struct cma *cma)
  {
  	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
  	unsigned i = cma->count >> pageblock_order;
  	struct zone *zone;
  
  	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
  
  	if (!cma->bitmap)
  		return -ENOMEM;
  	WARN_ON_ONCE(!pfn_valid(pfn));
  	zone = page_zone(pfn_to_page(pfn));
  	do {
  		unsigned j;
  
  		base_pfn = pfn;
  		for (j = pageblock_nr_pages; j; --j, pfn++) {
  			WARN_ON_ONCE(!pfn_valid(pfn));
  			/*
  			 * alloc_contig_range requires the pfn range
  			 * specified to be in the same zone. Make this
  			 * simple by forcing the entire CMA resv range
  			 * to be in the same zone.
  			 */
  			if (page_zone(pfn_to_page(pfn)) != zone)
  				goto not_in_zone;
  		}
  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
  	} while (--i);
  
  	mutex_init(&cma->lock);
  
  #ifdef CONFIG_CMA_DEBUGFS
  	INIT_HLIST_HEAD(&cma->mem_head);
  	spin_lock_init(&cma->mem_head_lock);
  #endif
  	return 0;
  not_in_zone:
   	pr_err("CMA area %s could not be activated\n", cma->name);
  	kfree(cma->bitmap);
  	cma->count = 0;
  	return -EINVAL;
  }
  
  static int __init cma_init_reserved_areas(void)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = cma_activate_area(&cma_areas[i]);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
  core_initcall(cma_init_reserved_areas);
  
  /**
   * cma_init_reserved_mem() - create custom contiguous area from reserved memory
   * @base: Base address of the reserved area
    * @size: Size of the reserved area (in bytes).
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @name: The name of the area. If this parameter is NULL, the name of
   *        the area will be set to "cmaN", where N is a running counter of
   *        used areas.
   * @res_cma: Pointer to store the created cma region.
   *
    * This function creates a custom contiguous area from already reserved memory.
   */
  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  				 unsigned int order_per_bit,
  				 const char *name,
  				 struct cma **res_cma)
  {
  	struct cma *cma;
  	phys_addr_t alignment;
  
  	/* Sanity checks */
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
   		pr_err("Not enough slots for CMA reserved regions!\n");
  		return -ENOSPC;
  	}
  
  	if (!size || !memblock_is_region_reserved(base, size))
  		return -EINVAL;
  	/* ensure minimal alignment required by mm core */
  	alignment = PAGE_SIZE <<
  			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
  
  	/* alignment should be aligned with order_per_bit */
  	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  
  	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
  		return -EINVAL;
  
  	/*
  	 * Each reserved area must be initialised later, when more kernel
  	 * subsystems (like slab allocator) are available.
  	 */
  	cma = &cma_areas[cma_area_count];
  	if (name) {
  		cma->name = name;
  	} else {
   		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
  		if (!cma->name)
  			return -ENOMEM;
  	}
  	cma->base_pfn = PFN_DOWN(base);
  	cma->count = size >> PAGE_SHIFT;
  	cma->order_per_bit = order_per_bit;
  	*res_cma = cma;
  	cma_area_count++;
  	totalcma_pages += (size / PAGE_SIZE);
  
  	return 0;
  }
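
   /*
    * Illustrative sketch (not part of this file): early code that has already
    * reserved a block through memblock might register it with CMA roughly like
    * this; the base, size, name and error handling below are hypothetical.
    *
    *	phys_addr_t base;	/* previously reserved via memblock_reserve() */
    *	struct cma *my_cma;
    *	int ret;
    *
    *	ret = cma_init_reserved_mem(base, SZ_8M, 0, "example", &my_cma);
    *	if (ret)
    *		pr_warn("example CMA registration failed: %d\n", ret);
    */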
  
  /**
   * cma_declare_contiguous() - reserve custom contiguous area
    * @base: Base address of the reserved area (optional), use 0 for any
    * @size: Size of the reserved area (in bytes).
   * @limit: End address of the reserved memory (optional, 0 for any).
    * @alignment: Alignment for the CMA area, should be a power of 2 or zero
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @fixed: hint about where to place the reserved area
   * @name: The name of the area. See function cma_init_reserved_mem()
   * @res_cma: Pointer to store the created cma region.
   *
    * This function reserves memory from the early allocator. It should be
    * called by arch-specific code once the early allocator (memblock or bootmem)
    * has been activated and all other subsystems have already allocated/reserved
    * memory. It allows the creation of custom reserved areas.
   *
   * If @fixed is true, reserve contiguous area at exactly @base.  If false,
   * reserve in range from @base to @limit.
   */
  int __init cma_declare_contiguous(phys_addr_t base,
  			phys_addr_t size, phys_addr_t limit,
  			phys_addr_t alignment, unsigned int order_per_bit,
  			bool fixed, const char *name, struct cma **res_cma)
  {
  	phys_addr_t memblock_end = memblock_end_of_DRAM();
  	phys_addr_t highmem_start;
  	int ret = 0;
  	/*
  	 * We can't use __pa(high_memory) directly, since high_memory
  	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
  	 * complain. Find the boundary by adding one to the last valid
  	 * address.
  	 */
  	highmem_start = __pa(high_memory - 1) + 1;
   	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
  		__func__, &size, &base, &limit, &alignment);
  
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
   		pr_err("Not enough slots for CMA reserved regions!\n");
  		return -ENOSPC;
  	}
  
  	if (!size)
  		return -EINVAL;
  
  	if (alignment && !is_power_of_2(alignment))
  		return -EINVAL;
  
  	/*
  	 * Sanitise input arguments.
   	 * Pages at both ends of the CMA area could be merged into adjacent
   	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
   	 * In that case a contiguous allocation would no longer be possible,
   	 * which is not what we want.
  	 */
  	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
  			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
  	base = ALIGN(base, alignment);
  	size = ALIGN(size, alignment);
  	limit &= ~(alignment - 1);
  	if (!base)
  		fixed = false;
  	/* size should be aligned with order_per_bit */
  	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  	/*
  	 * If allocating at a fixed base the request region must not cross the
  	 * low/high memory boundary.
  	 */
  	if (fixed && base < highmem_start && base + size > highmem_start) {
  		ret = -EINVAL;
   		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
  			&base, &highmem_start);
  		goto err;
  	}
  	/*
  	 * If the limit is unspecified or above the memblock end, its effective
  	 * value will be the memblock end. Set it explicitly to simplify further
  	 * checks.
  	 */
  	if (limit == 0 || limit > memblock_end)
  		limit = memblock_end;
  	/* Reserve memory */
  	if (fixed) {
  		if (memblock_is_region_reserved(base, size) ||
  		    memblock_reserve(base, size) < 0) {
  			ret = -EBUSY;
  			goto err;
  		}
  	} else {
  		phys_addr_t addr = 0;
  
  		/*
  		 * All pages in the reserved area must come from the same zone.
  		 * If the requested region crosses the low/high memory boundary,
  		 * try allocating from high memory first and fall back to low
  		 * memory in case of failure.
  		 */
  		if (base < highmem_start && limit > highmem_start) {
  			addr = memblock_alloc_range(size, alignment,
  						    highmem_start, limit,
  						    MEMBLOCK_NONE);
  			limit = highmem_start;
  		}
  		if (!addr) {
  			addr = memblock_alloc_range(size, alignment, base,
  						    limit,
  						    MEMBLOCK_NONE);
  			if (!addr) {
  				ret = -ENOMEM;
  				goto err;
  			}
  		}

  		/*
  		 * kmemleak scans/reads tracked objects for pointers to other
  		 * objects but this address isn't mapped and accessible
  		 */
  		kmemleak_ignore_phys(addr);
  		base = addr;
  	}
  	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
  	if (ret)
  		goto free_mem;

   	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
  		&base);
  	return 0;
  free_mem:
  	memblock_free(base, size);
  err:
   	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
  	return ret;
  }
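
   /*
    * Illustrative sketch (not part of this file): arch setup code could ask
    * for a 16 MiB area anywhere in memory during early boot roughly as below;
    * the size, name and calling site are hypothetical.
    *
    *	static struct cma *example_cma;
    *
    *	void __init example_cma_reserve(void)
    *	{
    *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
    *					   "example", &example_cma))
    *			pr_warn("example CMA reservation failed\n");
    *	}
    */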
  #ifdef CONFIG_CMA_DEBUG
  static void cma_debug_show_areas(struct cma *cma)
  {
  	unsigned long next_zero_bit, next_set_bit;
  	unsigned long start = 0;
  	unsigned int nr_zero, nr_total = 0;
  
  	mutex_lock(&cma->lock);
  	pr_info("number of available pages: ");
  	for (;;) {
  		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
  		if (next_zero_bit >= cma->count)
  			break;
  		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
  		nr_zero = next_set_bit - next_zero_bit;
  		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
  		nr_total += nr_zero;
  		start = next_zero_bit + nr_zero;
  	}
   	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
  	mutex_unlock(&cma->lock);
  }
  #else
  static inline void cma_debug_show_areas(struct cma *cma) { }
  #endif
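
   /*
    * Example of the output produced by cma_debug_show_areas() (numbers are
    * hypothetical): each run of free bits is printed as count@bit_offset and
    * runs are joined with '+', e.g.
    *
    *	number of available pages: 32@0+16@128=> 48 free of 256 total pages
    */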
  /**
   * cma_alloc() - allocate pages from contiguous area
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @count: Requested number of pages.
   * @align: Requested alignment of pages (in PAGE_SIZE order).
   * @no_warn: Avoid printing message about failed allocation
   *
    * This function allocates pages from the specified contiguous memory area.
   */
  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
  		       bool no_warn)
  {
  	unsigned long mask, offset;
  	unsigned long pfn = -1;
  	unsigned long start = 0;
  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
  	struct page *page = NULL;
  	int ret = -ENOMEM;
  
  	if (!cma || !cma->count)
  		return NULL;
   	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
  		 count, align);
  
  	if (!count)
  		return NULL;
  
  	mask = cma_bitmap_aligned_mask(cma, align);
  	offset = cma_bitmap_aligned_offset(cma, align);
  	bitmap_maxno = cma_bitmap_maxno(cma);
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  	if (bitmap_count > bitmap_maxno)
  		return NULL;
  	for (;;) {
  		mutex_lock(&cma->lock);
  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
  				bitmap_maxno, start, bitmap_count, mask,
  				offset);
  		if (bitmap_no >= bitmap_maxno) {
  			mutex_unlock(&cma->lock);
  			break;
  		}
  		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
  		/*
  		 * It's safe to drop the lock here. We've marked this region for
  		 * our exclusive use. If the migration fails we will take the
  		 * lock again and unmark it.
  		 */
  		mutex_unlock(&cma->lock);
  
  		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
  		mutex_lock(&cma_mutex);
  		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
  				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
  		mutex_unlock(&cma_mutex);
  		if (ret == 0) {
  			page = pfn_to_page(pfn);
  			break;
  		}

  		cma_clear_bitmap(cma, pfn, count);
  		if (ret != -EBUSY)
  			break;
   		pr_debug("%s(): memory range at %p is busy, retrying\n",
  			 __func__, pfn_to_page(pfn));
  		/* try again with a bit different memory target */
  		start = bitmap_no + mask + 1;
  	}
  	trace_cma_alloc(pfn, page, count, align);

  	if (ret && !no_warn) {
   		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
  			__func__, count, ret);
  		cma_debug_show_areas(cma);
  	}
   	pr_debug("%s(): returned %p\n", __func__, page);
  	return page;
  }
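
   /*
    * Illustrative sketch (not part of this file): a hypothetical user holding
    * a struct cma pointer (my_cma) could allocate 16 naturally aligned pages
    * roughly like this:
    *
    *	struct page *pages;
    *
    *	pages = cma_alloc(my_cma, 16, get_order(16 * PAGE_SIZE), false);
    *	if (!pages)
    *		return -ENOMEM;
    */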
  
  /**
   * cma_release() - release allocated pages
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @pages: Allocated pages.
   * @count: Number of allocated pages.
   *
    * This function releases memory allocated by cma_alloc().
    * It returns false when the provided pages do not belong to the contiguous
    * area, and true otherwise.
   */
  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
  {
  	unsigned long pfn;
  
  	if (!cma || !pages)
  		return false;
  
   	pr_debug("%s(page %p)\n", __func__, (void *)pages);
  
  	pfn = page_to_pfn(pages);
  
  	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
  		return false;
  
  	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
  
  	free_contig_range(pfn, count);
  	cma_clear_bitmap(cma, pfn, count);
  	trace_cma_release(pfn, pages, count);
  
  	return true;
  }
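
   /*
    * Illustrative sketch (not part of this file): pages obtained from the
    * hypothetical cma_alloc() call above are handed back with a matching count:
    *
    *	if (!cma_release(my_cma, pages, 16))
    *		pr_warn("pages do not belong to the example CMA area\n");
    */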
  
  int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = it(&cma_areas[i], data);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
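
   /*
    * Illustrative sketch (not part of this file): a caller can walk every
    * registered area with a callback; returning non-zero from the callback
    * stops the iteration. The callback below is hypothetical.
    *
    *	static int example_show_area(struct cma *cma, void *data)
    *	{
    *		pr_info("%s: %lu pages\n", cma_get_name(cma),
    *			cma_get_size(cma) / PAGE_SIZE);
    *		return 0;
    *	}
    *
    *	cma_for_each_area(example_show_area, NULL);
    */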