mm/cma.c

// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Contiguous Memory Allocator
   *
   * Copyright (c) 2010-2011 by Samsung Electronics.
   * Copyright IBM Corporation, 2013
   * Copyright LG Electronics Inc., 2014
   * Written by:
   *	Marek Szyprowski <m.szyprowski@samsung.com>
   *	Michal Nazarewicz <mina86@mina86.com>
   *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
   */
  
  #define pr_fmt(fmt) "cma: " fmt
  
  #ifdef CONFIG_CMA_DEBUG
  #ifndef DEBUG
  #  define DEBUG
  #endif
  #endif
  #define CREATE_TRACE_POINTS
  
  #include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/sizes.h>
  #include <linux/slab.h>
  #include <linux/log2.h>
  #include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"
  
  struct cma cma_areas[MAX_CMA_AREAS];
  unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
  {
  	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
  {
  	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
  {
  	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
  					     unsigned int align_order)
  {
  	if (align_order <= cma->order_per_bit)
  		return 0;
  	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
   * Find the offset of the base PFN from the specified align_order.
   * The value returned is represented in order_per_bits.
   */
  static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
  					       unsigned int align_order)
  {
  	return (cma->base_pfn & ((1UL << align_order) - 1))
  		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
  					      unsigned long pages)
  {
  	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
  			     unsigned int count)
  {
  	unsigned long bitmap_no, bitmap_count;
  
  	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  
  	mutex_lock(&cma->lock);
  	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
  	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
  {
  	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
  	unsigned i = cma->count >> pageblock_order;
  	struct zone *zone;
  	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
  	if (!cma->bitmap)
  		goto out_error;

  	WARN_ON_ONCE(!pfn_valid(pfn));
  	zone = page_zone(pfn_to_page(pfn));
  	do {
  		unsigned j;
  
  		base_pfn = pfn;
  		for (j = pageblock_nr_pages; j; --j, pfn++) {
  			WARN_ON_ONCE(!pfn_valid(pfn));
  			/*
  			 * alloc_contig_range requires the pfn range
  			 * specified to be in the same zone. Make this
  			 * simple by forcing the entire CMA resv range
  			 * to be in the same zone.
  			 */
  			if (page_zone(pfn_to_page(pfn)) != zone)
  				goto not_in_zone;
  		}
  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
  	} while (--i);
  
  	mutex_init(&cma->lock);
  
  #ifdef CONFIG_CMA_DEBUGFS
  	INIT_HLIST_HEAD(&cma->mem_head);
  	spin_lock_init(&cma->mem_head_lock);
  #endif
  	return;

  not_in_zone:
  	bitmap_free(cma->bitmap);
  out_error:
  	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
  }
  
  static int __init cma_init_reserved_areas(void)
  {
  	int i;
  	for (i = 0; i < cma_area_count; i++)
  		cma_activate_area(&cma_areas[i]);
  
  	return 0;
  }
  core_initcall(cma_init_reserved_areas);
  
  /**
   * cma_init_reserved_mem() - create custom contiguous area from reserved memory
   * @base: Base address of the reserved area
   * @size: Size of the reserved area (in bytes),
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @name: The name of the area. If this parameter is NULL, the name of
   *        the area will be set to "cmaN", where N is a running counter of
   *        used areas.
   * @res_cma: Pointer to store the created cma region.
   *
   * This function creates custom contiguous area from already reserved memory.
   */
  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  				 unsigned int order_per_bit,
  				 const char *name,
  				 struct cma **res_cma)
  {
  	struct cma *cma;
  	phys_addr_t alignment;
  
  	/* Sanity checks */
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
  		return -ENOSPC;
  	}
  
  	if (!size || !memblock_is_region_reserved(base, size))
  		return -EINVAL;
  	/* ensure minimal alignment required by mm core */
  	alignment = PAGE_SIZE <<
  			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
  
  	/* alignment should be aligned with order_per_bit */
  	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  
  	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
  		return -EINVAL;
  
  	/*
  	 * Each reserved area must be initialised later, when more kernel
  	 * subsystems (like slab allocator) are available.
  	 */
  	cma = &cma_areas[cma_area_count];
  
  	if (name)
  		snprintf(cma->name, CMA_MAX_NAME, name);
  	else
		snprintf(cma->name, CMA_MAX_NAME,  "cma%d\n", cma_area_count);
  	cma->base_pfn = PFN_DOWN(base);
  	cma->count = size >> PAGE_SHIFT;
  	cma->order_per_bit = order_per_bit;
  	*res_cma = cma;
  	cma_area_count++;
  	totalcma_pages += (size / PAGE_SIZE);
  
  	return 0;
  }
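/*
 * Illustrative sketch (not part of the upstream file): a driver that has
 * already memblock_reserve()d a suitably aligned range could hand it over
 * to CMA roughly like this; "my_base", "my_size" and "my_cma" are
 * hypothetical names.
 *
 *	static struct cma *my_cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(my_base, my_size, 0, "my-cma", &my_cma);
 *	if (err)
 *		pr_warn("my-cma init failed: %d\n", err);
 */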
  
  /**
   * cma_declare_contiguous_nid() - reserve custom contiguous area
   * @base: Base address of the reserved area optional, use 0 for any
   * @size: Size of the reserved area (in bytes),
   * @limit: End address of the reserved memory (optional, 0 for any).
   * @alignment: Alignment for the CMA area, should be power of 2 or zero
   * @order_per_bit: Order of pages represented by one bit on bitmap.
   * @fixed: hint about where to place the reserved area
   * @name: The name of the area. See function cma_init_reserved_mem()
   * @res_cma: Pointer to store the created cma region.
   * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   *
   * This function reserves memory from early allocator. It should be
   * called by arch specific code once the early allocator (memblock or bootmem)
   * has been activated and all other subsystems have already allocated/reserved
   * memory. This function allows to create custom reserved areas.
   *
   * If @fixed is true, reserve contiguous area at exactly @base.  If false,
   * reserve in range from @base to @limit.
   */
  int __init cma_declare_contiguous_nid(phys_addr_t base,
  			phys_addr_t size, phys_addr_t limit,
  			phys_addr_t alignment, unsigned int order_per_bit,
  			bool fixed, const char *name, struct cma **res_cma,
  			int nid)
  {
  	phys_addr_t memblock_end = memblock_end_of_DRAM();
  	phys_addr_t highmem_start;
  	int ret = 0;
  	/*
  	 * We can't use __pa(high_memory) directly, since high_memory
  	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
  	 * complain. Find the boundary by adding one to the last valid
  	 * address.
  	 */
  	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);
  
  	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
  		return -ENOSPC;
  	}
  
  	if (!size)
  		return -EINVAL;
  
  	if (alignment && !is_power_of_2(alignment))
  		return -EINVAL;
  
  	/*
  	 * Sanitise input arguments.
  	 * Pages both ends in CMA area could be merged into adjacent unmovable
  	 * migratetype page by page allocator's buddy algorithm. In the case,
  	 * you couldn't get a contiguous memory, which is not what we want.
  	 */
  	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
  			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
  	if (fixed && base & (alignment - 1)) {
  		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
  		goto err;
  	}
  	base = ALIGN(base, alignment);
  	size = ALIGN(size, alignment);
  	limit &= ~(alignment - 1);
  	if (!base)
  		fixed = false;
  	/* size should be aligned with order_per_bit */
  	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
  		return -EINVAL;
  	/*
  	 * If allocating at a fixed base the request region must not cross the
  	 * low/high memory boundary.
  	 */
  	if (fixed && base < highmem_start && base + size > highmem_start) {
  		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
  		goto err;
  	}
  	/*
  	 * If the limit is unspecified or above the memblock end, its effective
  	 * value will be the memblock end. Set it explicitly to simplify further
  	 * checks.
  	 */
  	if (limit == 0 || limit > memblock_end)
  		limit = memblock_end;
  	if (base + size > limit) {
  		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
  		goto err;
  	}
  	/* Reserve memory */
  	if (fixed) {
  		if (memblock_is_region_reserved(base, size) ||
  		    memblock_reserve(base, size) < 0) {
  			ret = -EBUSY;
  			goto err;
  		}
  	} else {
  		phys_addr_t addr = 0;
  
  		/*
  		 * All pages in the reserved area must come from the same zone.
  		 * If the requested region crosses the low/high memory boundary,
  		 * try allocating from high memory first and fall back to low
  		 * memory in case of failure.
  		 */
  		if (base < highmem_start && limit > highmem_start) {
  			addr = memblock_alloc_range_nid(size, alignment,
  					highmem_start, limit, nid, true);
  			limit = highmem_start;
  		}
  		if (!addr) {
  			addr = memblock_alloc_range_nid(size, alignment, base,
  					limit, nid, true);
  			if (!addr) {
  				ret = -ENOMEM;
  				goto err;
  			}
  		}

  		/*
  		 * kmemleak scans/reads tracked objects for pointers to other
  		 * objects but this address isn't mapped and accessible
  		 */
  		kmemleak_ignore_phys(addr);
  		base = addr;
  	}
  	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
  	if (ret)
  		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
  	return 0;
  free_mem:
  	memblock_free(base, size);
  err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
  	return ret;
  }
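/*
 * Illustrative sketch (not part of the upstream file): early arch setup
 * code, once memblock is available, might reserve a 64 MiB area anywhere
 * in memory like this; "my_cma" is a hypothetical name.
 *
 *	static struct cma *my_cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *					 "my-cma", &my_cma, NUMA_NO_NODE);
 *	if (ret)
 *		pr_warn("my-cma reservation failed: %d\n", ret);
 */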
  #ifdef CONFIG_CMA_DEBUG
  static void cma_debug_show_areas(struct cma *cma)
  {
  	unsigned long next_zero_bit, next_set_bit, nr_zero;
  	unsigned long start = 0;
  	unsigned long nr_part, nr_total = 0;
  	unsigned long nbits = cma_bitmap_maxno(cma);
  
  	mutex_lock(&cma->lock);
  	pr_info("number of available pages: ");
  	for (;;) {
  		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
  		if (next_zero_bit >= nbits)
  			break;
  		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
  		nr_zero = next_set_bit - next_zero_bit;
  		nr_part = nr_zero << cma->order_per_bit;
  		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
  			next_zero_bit);
  		nr_total += nr_part;
  		start = next_zero_bit + nr_zero;
  	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
  	mutex_unlock(&cma->lock);
  }
  #else
  static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
   * cma_alloc() - allocate pages from contiguous area
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @count: Requested number of pages.
   * @align: Requested alignment of pages (in PAGE_SIZE order).
   * @no_warn: Avoid printing message about failed allocation
   *
   * This function allocates part of contiguous memory on specific
   * contiguous memory area.
   */
  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
  		       bool no_warn)
  {
  	unsigned long mask, offset;
  	unsigned long pfn = -1;
  	unsigned long start = 0;
  	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
  	size_t i;
  	struct page *page = NULL;
  	int ret = -ENOMEM;

  	if (!cma || !cma->count || !cma->bitmap)
  		return NULL;
	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
  		 count, align);
  
  	if (!count)
  		return NULL;
  
  	mask = cma_bitmap_aligned_mask(cma, align);
  	offset = cma_bitmap_aligned_offset(cma, align);
  	bitmap_maxno = cma_bitmap_maxno(cma);
  	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
  	if (bitmap_count > bitmap_maxno)
  		return NULL;
  	for (;;) {
  		mutex_lock(&cma->lock);
  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
  				bitmap_maxno, start, bitmap_count, mask,
  				offset);
  		if (bitmap_no >= bitmap_maxno) {
  			mutex_unlock(&cma->lock);
  			break;
  		}
  		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
  		/*
  		 * It's safe to drop the lock here. We've marked this region for
  		 * our exclusive use. If the migration fails we will take the
  		 * lock again and unmark it.
  		 */
  		mutex_unlock(&cma->lock);
  
  		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
  		mutex_lock(&cma_mutex);
  		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
  				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
  		mutex_unlock(&cma_mutex);
  		if (ret == 0) {
  			page = pfn_to_page(pfn);
  			break;
  		}

  		cma_clear_bitmap(cma, pfn, count);
  		if (ret != -EBUSY)
  			break;
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
  		/* try again with a bit different memory target */
  		start = bitmap_no + mask + 1;
  	}
  	trace_cma_alloc(pfn, page, count, align);

  	/*
  	 * CMA can allocate multiple page blocks, which results in different
  	 * blocks being marked with different tags. Reset the tags to ignore
  	 * those page blocks.
  	 */
  	if (page) {
  		for (i = 0; i < count; i++)
  			page_kasan_tag_reset(page + i);
  	}
  	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
  		cma_debug_show_areas(cma);
  	}
	pr_debug("%s(): returned %p\n", __func__, page);
  	return page;
  }
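/*
 * Illustrative sketch (not part of the upstream file): a caller needing a
 * physically contiguous buffer of "nr_pages" pages from an area set up
 * earlier might do the following; "my_cma" and "nr_pages" are hypothetical.
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(my_cma, nr_pages, 0, false);
 *	if (!pages)
 *		return -ENOMEM;
 */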
  
  /**
   * cma_release() - release allocated pages
   * @cma:   Contiguous memory region for which the allocation is performed.
   * @pages: Allocated pages.
   * @count: Number of allocated pages.
   *
   * This function releases memory allocated by cma_alloc().
   * It returns false when provided pages do not belong to contiguous area and
   * true otherwise.
   */
  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
  {
  	unsigned long pfn;
  
  	if (!cma || !pages)
  		return false;
  
	pr_debug("%s(page %p)\n", __func__, (void *)pages);
  
  	pfn = page_to_pfn(pages);
  
  	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
  		return false;
  
  	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
  
  	free_contig_range(pfn, count);
  	cma_clear_bitmap(cma, pfn, count);
  	trace_cma_release(pfn, pages, count);
  
  	return true;
  }
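/*
 * Illustrative sketch (not part of the upstream file): pages obtained from
 * cma_alloc() are handed back with a matching count once the caller is done
 * with them; "my_cma", "pages" and "nr_pages" are hypothetical.
 *
 *	if (!cma_release(my_cma, pages, nr_pages))
 *		pr_warn("pages did not belong to %s\n", cma_get_name(my_cma));
 */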
  
  int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
  {
  	int i;
  
  	for (i = 0; i < cma_area_count; i++) {
  		int ret = it(&cma_areas[i], data);
  
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
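/*
 * Illustrative sketch (not part of the upstream file): a minimal callback
 * for cma_for_each_area() that logs every registered area; iteration stops
 * early if the callback returns a non-zero value.
 *
 *	static int my_cma_show(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: %lu bytes at %pa\n", cma_get_name(cma),
 *			cma_get_size(cma), &base);
 *		return 0;
 *	}
 *
 *	cma_for_each_area(my_cma_show, NULL);
 */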