mm/cma.c

/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>
|
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}
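
/*
 * Worked example (illustrative numbers, not from this file): with
 * order_per_bit = 0 and align_order = 8, the mask is (1UL << 8) - 1 = 255,
 * so a region found by the bitmap search must start at a bit index that is
 * a multiple of 256, i.e. at a PFN aligned to 256 pages.
 */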

/*
 * Find a PFN aligned to the specified order and return an offset
 * represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (ALIGN(cma->base_pfn, (1UL << align_order)) -
                cma->base_pfn) >> cma->order_per_bit;
}
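
/*
 * Worked example (illustrative numbers, not from this file): with
 * base_pfn = 1032, order_per_bit = 0 and align_order = 8,
 * ALIGN(1032, 256) = 1280, so the first 256-page-aligned PFN corresponds
 * to bit 1280 - 1032 = 248 of the bitmap.
 */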

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
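
/*
 * E.g. (illustrative): with order_per_bit = 2, a request for 13 pages is
 * rounded up to ALIGN(13, 4) = 16 pages and therefore consumes
 * 16 >> 2 = 4 bits of the bitmap.
 */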

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                unsigned int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                return -ENOMEM;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA resv range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
        spin_lock_init(&cma->mem_head_lock);
#endif

        return 0;

err:
        kfree(cma->bitmap);
        cma->count = 0;
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                        unsigned int order_per_bit,
                        struct cma **res_cma)
{
        struct cma *cma;
        phys_addr_t alignment;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}
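
/*
 * Minimal usage sketch (an illustration, not part of this file): early
 * code that has already memblock_reserve()d a suitably aligned range can
 * hand it to CMA as below; the names are hypothetical.
 *
 *	static struct cma *my_rmem_cma;
 *
 *	static int __init my_rmem_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		return cma_init_reserved_mem(base, size, 0, &my_rmem_cma);
 *	}
 *
 * The region only becomes usable for cma_alloc() after
 * cma_init_reserved_areas() has activated it at core_initcall time.
 */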

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, struct cma **res_cma)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

#ifdef CONFIG_X86
        /*
         * high_memory isn't direct mapped memory so retrieving its physical
         * address isn't appropriate. But it would be useful to check the
         * physical address of the highmem boundary so it's justifiable to get
6b101e2a3
|
234 235 236 237 238 239 240 |
* the physical address from it. On x86 there is a validation check for * this case, so the following workaround is needed to avoid it. */ highmem_start = __pa_nodebug(high_memory); #else highmem_start = __pa(high_memory); #endif |

        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise input arguments.
         * Pages at both ends of the CMA area could be merged into adjacent
         * unmovable migratetype pages by the page allocator's buddy
         * algorithm; in that case you couldn't get contiguous memory, which
         * is not what we want.
         */
        alignment = max(alignment,
                (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base the requested region must not cross
         * the low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify
         * further checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range(size, alignment,
                                                    highmem_start, limit,
                                                    MEMBLOCK_NONE);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_alloc_range(size, alignment, base,
                                                    limit, MEMBLOCK_NONE);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }
|
                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
                kmemleak_ignore(phys_to_virt(addr));
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
        if (ret)
                goto err;
|
        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}
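
/*
 * Minimal usage sketch (an illustration, not part of this file): arch
 * setup code could reserve a 16 MiB CMA area anywhere in memory roughly
 * as below; `my_cma` is a hypothetical name.
 *
 *	static struct cma *my_cma;
 *
 *	void __init my_arch_cma_reserve(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &my_cma))
 *			pr_warn("cma: my_cma reservation failed\n");
 *	}
 */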

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
        unsigned long mask, offset;
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }
|
                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        trace_cma_alloc(pfn, page, count, align);
|
        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(pfn, pages, count);

        return true;
}
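
/*
 * Minimal usage sketch (an illustration, not part of this file): a caller
 * holding a region `my_cma` could allocate and free 16 pages with
 * 2^4-page alignment like this.
 *
 *	struct page *pages = cma_alloc(my_cma, 16, 4);
 *
 *	if (pages) {
 *		...
 *		cma_release(my_cma, pages, 16);
 *	}
 */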