Commit 2f4f9b92b2582f8c1706686cdbbe07d28530f8cc
Committed by: Greg Kroah-Hartman
1 parent: 522a8162a0
mm/CMA: fix boot regression due to physical address of high_memory
commit 6b101e2a3ce4d2a0312087598bd1ab4a1db2ac40 upstream.

high_memory isn't direct mapped memory, so retrieving its physical address
isn't appropriate. But it would be useful to check the physical address of
the highmem boundary, so it's justifiable to get the physical address from it.

On x86 there is a validation check for this if CONFIG_DEBUG_VIRTUAL is
enabled, and it triggers the following boot failure reported by Ingo:

  ...
  BUG: Int 6: CR2 00f06f53
  ...
  Call Trace:
    dump_stack+0x41/0x52
    early_idt_handler+0x6b/0x6b
    cma_declare_contiguous+0x33/0x212
    dma_contiguous_reserve_area+0x31/0x4e
    dma_contiguous_reserve+0x11d/0x125
    setup_arch+0x7b5/0xb63
    start_kernel+0xb8/0x3e6
    i386_start_kernel+0x79/0x7d

To fix this boot regression, this patch implements a workaround that avoids
the validation check on x86 when retrieving the physical address of
high_memory. __pa_nodebug(), which this patch uses, is implemented only on
x86, so there is no choice but to use a dirty #ifdef.

[akpm@linux-foundation.org: tweak comment]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reported-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Ingo Molnar <mingo@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
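The substance of the fix, condensed here from the hunk in the diff below (this is the relevant fragment of cma_declare_contiguous(), not new code): the highmem boundary is still derived from high_memory, but on x86 the debug-checked __pa() is replaced by __pa_nodebug() so the CONFIG_DEBUG_VIRTUAL validation is not triggered for this one lookup.

        phys_addr_t highmem_start;

#ifdef CONFIG_X86
        /*
         * high_memory is not in the direct mapping, so __pa() would trip the
         * CONFIG_DEBUG_VIRTUAL check; __pa_nodebug() exists only on x86.
         */
        highmem_start = __pa_nodebug(high_memory);
#else
        highmem_start = __pa(high_memory);
#endif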
Showing 1 changed file with 13 additions and 1 deletion.
mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -1,419 +1,431 @@
 /*
  * Contiguous Memory Allocator
  *
  * Copyright (c) 2010-2011 by Samsung Electronics.
  * Copyright IBM Corporation, 2013
  * Copyright LG Electronics Inc., 2014
  * Written by:
  * Marek Szyprowski <m.szyprowski@samsung.com>
  * Michal Nazarewicz <mina86@mina86.com>
  * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
  * Joonsoo Kim <iamjoonsoo.kim@lge.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation; either version 2 of the
  * License or (at your optional) any later version of the license.
  */
 
 #define pr_fmt(fmt) "cma: " fmt
 
 #ifdef CONFIG_CMA_DEBUG
 #ifndef DEBUG
 # define DEBUG
 #endif
 #endif
 
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
 #include <linux/cma.h>
 #include <linux/highmem.h>
 
 struct cma {
         unsigned long base_pfn;
         unsigned long count;
         unsigned long *bitmap;
         unsigned int order_per_bit; /* Order of pages represented by one bit */
         struct mutex lock;
 };
 
 static struct cma cma_areas[MAX_CMA_AREAS];
 static unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(struct cma *cma)
 {
         return PFN_PHYS(cma->base_pfn);
 }
 
 unsigned long cma_get_size(struct cma *cma)
 {
         return cma->count << PAGE_SHIFT;
 }
 
 static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
 {
         if (align_order <= cma->order_per_bit)
                 return 0;
         return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
 {
         return cma->count >> cma->order_per_bit;
 }
 
 static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
                                               unsigned long pages)
 {
         return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
 }
 
 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
 {
         unsigned long bitmap_no, bitmap_count;
 
         bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
         bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
         mutex_lock(&cma->lock);
         bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
         mutex_unlock(&cma->lock);
 }
 
 static int __init cma_activate_area(struct cma *cma)
 {
         int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
         unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
         unsigned i = cma->count >> pageblock_order;
         struct zone *zone;
 
         cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
         if (!cma->bitmap)
                 return -ENOMEM;
 
         WARN_ON_ONCE(!pfn_valid(pfn));
         zone = page_zone(pfn_to_page(pfn));
 
         do {
                 unsigned j;
 
                 base_pfn = pfn;
                 for (j = pageblock_nr_pages; j; --j, pfn++) {
                         WARN_ON_ONCE(!pfn_valid(pfn));
                         /*
                          * alloc_contig_range requires the pfn range
                          * specified to be in the same zone. Make this
                          * simple by forcing the entire CMA resv range
                          * to be in the same zone.
                          */
                         if (page_zone(pfn_to_page(pfn)) != zone)
                                 goto err;
                 }
                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
         } while (--i);
 
         mutex_init(&cma->lock);
         return 0;
 
 err:
         kfree(cma->bitmap);
         cma->count = 0;
         return -EINVAL;
 }
 
 static int __init cma_init_reserved_areas(void)
 {
         int i;
 
         for (i = 0; i < cma_area_count; i++) {
                 int ret = cma_activate_area(&cma_areas[i]);
 
                 if (ret)
                         return ret;
         }
 
         return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @res_cma: Pointer to store the created cma region.
  *
  * This function creates custom contiguous area from already reserved memory.
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                  int order_per_bit, struct cma **res_cma)
 {
         struct cma *cma;
         phys_addr_t alignment;
 
         /* Sanity checks */
         if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
         }
 
         if (!size || !memblock_is_region_reserved(base, size))
                 return -EINVAL;
 
         /* ensure minimal alignment requied by mm core */
         alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 
         /* alignment should be aligned with order_per_bit */
         if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                 return -EINVAL;
 
         if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                 return -EINVAL;
 
         /*
          * Each reserved area must be initialised later, when more kernel
          * subsystems (like slab allocator) are available.
          */
         cma = &cma_areas[cma_area_count];
         cma->base_pfn = PFN_DOWN(base);
         cma->count = size >> PAGE_SHIFT;
         cma->order_per_bit = order_per_bit;
         *res_cma = cma;
         cma_area_count++;
 
         return 0;
 }
 
 /**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @alignment: Alignment for the CMA area, should be power of 2 or zero
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @fixed: hint about where to place the reserved area
  * @res_cma: Pointer to store the created cma region.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
  * has been activated and all other subsystems have already allocated/reserved
  * memory. This function allows to create custom reserved areas.
  *
  * If @fixed is true, reserve contiguous area at exactly @base. If false,
  * reserve in range from @base to @limit.
  */
 int __init cma_declare_contiguous(phys_addr_t base,
                         phys_addr_t size, phys_addr_t limit,
                         phys_addr_t alignment, unsigned int order_per_bit,
                         bool fixed, struct cma **res_cma)
 {
         phys_addr_t memblock_end = memblock_end_of_DRAM();
-        phys_addr_t highmem_start = __pa(high_memory);
+        phys_addr_t highmem_start;
         int ret = 0;
 
+#ifdef CONFIG_X86
+        /*
+         * high_memory isn't direct mapped memory so retrieving its physical
+         * address isn't appropriate. But it would be useful to check the
+         * physical address of the highmem boundary so it's justfiable to get
+         * the physical address from it. On x86 there is a validation check for
+         * this case, so the following workaround is needed to avoid it.
+         */
+        highmem_start = __pa_nodebug(high_memory);
+#else
+        highmem_start = __pa(high_memory);
+#endif
         pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                 __func__, &size, &base, &limit, &alignment);
 
         if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
         }
 
         if (!size)
                 return -EINVAL;
 
         if (alignment && !is_power_of_2(alignment))
                 return -EINVAL;
 
         /*
          * Sanitise input arguments.
          * Pages both ends in CMA area could be merged into adjacent unmovable
          * migratetype page by page allocator's buddy algorithm. In the case,
          * you couldn't get a contiguous memory, which is not what we want.
          */
         alignment = max(alignment,
                 (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
         base = ALIGN(base, alignment);
         size = ALIGN(size, alignment);
         limit &= ~(alignment - 1);
 
         if (!base)
                 fixed = false;
 
         /* size should be aligned with order_per_bit */
         if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                 return -EINVAL;
 
         /*
          * If allocating at a fixed base the request region must not cross the
          * low/high memory boundary.
          */
         if (fixed && base < highmem_start && base + size > highmem_start) {
                 ret = -EINVAL;
                 pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                         &base, &highmem_start);
                 goto err;
         }
 
         /*
          * If the limit is unspecified or above the memblock end, its effective
          * value will be the memblock end. Set it explicitly to simplify further
          * checks.
          */
         if (limit == 0 || limit > memblock_end)
                 limit = memblock_end;
 
         /* Reserve memory */
         if (fixed) {
                 if (memblock_is_region_reserved(base, size) ||
                     memblock_reserve(base, size) < 0) {
                         ret = -EBUSY;
                         goto err;
                 }
         } else {
                 phys_addr_t addr = 0;
 
                 /*
                  * All pages in the reserved area must come from the same zone.
                  * If the requested region crosses the low/high memory boundary,
                  * try allocating from high memory first and fall back to low
                  * memory in case of failure.
                  */
                 if (base < highmem_start && limit > highmem_start) {
                         addr = memblock_alloc_range(size, alignment,
                                                     highmem_start, limit);
                         limit = highmem_start;
                 }
 
                 if (!addr) {
                         addr = memblock_alloc_range(size, alignment, base,
                                                     limit);
                         if (!addr) {
                                 ret = -ENOMEM;
                                 goto err;
                         }
                 }
 
                 base = addr;
         }
 
         ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
         if (ret)
                 goto err;
 
         pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                 &base);
         return 0;
 
 err:
         pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
         return ret;
 }
 
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma: Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
 {
         unsigned long mask, pfn, start = 0;
         unsigned long bitmap_maxno, bitmap_no, bitmap_count;
         struct page *page = NULL;
         int ret;
 
         if (!cma || !cma->count)
                 return NULL;
 
         pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                  count, align);
 
         if (!count)
                 return NULL;
 
         mask = cma_bitmap_aligned_mask(cma, align);
         bitmap_maxno = cma_bitmap_maxno(cma);
         bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
         for (;;) {
                 mutex_lock(&cma->lock);
                 bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
                                 bitmap_maxno, start, bitmap_count, mask);
                 if (bitmap_no >= bitmap_maxno) {
                         mutex_unlock(&cma->lock);
                         break;
                 }
                 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                 /*
                  * It's safe to drop the lock here. We've marked this region for
                  * our exclusive use. If the migration fails we will take the
                  * lock again and unmark it.
                  */
                 mutex_unlock(&cma->lock);
 
                 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                 mutex_lock(&cma_mutex);
                 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                 mutex_unlock(&cma_mutex);
                 if (ret == 0) {
                         page = pfn_to_page(pfn);
                         break;
                 }
 
                 cma_clear_bitmap(cma, pfn, count);
                 if (ret != -EBUSY)
                         break;
 
                 pr_debug("%s(): memory range at %p is busy, retrying\n",
                          __func__, pfn_to_page(pfn));
                 /* try again with a bit different memory target */
                 start = bitmap_no + mask + 1;
         }
 
         pr_debug("%s(): returned %p\n", __func__, page);
         return page;
 }
 
 /**
  * cma_release() - release allocated pages
  * @cma: Contiguous memory region for which the allocation is performed.
  * @pages: Allocated pages.
  * @count: Number of allocated pages.
  *
  * This function releases memory allocated by alloc_cma().
  * It returns false when provided pages do not belong to contiguous area and
  * true otherwise.
  */
 bool cma_release(struct cma *cma, struct page *pages, int count)
 {
         unsigned long pfn;
 
         if (!cma || !pages)
                 return false;
 
         pr_debug("%s(page %p)\n", __func__, (void *)pages);
 
         pfn = page_to_pfn(pages);
 
         if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                 return false;
 
         VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
         free_contig_range(pfn, count);
         cma_clear_bitmap(cma, pfn, count);
 
         return true;
 }
 
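For context on how this function is reached: per the cma_declare_contiguous() kerneldoc above, it is meant to be called from arch-specific early setup once memblock is up, which matches the boot trace in the commit message (setup_arch -> dma_contiguous_reserve -> dma_contiguous_reserve_area -> cma_declare_contiguous). A minimal, hypothetical sketch of such a call, using only the signature visible in this file; the 16 MiB size and the zero base/limit/alignment are illustrative values, not taken from the patch:

        struct cma *cma_region;
        int ret;

        /* Hypothetical values: 16 MiB, no base/limit constraint, default
         * alignment, one page per bitmap bit (order_per_bit == 0). */
        ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &cma_region);
        if (ret)
                pr_warn("cma: reservation failed (%d)\n", ret);

In the real kernel this bookkeeping is done by dma_contiguous_reserve_area(), the caller shown in the trace, rather than open-coded in arch code.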