Commit d44e0780bcc47c9b8851099c0dfc1dda3c9db5a9

Authored by Randy Dunlap
Committed by Linus Torvalds
1 parent 7f46a240b0

[PATCH] kernel-doc: fix warnings in vmalloc.c

Fix new kernel-doc errors in vmalloc.c.

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
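
The fix itself is purely one of kernel-doc syntax: the script only recognizes a parameter description written in the "@name:" form, with a colon immediately after the parameter name. The two lines touched below used a bare space ("@node node to use for allocation or -1") and a semicolon ("@node; numa node"), which kernel-doc typically reports as an undocumented parameter. A minimal sketch of the corrected annotation style, based on the vmalloc_node() comment in the diff (prototype shown here without its body):

    /**
     * vmalloc_node - allocate memory on a specific node
     * @size: allocation size
     * @node: numa node
     *
     * Allocate enough pages to cover @size from the page level
     * allocator and map them into contiguous kernel virtual space.
     */
    void *vmalloc_node(unsigned long size, int node);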

Showing 1 changed file with 2 additions and 2 deletions (inline diff)

mm/vmalloc.c

/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
		unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree (area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					(unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
		unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 * get_vm_area - reserve a contingous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserved it for out purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contingous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
		pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
	else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
- * @node node to use for allocation or -1
+ * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
		int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight cotrol over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 *
 * @size: allocation size
- * @node; numa node
+ * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight cotrol over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight cotrol over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
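
On verification: in current kernel trees, this class of warning can be reproduced by running the kernel-doc script directly on the file, e.g. scripts/kernel-doc -none mm/vmalloc.c. The -none mode (print diagnostics only, no formatted output) is assumed to exist only in modern trees; 2.6-era scripts offered just the full output modes such as -man or -html, with warnings printed to stderr.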