  /*
   *  linux/mm/vmalloc.c
   *
   *  Copyright (C) 1993  Linus Torvalds
   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   *  Numa awareness, Christoph Lameter, SGI, June 2005
   */
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
  #include <linux/module.h>
  #include <linux/highmem.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/debugobjects.h>
  #include <linux/kallsyms.h>
  #include <linux/list.h>
  #include <linux/rbtree.h>
  #include <linux/radix-tree.h>
  #include <linux/rcupdate.h>
  #include <linux/pfn.h>
  #include <linux/kmemleak.h>
  #include <linux/atomic.h>
  #include <asm/uaccess.h>
  #include <asm/tlbflush.h>
  #include <asm/shmparam.h>

  /*** Page table manipulation functions ***/

  static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  {
  	pte_t *pte;
  
  	pte = pte_offset_kernel(pmd, addr);
  	do {
  		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  }
  static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		vunmap_pte_range(pmd, addr, next);
  	} while (pmd++, addr = next, addr != end);
  }
  static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		vunmap_pmd_range(pud, addr, next);
  	} while (pud++, addr = next, addr != end);
  }
  static void vunmap_page_range(unsigned long addr, unsigned long end)
  {
  	pgd_t *pgd;
  	unsigned long next;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		vunmap_pud_range(pgd, addr, next);
  	} while (pgd++, addr = next, addr != end);
  }
  
  static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pte_t *pte;
  	/*
  	 * nr is a running index into the array which helps higher level
  	 * callers keep track of where we're up to.
  	 */
  	pte = pte_alloc_kernel(pmd, addr);
  	if (!pte)
  		return -ENOMEM;
  	do {
  		struct page *page = pages[*nr];
  
  		if (WARN_ON(!pte_none(*pte)))
  			return -EBUSY;
  		if (WARN_ON(!page))
  			return -ENOMEM;
  		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
  		(*nr)++;
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	return 0;
  }
  static int vmap_pmd_range(pud_t *pud, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_alloc(&init_mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
  static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_alloc(&init_mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }
  /*
   * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
   * will have pfns corresponding to the "pages" array.
   *
   * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
   */
  static int vmap_page_range_noflush(unsigned long start, unsigned long end,
  				   pgprot_t prot, struct page **pages)
  {
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long addr = start;
  	int err = 0;
  	int nr = 0;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
  		if (err)
  			return err;
  	} while (pgd++, addr = next, addr != end);

  	return nr;
  }
  static int vmap_page_range(unsigned long start, unsigned long end,
  			   pgprot_t prot, struct page **pages)
  {
  	int ret;
  
  	ret = vmap_page_range_noflush(start, end, prot, pages);
  	flush_cache_vmap(start, end);
  	return ret;
  }
  int is_vmalloc_or_module_addr(const void *x)
  {
  	/*
  	 * ARM, x86-64 and sparc64 put modules in a special place,
  	 * and fall back on vmalloc() if that fails. Others
  	 * just put it in the vmalloc space.
  	 */
  #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
  	unsigned long addr = (unsigned long)x;
  	if (addr >= MODULES_VADDR && addr < MODULES_END)
  		return 1;
  #endif
  	return is_vmalloc_addr(x);
  }
  /*
   * Walk a vmap address to the struct page it maps.
   */
  struct page *vmalloc_to_page(const void *vmalloc_addr)
  {
  	unsigned long addr = (unsigned long) vmalloc_addr;
  	struct page *page = NULL;
  	pgd_t *pgd = pgd_offset_k(addr);

  	/*
  	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
  	 * architectures that do not vmalloc module space
  	 */
  	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

  	if (!pgd_none(*pgd)) {
  		pud_t *pud = pud_offset(pgd, addr);
  		if (!pud_none(*pud)) {
  			pmd_t *pmd = pmd_offset(pud, addr);
  			if (!pmd_none(*pmd)) {
  				pte_t *ptep, pte;
  				ptep = pte_offset_map(pmd, addr);
  				pte = *ptep;
  				if (pte_present(pte))
  					page = pte_page(pte);
  				pte_unmap(ptep);
  			}
  		}
  	}
  	return page;
  }
  EXPORT_SYMBOL(vmalloc_to_page);
  
  /*
   * Map a vmalloc()-space virtual address to the physical page frame number.
   */
  unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
  {
  	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
  }
  EXPORT_SYMBOL(vmalloc_to_pfn);
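
/*
 * Illustrative sketch (not part of the original file): walking a
 * vmalloc()ed buffer page by page with vmalloc_to_page(). The helper
 * and all names in it are hypothetical.
 */
#if 0	/* example only */
static void example_walk_pages(void *buf, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		/* virtually contiguous, but each page may live anywhere */
		struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);

		pr_info("va %p -> pfn %lu\n", buf + i * PAGE_SIZE,
			page_to_pfn(page));
	}
}
#endif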
  
  /*** Global kva allocator ***/
  
  #define VM_LAZY_FREE	0x01
  #define VM_LAZY_FREEING	0x02
  #define VM_VM_AREA	0x04
  
  struct vmap_area {
  	unsigned long va_start;
  	unsigned long va_end;
  	unsigned long flags;
  	struct rb_node rb_node;		/* address sorted rbtree */
  	struct list_head list;		/* address sorted list */
  	struct list_head purge_list;	/* "lazy purge" list */
  	struct vm_struct *vm;
  	struct rcu_head rcu_head;
  };
  
  static DEFINE_SPINLOCK(vmap_area_lock);
  static LIST_HEAD(vmap_area_list);
  static struct rb_root vmap_area_root = RB_ROOT;
  
  /* The vmap cache globals are protected by vmap_area_lock */
  static struct rb_node *free_vmap_cache;
  static unsigned long cached_hole_size;
  static unsigned long cached_vstart;
  static unsigned long cached_align;
  static unsigned long vmap_area_pcpu_hole;
  
  static struct vmap_area *__find_vmap_area(unsigned long addr)
  {
  	struct rb_node *n = vmap_area_root.rb_node;
  
  	while (n) {
  		struct vmap_area *va;
  
  		va = rb_entry(n, struct vmap_area, rb_node);
  		if (addr < va->va_start)
  			n = n->rb_left;
  		else if (addr > va->va_start)
  			n = n->rb_right;
  		else
  			return va;
  	}
  
  	return NULL;
  }
  
  static void __insert_vmap_area(struct vmap_area *va)
  {
  	struct rb_node **p = &vmap_area_root.rb_node;
  	struct rb_node *parent = NULL;
  	struct rb_node *tmp;
  
  	while (*p) {
  		struct vmap_area *tmp_va;
  
  		parent = *p;
  		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
  		if (va->va_start < tmp_va->va_end)
  			p = &(*p)->rb_left;
  		else if (va->va_end > tmp_va->va_start)
  			p = &(*p)->rb_right;
  		else
  			BUG();
  	}
  
  	rb_link_node(&va->rb_node, parent, p);
  	rb_insert_color(&va->rb_node, &vmap_area_root);
  
  	/* address-sort this list so it is usable like the vmlist */
  	tmp = rb_prev(&va->rb_node);
  	if (tmp) {
  		struct vmap_area *prev;
  		prev = rb_entry(tmp, struct vmap_area, rb_node);
  		list_add_rcu(&va->list, &prev->list);
  	} else
  		list_add_rcu(&va->list, &vmap_area_list);
  }
  
  static void purge_vmap_area_lazy(void);
  
  /*
   * Allocate a region of KVA of the specified size and alignment, within the
   * vstart and vend.
   */
  static struct vmap_area *alloc_vmap_area(unsigned long size,
  				unsigned long align,
  				unsigned long vstart, unsigned long vend,
  				int node, gfp_t gfp_mask)
  {
  	struct vmap_area *va;
  	struct rb_node *n;
  	unsigned long addr;
  	int purged = 0;
  	struct vmap_area *first;

  	BUG_ON(!size);
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(!is_power_of_2(align));

  	va = kmalloc_node(sizeof(struct vmap_area),
  			gfp_mask & GFP_RECLAIM_MASK, node);
  	if (unlikely(!va))
  		return ERR_PTR(-ENOMEM);
  
  retry:
  	spin_lock(&vmap_area_lock);
  	/*
  	 * Invalidate cache if we have more permissive parameters.
  	 * cached_hole_size notes the largest hole noticed _below_
  	 * the vmap_area cached in free_vmap_cache: if size fits
  	 * into that hole, we want to scan from vstart to reuse
  	 * the hole instead of allocating above free_vmap_cache.
  	 * Note that __free_vmap_area may update free_vmap_cache
  	 * without updating cached_hole_size or cached_align.
  	 */
  	if (!free_vmap_cache ||
  			size < cached_hole_size ||
  			vstart < cached_vstart ||
  			align < cached_align) {
  nocache:
  		cached_hole_size = 0;
  		free_vmap_cache = NULL;
  	}
  	/* record if we encounter less permissive parameters */
  	cached_vstart = vstart;
  	cached_align = align;
  
  	/* find starting point for our search */
  	if (free_vmap_cache) {
  		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
  		addr = ALIGN(first->va_end, align);
  		if (addr < vstart)
  			goto nocache;
  		if (addr + size - 1 < addr)
  			goto overflow;
  
  	} else {
  		addr = ALIGN(vstart, align);
  		if (addr + size - 1 < addr)
  			goto overflow;
  
  		n = vmap_area_root.rb_node;
  		first = NULL;
  
  		while (n) {
  			struct vmap_area *tmp;
  			tmp = rb_entry(n, struct vmap_area, rb_node);
  			if (tmp->va_end >= addr) {
  				first = tmp;
  				if (tmp->va_start <= addr)
  					break;
  				n = n->rb_left;
  			} else
  				n = n->rb_right;
  		}
  
  		if (!first)
  			goto found;
  	}
  
  	/* from the starting point, walk areas until a suitable hole is found */
  	while (addr + size > first->va_start && addr + size <= vend) {
  		if (addr + cached_hole_size < first->va_start)
  			cached_hole_size = first->va_start - addr;
  		addr = ALIGN(first->va_end, align);
  		if (addr + size - 1 < addr)
  			goto overflow;
  		if (list_is_last(&first->list, &vmap_area_list))
  			goto found;
  
  		first = list_entry(first->list.next,
  				struct vmap_area, list);
  	}
  found:
  	if (addr + size > vend)
  		goto overflow;
  
  	va->va_start = addr;
  	va->va_end = addr + size;
  	va->flags = 0;
  	__insert_vmap_area(va);
  	free_vmap_cache = &va->rb_node;
  	spin_unlock(&vmap_area_lock);
  	BUG_ON(va->va_start & (align-1));
  	BUG_ON(va->va_start < vstart);
  	BUG_ON(va->va_end > vend);
  	return va;
  
  overflow:
  	spin_unlock(&vmap_area_lock);
  	if (!purged) {
  		purge_vmap_area_lazy();
  		purged = 1;
  		goto retry;
  	}
  	if (printk_ratelimit())
  		printk(KERN_WARNING
  			"vmap allocation for size %lu failed: "
  			"use vmalloc=<size> to increase size.
  ", size);
  	kfree(va);
  	return ERR_PTR(-EBUSY);
  }
  static void __free_vmap_area(struct vmap_area *va)
  {
  	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
  
  	if (free_vmap_cache) {
  		if (va->va_end < cached_vstart) {
  			free_vmap_cache = NULL;
  		} else {
  			struct vmap_area *cache;
  			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
  			if (va->va_start <= cache->va_start) {
  				free_vmap_cache = rb_prev(&va->rb_node);
  				/*
  				 * We don't try to update cached_hole_size or
  				 * cached_align, but it won't go very wrong.
  				 */
  			}
  		}
  	}
  	rb_erase(&va->rb_node, &vmap_area_root);
  	RB_CLEAR_NODE(&va->rb_node);
  	list_del_rcu(&va->list);
  	/*
  	 * Track the highest possible candidate for pcpu area
  	 * allocation.  Areas outside of vmalloc area can be returned
  	 * here too, consider only end addresses which fall inside
  	 * vmalloc area proper.
  	 */
  	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
  		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
  	kfree_rcu(va, rcu_head);
  }
  
  /*
   * Free a region of KVA allocated by alloc_vmap_area
   */
  static void free_vmap_area(struct vmap_area *va)
  {
  	spin_lock(&vmap_area_lock);
  	__free_vmap_area(va);
  	spin_unlock(&vmap_area_lock);
  }
  
  /*
   * Clear the pagetable entries of a given vmap_area
   */
  static void unmap_vmap_area(struct vmap_area *va)
  {
  	vunmap_page_range(va->va_start, va->va_end);
  }
  static void vmap_debug_free_range(unsigned long start, unsigned long end)
  {
  	/*
  	 * Unmap page tables and force a TLB flush immediately if
  	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
  	 * bugs similarly to those in linear kernel virtual address
  	 * space after a page has been freed.
  	 *
  	 * All the lazy freeing logic is still retained, in order to
  	 * minimise intrusiveness of this debugging feature.
  	 *
  	 * This is going to be *slow* (linear kernel virtual address
  	 * debugging doesn't do a broadcast TLB flush so it is a lot
  	 * faster).
  	 */
  #ifdef CONFIG_DEBUG_PAGEALLOC
  	vunmap_page_range(start, end);
  	flush_tlb_kernel_range(start, end);
  #endif
  }
  /*
   * lazy_max_pages is the maximum amount of virtual address space we gather up
   * before attempting to purge with a TLB flush.
   *
   * There is a tradeoff here: a larger number will cover more kernel page tables
   * and take slightly longer to purge, but it will linearly reduce the number of
   * global TLB flushes that must be performed. It would seem natural to scale
   * this number up linearly with the number of CPUs (because vmapping activity
   * could also scale linearly with the number of CPUs), however it is likely
   * that in practice, workloads might be constrained in other ways that mean
   * vmap activity will not scale linearly with CPUs. Also, I want to be
   * conservative and not introduce a big latency on huge systems, so go with
   * a less aggressive log scale. It will still be an improvement over the old
   * code, and it will be simple to change the scale factor if we find that it
   * becomes a problem on bigger systems.
   */
  static unsigned long lazy_max_pages(void)
  {
  	unsigned int log;
  
  	log = fls(num_online_cpus());
  
  	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
  }
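
/*
 * Worked example (illustrative, not from the original file): with 4K
 * pages and 16 online CPUs, fls(16) == 5, so lazy_max_pages() returns
 * 5 * (32MB / 4K) = 40960 pages; up to 160MB of lazily-freed vmap
 * space may accumulate before a purge and its global TLB flush.
 */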
  
  static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  /* for per-CPU blocks */
  static void purge_fragmented_blocks_allcpus(void);
  /*
   * called before a call to iounmap() if the caller wants vm_area_struct's
   * immediately freed.
   */
  void set_iounmap_nonlazy(void)
  {
  	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
  }
  
  /*
   * Purges all lazily-freed vmap areas.
   *
   * If sync is 0 then don't purge if there is already a purge in progress.
   * If force_flush is 1, then flush kernel TLBs between *start and *end even
   * if we found no lazy vmap areas to unmap (callers can use this to optimise
   * their own TLB flushing).
   * Returns with *start = min(*start, lowest purged address)
   *              *end = max(*end, highest purged address)
   */
  static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  					int sync, int force_flush)
  {
  	static DEFINE_SPINLOCK(purge_lock);
  	LIST_HEAD(valist);
  	struct vmap_area *va;
  	struct vmap_area *n_va;
  	int nr = 0;
  
  	/*
  	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
  	 * should not expect such behaviour. This just simplifies locking for
  	 * the case that isn't actually used at the moment anyway.
  	 */
  	if (!sync && !force_flush) {
  		if (!spin_trylock(&purge_lock))
  			return;
  	} else
  		spin_lock(&purge_lock);

  	if (sync)
  		purge_fragmented_blocks_allcpus();
  	rcu_read_lock();
  	list_for_each_entry_rcu(va, &vmap_area_list, list) {
  		if (va->flags & VM_LAZY_FREE) {
  			if (va->va_start < *start)
  				*start = va->va_start;
  			if (va->va_end > *end)
  				*end = va->va_end;
  			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
  			list_add_tail(&va->purge_list, &valist);
  			va->flags |= VM_LAZY_FREEING;
  			va->flags &= ~VM_LAZY_FREE;
  		}
  	}
  	rcu_read_unlock();
  	if (nr)
  		atomic_sub(nr, &vmap_lazy_nr);
  
  	if (nr || force_flush)
  		flush_tlb_kernel_range(*start, *end);
  
  	if (nr) {
  		spin_lock(&vmap_area_lock);
  		list_for_each_entry_safe(va, n_va, &valist, purge_list)
  			__free_vmap_area(va);
  		spin_unlock(&vmap_area_lock);
  	}
  	spin_unlock(&purge_lock);
  }
  
  /*
   * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
   * is already purging.
   */
  static void try_purge_vmap_area_lazy(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  
  	__purge_vmap_area_lazy(&start, &end, 0, 0);
  }
  
  /*
   * Kick off a purge of the outstanding lazy areas.
   */
  static void purge_vmap_area_lazy(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  	__purge_vmap_area_lazy(&start, &end, 1, 0);
  }
  
  /*
   * Free a vmap area, caller ensuring that the area has been unmapped
   * and flush_cache_vunmap had been called for the correct range
   * previously.
   */
  static void free_vmap_area_noflush(struct vmap_area *va)
  {
  	va->flags |= VM_LAZY_FREE;
  	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
  	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
  		try_purge_vmap_area_lazy();
  }
  /*
   * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
   * called for the correct range previously.
   */
  static void free_unmap_vmap_area_noflush(struct vmap_area *va)
  {
  	unmap_vmap_area(va);
  	free_vmap_area_noflush(va);
  }
  
  /*
   * Free and unmap a vmap area
   */
  static void free_unmap_vmap_area(struct vmap_area *va)
  {
  	flush_cache_vunmap(va->va_start, va->va_end);
  	free_unmap_vmap_area_noflush(va);
  }
  static struct vmap_area *find_vmap_area(unsigned long addr)
  {
  	struct vmap_area *va;
  
  	spin_lock(&vmap_area_lock);
  	va = __find_vmap_area(addr);
  	spin_unlock(&vmap_area_lock);
  
  	return va;
  }
  
  static void free_unmap_vmap_area_addr(unsigned long addr)
  {
  	struct vmap_area *va;
  
  	va = find_vmap_area(addr);
  	BUG_ON(!va);
  	free_unmap_vmap_area(va);
  }
  
  
  /*** Per cpu kva allocator ***/
  
  /*
   * vmap space is limited especially on 32 bit architectures. Ensure there is
   * room for at least 16 percpu vmap blocks per CPU.
   */
  /*
   * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
   * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
   * instead (we just need a rough idea)
   */
  #if BITS_PER_LONG == 32
  #define VMALLOC_SPACE		(128UL*1024*1024)
  #else
  #define VMALLOC_SPACE		(128UL*1024*1024*1024)
  #endif
  
  #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
  #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
  #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
  #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
  #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
  #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
  #define VMAP_BBMAP_BITS		\
  		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
  		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
  			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
  
  #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
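
/*
 * Worked example (illustrative, not from the original file): on a
 * 32-bit box with 4K pages and NR_CPUS == 4, VMALLOC_PAGES is
 * 128MB / 4K = 32768, so the unclamped term is 32768 / 4 / 16 = 512
 * bits, which already lies between VMAP_BBMAP_BITS_MIN (64) and
 * VMAP_BBMAP_BITS_MAX (1024); VMAP_BBMAP_BITS is therefore 512 and
 * VMAP_BLOCK_SIZE is 512 * 4K = 2MB.
 */
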
  static bool vmap_initialized __read_mostly = false;
  struct vmap_block_queue {
  	spinlock_t lock;
  	struct list_head free;
  };
  
  struct vmap_block {
  	spinlock_t lock;
  	struct vmap_area *va;
  	struct vmap_block_queue *vbq;
  	unsigned long free, dirty;
  	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
  	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
  	struct list_head free_list;
  	struct rcu_head rcu_head;
  	struct list_head purge;
  };
  
  /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
  static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
  
  /*
   * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
   * in the free path. Could get rid of this if we change the API to return a
   * "cookie" from alloc, to be passed to free. But no big deal yet.
   */
  static DEFINE_SPINLOCK(vmap_block_tree_lock);
  static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
  
  /*
   * We should probably have a fallback mechanism to allocate virtual memory
   * out of partially filled vmap blocks. However vmap block sizing should be
   * fairly reasonable according to the vmalloc size, so it shouldn't be a
   * big problem.
   */
  
  static unsigned long addr_to_vb_idx(unsigned long addr)
  {
  	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
  	addr /= VMAP_BLOCK_SIZE;
  	return addr;
  }
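
/*
 * Worked example (illustrative, not from the original file): with a
 * 2MB VMAP_BLOCK_SIZE, the subtraction rounds VMALLOC_START down to a
 * block boundary, so every address inside one vmap block divides down
 * to the same index; that index keys the radix tree below.
 */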
  
  static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
  {
  	struct vmap_block_queue *vbq;
  	struct vmap_block *vb;
  	struct vmap_area *va;
  	unsigned long vb_idx;
  	int node, err;
  
  	node = numa_node_id();
  
  	vb = kmalloc_node(sizeof(struct vmap_block),
  			gfp_mask & GFP_RECLAIM_MASK, node);
  	if (unlikely(!vb))
  		return ERR_PTR(-ENOMEM);
  
  	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
  					VMALLOC_START, VMALLOC_END,
  					node, gfp_mask);
  	if (IS_ERR(va)) {
  		kfree(vb);
  		return ERR_CAST(va);
  	}
  
  	err = radix_tree_preload(gfp_mask);
  	if (unlikely(err)) {
  		kfree(vb);
  		free_vmap_area(va);
  		return ERR_PTR(err);
  	}
  
  	spin_lock_init(&vb->lock);
  	vb->va = va;
  	vb->free = VMAP_BBMAP_BITS;
  	vb->dirty = 0;
  	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
  	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
  	INIT_LIST_HEAD(&vb->free_list);
  
  	vb_idx = addr_to_vb_idx(va->va_start);
  	spin_lock(&vmap_block_tree_lock);
  	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
  	spin_unlock(&vmap_block_tree_lock);
  	BUG_ON(err);
  	radix_tree_preload_end();
  
  	vbq = &get_cpu_var(vmap_block_queue);
  	vb->vbq = vbq;
  	spin_lock(&vbq->lock);
  	list_add_rcu(&vb->free_list, &vbq->free);
  	spin_unlock(&vbq->lock);
  	put_cpu_var(vmap_block_queue);
  
  	return vb;
  }
  static void free_vmap_block(struct vmap_block *vb)
  {
  	struct vmap_block *tmp;
  	unsigned long vb_idx;
  	vb_idx = addr_to_vb_idx(vb->va->va_start);
  	spin_lock(&vmap_block_tree_lock);
  	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
  	spin_unlock(&vmap_block_tree_lock);
  	BUG_ON(tmp != vb);
  	free_vmap_area_noflush(vb->va);
  	kfree_rcu(vb, rcu_head);
  }
  static void purge_fragmented_blocks(int cpu)
  {
  	LIST_HEAD(purge);
  	struct vmap_block *vb;
  	struct vmap_block *n_vb;
  	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  
  	rcu_read_lock();
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  
  		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
  			continue;
  
  		spin_lock(&vb->lock);
  		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
  			vb->free = 0; /* prevent further allocs after releasing lock */
  			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
  			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
  			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
  			spin_lock(&vbq->lock);
  			list_del_rcu(&vb->free_list);
  			spin_unlock(&vbq->lock);
  			spin_unlock(&vb->lock);
  			list_add_tail(&vb->purge, &purge);
  		} else
  			spin_unlock(&vb->lock);
  	}
  	rcu_read_unlock();
  
  	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
  		list_del(&vb->purge);
  		free_vmap_block(vb);
  	}
  }
  
  static void purge_fragmented_blocks_thiscpu(void)
  {
  	purge_fragmented_blocks(smp_processor_id());
  }
  
  static void purge_fragmented_blocks_allcpus(void)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		purge_fragmented_blocks(cpu);
  }
  static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
  {
  	struct vmap_block_queue *vbq;
  	struct vmap_block *vb;
  	unsigned long addr = 0;
  	unsigned int order;
  	int purge = 0;
  
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  	if (WARN_ON(size == 0)) {
  		/*
  		 * Allocating 0 bytes isn't what caller wants since
  		 * get_order(0) returns funny result. Just warn and terminate
  		 * early.
  		 */
  		return NULL;
  	}
  	order = get_order(size);
  
  again:
  	rcu_read_lock();
  	vbq = &get_cpu_var(vmap_block_queue);
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  		int i;
  
  		spin_lock(&vb->lock);
  		if (vb->free < 1UL << order)
  			goto next;
  		i = bitmap_find_free_region(vb->alloc_map,
  						VMAP_BBMAP_BITS, order);
  		if (i < 0) {
  			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
  				/* fragmented and no outstanding allocations */
  				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
  				purge = 1;
  			}
  			goto next;
  		}
  		addr = vb->va->va_start + (i << PAGE_SHIFT);
  		BUG_ON(addr_to_vb_idx(addr) !=
  				addr_to_vb_idx(vb->va->va_start));
  		vb->free -= 1UL << order;
  		if (vb->free == 0) {
  			spin_lock(&vbq->lock);
  			list_del_rcu(&vb->free_list);
  			spin_unlock(&vbq->lock);
  		}
  		spin_unlock(&vb->lock);
  		break;
  next:
  		spin_unlock(&vb->lock);
  	}
  
  	if (purge)
  		purge_fragmented_blocks_thiscpu();
  	put_cpu_var(vmap_block_queue);
  	rcu_read_unlock();
  
  	if (!addr) {
  		vb = new_vmap_block(gfp_mask);
  		if (IS_ERR(vb))
  			return vb;
  		goto again;
  	}
  
  	return (void *)addr;
  }
  
  static void vb_free(const void *addr, unsigned long size)
  {
  	unsigned long offset;
  	unsigned long vb_idx;
  	unsigned int order;
  	struct vmap_block *vb;
  
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  
  	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
  	order = get_order(size);
  
  	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
  
  	vb_idx = addr_to_vb_idx((unsigned long)addr);
  	rcu_read_lock();
  	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
  	rcu_read_unlock();
  	BUG_ON(!vb);
  	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
  	spin_lock(&vb->lock);
  	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

  	vb->dirty += 1UL << order;
  	if (vb->dirty == VMAP_BBMAP_BITS) {
  		BUG_ON(vb->free);
  		spin_unlock(&vb->lock);
  		free_vmap_block(vb);
  	} else
  		spin_unlock(&vb->lock);
  }
  
  /**
   * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
   *
   * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
   * to amortize TLB flushing overheads. What this means is that any page you
   * have now, may, in a former life, have been mapped into kernel virtual
   * address by the vmap layer and so there might be some CPUs with TLB entries
   * still referencing that page (additional to the regular 1:1 kernel mapping).
   *
   * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
   * be sure that none of the pages we have control over will have any aliases
   * from the vmap layer.
   */
  void vm_unmap_aliases(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  	int cpu;
  	int flush = 0;
  	if (unlikely(!vmap_initialized))
  		return;
  	for_each_possible_cpu(cpu) {
  		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  		struct vmap_block *vb;
  
  		rcu_read_lock();
  		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  			int i;
  
  			spin_lock(&vb->lock);
  			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
  			while (i < VMAP_BBMAP_BITS) {
  				unsigned long s, e;
  				int j;
  				j = find_next_zero_bit(vb->dirty_map,
  					VMAP_BBMAP_BITS, i);
  
  				s = vb->va->va_start + (i << PAGE_SHIFT);
  				e = vb->va->va_start + (j << PAGE_SHIFT);
  				flush = 1;
  
  				if (s < start)
  					start = s;
  				if (e > end)
  					end = e;
  
  				i = j;
  				i = find_next_bit(vb->dirty_map,
  							VMAP_BBMAP_BITS, i);
  			}
  			spin_unlock(&vb->lock);
  		}
  		rcu_read_unlock();
  	}
  
  	__purge_vmap_area_lazy(&start, &end, 1, flush);
  }
  EXPORT_SYMBOL_GPL(vm_unmap_aliases);
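
/*
 * Illustrative use (not from the original file): code about to hand
 * pages back to a hypervisor, or to change the attributes of their
 * linear-kernel mapping, can call vm_unmap_aliases() first so that no
 * stale lazy vmap TLB entries still reference those pages.
 */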
  
  /**
   * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
   * @mem: the pointer returned by vm_map_ram
   * @count: the count passed to that vm_map_ram call (cannot unmap partial)
   */
  void vm_unmap_ram(const void *mem, unsigned int count)
  {
  	unsigned long size = count << PAGE_SHIFT;
  	unsigned long addr = (unsigned long)mem;
  
  	BUG_ON(!addr);
  	BUG_ON(addr < VMALLOC_START);
  	BUG_ON(addr > VMALLOC_END);
  	BUG_ON(addr & (PAGE_SIZE-1));
  
  	debug_check_no_locks_freed(mem, size);
  	vmap_debug_free_range(addr, addr+size);
  
  	if (likely(count <= VMAP_MAX_ALLOC))
  		vb_free(mem, size);
  	else
  		free_unmap_vmap_area_addr(addr);
  }
  EXPORT_SYMBOL(vm_unmap_ram);
  
  /**
   * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
   * @pages: an array of pointers to the pages to be mapped
   * @count: number of pages
   * @node: prefer to allocate data structures on this node
   * @prot: memory protection to use. PAGE_KERNEL for regular RAM
   *
   * Returns: a pointer to the address that has been mapped, or %NULL on failure
   */
  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
  {
  	unsigned long size = count << PAGE_SHIFT;
  	unsigned long addr;
  	void *mem;
  
  	if (likely(count <= VMAP_MAX_ALLOC)) {
  		mem = vb_alloc(size, GFP_KERNEL);
  		if (IS_ERR(mem))
  			return NULL;
  		addr = (unsigned long)mem;
  	} else {
  		struct vmap_area *va;
  		va = alloc_vmap_area(size, PAGE_SIZE,
  				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
  		if (IS_ERR(va))
  			return NULL;
  
  		addr = va->va_start;
  		mem = (void *)addr;
  	}
  	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
  		vm_unmap_ram(mem, count);
  		return NULL;
  	}
  	return mem;
  }
  EXPORT_SYMBOL(vm_map_ram);
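
/*
 * Illustrative sketch (not part of the original file): a typical
 * vm_map_ram()/vm_unmap_ram() pairing for a short-lived mapping.
 * All names are hypothetical; counts up to VMAP_MAX_ALLOC take the
 * fast per-CPU block path above.
 */
#if 0	/* example only */
static int example_with_mapping(struct page **pages, unsigned int count)
{
	void *mem;

	mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);
	if (!mem)
		return -ENOMEM;
	/* ... use the virtually contiguous buffer at mem ... */
	vm_unmap_ram(mem, count);	/* must pass the same count back */
	return 0;
}
#endif
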
  /**
   * vm_area_add_early - add vmap area early during boot
   * @vm: vm_struct to add
   *
   * This function is used to add fixed kernel vm area to vmlist before
   * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
   * should contain proper values and the other fields should be zero.
   *
   * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
   */
  void __init vm_area_add_early(struct vm_struct *vm)
  {
  	struct vm_struct *tmp, **p;
  
  	BUG_ON(vmap_initialized);
  	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
  		if (tmp->addr >= vm->addr) {
  			BUG_ON(tmp->addr < vm->addr + vm->size);
  			break;
  		} else
  			BUG_ON(tmp->addr + tmp->size > vm->addr);
  	}
  	vm->next = *p;
  	*p = vm;
  }
  
  /**
   * vm_area_register_early - register vmap area early during boot
   * @vm: vm_struct to register
   * @align: requested alignment
   *
   * This function is used to register kernel vm area before
   * vmalloc_init() is called.  @vm->size and @vm->flags should contain
   * proper values on entry and other fields should be zero.  On return,
   * vm->addr contains the allocated address.
   *
   * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
   */
  void __init vm_area_register_early(struct vm_struct *vm, size_t align)
  {
  	static size_t vm_init_off __initdata;
  	unsigned long addr;
  
  	addr = ALIGN(VMALLOC_START + vm_init_off, align);
  	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

  	vm->addr = (void *)addr;

  	vm_area_add_early(vm);
  }
  void __init vmalloc_init(void)
  {
  	struct vmap_area *va;
  	struct vm_struct *tmp;
  	int i;
  
  	for_each_possible_cpu(i) {
  		struct vmap_block_queue *vbq;
  
  		vbq = &per_cpu(vmap_block_queue, i);
  		spin_lock_init(&vbq->lock);
  		INIT_LIST_HEAD(&vbq->free);
  	}

  	/* Import existing vmlist entries. */
  	for (tmp = vmlist; tmp; tmp = tmp->next) {
  		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
  		va->flags = VM_VM_AREA;
  		va->va_start = (unsigned long)tmp->addr;
  		va->va_end = va->va_start + tmp->size;
  		va->vm = tmp;
  		__insert_vmap_area(va);
  	}
  
  	vmap_area_pcpu_hole = VMALLOC_END;
  	vmap_initialized = true;
  }
  /**
   * map_kernel_range_noflush - map kernel VM area with the specified pages
   * @addr: start of the VM area to map
   * @size: size of the VM area to map
   * @prot: page protection flags to use
   * @pages: pages to map
   *
   * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
   * specify should have been allocated using get_vm_area() and its
   * friends.
   *
   * NOTE:
   * This function does NOT do any cache flushing.  The caller is
   * responsible for calling flush_cache_vmap() on to-be-mapped areas
   * before calling this function.
   *
   * RETURNS:
   * The number of pages mapped on success, -errno on failure.
   */
  int map_kernel_range_noflush(unsigned long addr, unsigned long size,
  			     pgprot_t prot, struct page **pages)
  {
  	return vmap_page_range_noflush(addr, addr + size, prot, pages);
  }
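
/*
 * Illustrative sketch (not from the original file): the flush
 * discipline the NOTE above requires, with hypothetical names; the
 * caller issues flush_cache_vmap() itself before mapping.
 */
#if 0	/* example only */
static int example_populate(unsigned long addr, unsigned long size,
			    struct page **pages)
{
	int ret;

	flush_cache_vmap(addr, addr + size);
	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;
}
#endif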
  
  /**
   * unmap_kernel_range_noflush - unmap kernel VM area
   * @addr: start of the VM area to unmap
   * @size: size of the VM area to unmap
   *
   * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
   * specify should have been allocated using get_vm_area() and its
   * friends.
   *
   * NOTE:
   * This function does NOT do any cache flushing.  The caller is
   * responsible for calling flush_cache_vunmap() on to-be-mapped areas
   * before calling this function and flush_tlb_kernel_range() after.
   */
  void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
  {
  	vunmap_page_range(addr, addr + size);
  }
  EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
  
  /**
   * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
   * @addr: start of the VM area to unmap
   * @size: size of the VM area to unmap
   *
   * Similar to unmap_kernel_range_noflush() but flushes vcache before
   * the unmapping and tlb after.
   */
  void unmap_kernel_range(unsigned long addr, unsigned long size)
  {
  	unsigned long end = addr + size;
  
  	flush_cache_vunmap(addr, end);
  	vunmap_page_range(addr, end);
  	flush_tlb_kernel_range(addr, end);
  }
  
  int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
  {
  	unsigned long addr = (unsigned long)area->addr;
  	unsigned long end = addr + area->size - PAGE_SIZE;
  	int err;
  
  	err = vmap_page_range(addr, end, prot, *pages);
  	if (err > 0) {
  		*pages += err;
  		err = 0;
  	}
  
  	return err;
  }
  EXPORT_SYMBOL_GPL(map_vm_area);
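
/*
 * Illustrative sketch (not from the original file): reserving a kva
 * range with get_vm_area() and populating it through map_vm_area(),
 * as ioremap-style callers do. Names are hypothetical and error
 * unwinding is elided.
 */
#if 0	/* example only */
static void *example_map_area(struct page **pages, unsigned int npages)
{
	struct vm_struct *area;
	struct page **p = pages;	/* map_vm_area() advances this */

	area = get_vm_area((unsigned long)npages << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, &p))
		return NULL;
	return area->addr;
}
#endif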
  
  /*** Old vmalloc interfaces ***/
  DEFINE_RWLOCK(vmlist_lock);
  struct vm_struct *vmlist;
  static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
  			      unsigned long flags, const void *caller)
  {
  	vm->flags = flags;
  	vm->addr = (void *)va->va_start;
  	vm->size = va->va_end - va->va_start;
  	vm->caller = caller;
  	va->vm = vm;
  	va->flags |= VM_VM_AREA;
  }

  static void insert_vmalloc_vmlist(struct vm_struct *vm)
  {
  	struct vm_struct *tmp, **p;
  
  	vm->flags &= ~VM_UNLIST;
  	write_lock(&vmlist_lock);
  	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
  		if (tmp->addr >= vm->addr)
  			break;
  	}
  	vm->next = *p;
  	*p = vm;
  	write_unlock(&vmlist_lock);
  }
  static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
  			      unsigned long flags, const void *caller)
  {
  	setup_vmalloc_vm(vm, va, flags, caller);
  	insert_vmalloc_vmlist(vm);
  }
  static struct vm_struct *__get_vm_area_node(unsigned long size,
  		unsigned long align, unsigned long flags, unsigned long start,
  		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
  {
  	struct vmap_area *va;
  	struct vm_struct *area;

  	BUG_ON(in_interrupt());
  	if (flags & VM_IOREMAP) {
  		int bit = fls(size);
  
  		if (bit > IOREMAP_MAX_ORDER)
  			bit = IOREMAP_MAX_ORDER;
  		else if (bit < PAGE_SHIFT)
  			bit = PAGE_SHIFT;
  
  		align = 1ul << bit;
  	}

  	size = PAGE_ALIGN(size);
  	if (unlikely(!size))
  		return NULL;

  	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
  	if (unlikely(!area))
  		return NULL;
  	/*
  	 * We always allocate a guard page.
  	 */
  	size += PAGE_SIZE;
  	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
  	if (IS_ERR(va)) {
  		kfree(area);
  		return NULL;
  	}

  	/*
  	 * When this function is called from __vmalloc_node_range,
  	 * we do not add vm_struct to vmlist here to avoid
  	 * accessing uninitialized members of vm_struct such as
  	 * pages and nr_pages fields. They will be set later.
  	 * To distinguish it from others, we use a VM_UNLIST flag.
  	 */
  	if (flags & VM_UNLIST)
  		setup_vmalloc_vm(area, va, flags, caller);
  	else
  		insert_vmalloc_vm(area, va, flags, caller);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1318
  	return area;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1319
  }

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
  
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
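
/*
 * Editorial usage sketch (not part of vmalloc.c): a typical caller
 * reserves kernel virtual space with get_vm_area() and tears it down
 * with remove_vm_area(), roughly what arch ioremap() implementations
 * do.  The size below is hypothetical.
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(PAGE_SIZE * 16, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	(map a physical range into area->addr here, then later:)
 *	kfree(remove_vm_area(area->addr));
 */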

/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		if (!(vm->flags & VM_UNLIST)) {
			struct vm_struct *tmp, **p;
			/*
			 * remove from list and disallow access to
			 * this vm_struct before unmap. (address range
			 * conflicts are handled by vmap.)
			 */
			write_lock(&vmlist_lock);
			for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
				;
			*p = tmp->next;
			write_unlock(&vmlist_lock);
		}

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
  
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);

	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
  
/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
  
/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
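
/*
 * Editorial usage sketch (not part of vmalloc.c): building a virtually
 * contiguous view of individually allocated pages with vmap() and
 * releasing the mapping with vunmap().  With VM_MAP the pages stay
 * owned by the caller, so they are freed separately.  The fixed count
 * of four pages is hypothetical.
 *
 *	struct page *pages[4];
 *	void *vaddr;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	vaddr = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	if (vaddr) {
 *		(use vaddr[0 .. 4*PAGE_SIZE) here)
 *		vunmap(vaddr);
 *	}
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */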

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, const void *caller)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node < 0)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}
  
/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;

	/*
	 * The newly created vm_struct was not added to vmlist in
	 * __get_vm_area_node() (VM_UNLIST was passed), so add it here.
	 */
	insert_vmalloc_vmlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}
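
/*
 * Editorial sketch (not part of vmalloc.c): __vmalloc_node_range() is
 * what lets a caller place an allocation in a range other than
 * [VMALLOC_START, VMALLOC_END).  An architecture's module_alloc() is
 * the classic user; MODULES_VADDR/MODULES_END/MODULES_LEN below are
 * arch-specific symbols, shown here only for illustration.
 *
 *	void *module_alloc(unsigned long size)
 *	{
 *		if (PAGE_ALIGN(size) > MODULES_LEN)
 *			return NULL;
 *		return __vmalloc_node_range(size, 1, MODULES_VADDR,
 *				MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
 *				PAGE_KERNEL_EXEC, NUMA_NO_NODE,
 *				__builtin_return_address(0));
 *	}
 */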

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
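
/*
 * Editorial usage sketch (not part of vmalloc.c): vmalloc() for a
 * large table that need not be physically contiguous, paired with
 * vfree().  "struct entry" and nr_entries are hypothetical.
 *
 *	struct entry *tbl;
 *
 *	tbl = vmalloc(nr_entries * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	(fill and use tbl here)
 *	vfree(tbl);
 */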

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
  
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
  
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding the overhead of
		 * vmalloc()/vfree() calls to this rarely used _debug_
		 * interface.  Instead, we use kmap() and accept a small
		 * overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
  
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding the overhead of
		 * vmalloc()/vfree() calls to this rarely used _debug_
		 * interface.  Instead, we use kmap() and accept a small
		 * overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
  
/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns the number of bytes by which @addr and @buf should be
 *	advanced (the same number as @count).  Returns 0 if
 *	[addr...addr+count) does not intersect any live vmalloc area.
 *
 *	This function checks that @addr is a valid vmalloc'ed area, and
 *	copies data from that area to the given buffer.  If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied to the proper area of @buf.  If there are memory holes, they
 *	are zero-filled.  IOREMAP areas are treated as memory holes and no
 *	copy is done.
 *
 *	If [addr...addr+count) does not intersect any live vm_struct area,
 *	this returns 0.  @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vread() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any other information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
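
/*
 * Editorial usage sketch (not part of vmalloc.c): how a /proc/kcore
 * style reader pulls data out of the vmalloc region with vread();
 * holes and ioremap ranges come back zero-filled instead of faulting.
 * "vmalloc_addr", "count" and "uaddr" are hypothetical.
 *
 *	char *kbuf = kmalloc(count, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	vread(kbuf, (char *)vmalloc_addr, count);
 *	if (copy_to_user(uaddr, kbuf, count)) {
 *		kfree(kbuf);
 *		return -EFAULT;
 *	}
 *	kfree(kbuf);
 */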

/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	Returns the number of bytes by which @addr and @buf should be
 *	advanced (the same number as @count).  Returns 0 if
 *	[addr...addr+count) does not intersect any valid vmalloc area.
 *
 *	This function checks that @addr is a valid vmalloc'ed area, and
 *	copies data from the buffer to the given address.  If the specified
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied from the proper area of @buf.  If there are memory holes,
 *	nothing is copied to them.  IOREMAP areas are treated as memory
 *	holes and no copy is done.
 *
 *	If [addr...addr+count) does not intersect any live vm_struct area,
 *	this returns 0.  @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vwrite() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any other information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return a failure if
 *	those criteria aren't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
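
/*
 * Editorial usage sketch (not part of vmalloc.c): the usual pairing of
 * vmalloc_user() with remap_vmalloc_range() in a driver's mmap handler.
 * "struct my_dev" and its "buf" field are hypothetical; the buffer must
 * carry VM_USERMAP, which vmalloc_user() sets.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 *
 * (dev->buf was allocated earlier with vmalloc_user().)
 */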

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}
  
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
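
/*
 * Editorial usage sketch (not part of vmalloc.c): alloc_vm_area() hands
 * back empty, pagetable-backed kernel address space; Xen-style users
 * then fill in the returned PTEs with foreign mappings.  The fixed
 * count of four pages and the fill step are hypothetical.
 *
 *	pte_t *ptes[4];
 *	struct vm_struct *area;
 *
 *	area = alloc_vm_area(4 * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *	(point ptes[0..3] at the desired frames here, use area->addr)
 *	...
 *	free_vm_area(area);
 */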

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}
  
  /**
   * pvm_determine_end - find the highest aligned address between two vmap_areas
   * @pnext: in/out arg for the next vmap_area
   * @pprev: in/out arg for the previous vmap_area
   * @align: alignment
   *
   * Returns: determined end address
   *
   * Find the highest aligned address between *@pnext and *@pprev below
   * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
   * down address is between the end addresses of the two vmap_areas.
   *
   * Please note that the address returned by this function may fall
   * inside *@pnext vmap_area.  The caller is responsible for checking
   * that.
   */
  static unsigned long pvm_determine_end(struct vmap_area **pnext,
  				       struct vmap_area **pprev,
  				       unsigned long align)
  {
  	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
  	unsigned long addr;
  
  	if (*pnext)
  		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
  	else
  		addr = vmalloc_end;
  
  	while (*pprev && (*pprev)->va_end > addr) {
  		*pnext = *pprev;
  		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
  	}
  
  	return addr;
  }
  
  /**
   * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
   * @offsets: array containing offset of each area
   * @sizes: array containing size of each area
   * @nr_vms: the number of areas to allocate
   * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start)  {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
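
/*
 * Editorial usage sketch (not part of vmalloc.c): the percpu allocator
 * (mm/percpu-vm.c) is the caller of this pair; it asks for one
 * congruent area per percpu unit, with offsets and sizes both
 * @align-aligned.  The two-unit layout and the numbers below are
 * hypothetical.
 *
 *	static const unsigned long offsets[2] = { 0, 4UL << 20 };
 *	static const size_t sizes[2] = { 1UL << 20, 1UL << 20 };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, 1UL << 20);
 *	if (!vms)
 *		return -ENOMEM;
 *	(vms[0]->addr and vms[1]->addr keep the requested offsets
 *	 from a common base)
 *	...
 *	pcpu_free_vm_areas(vms, 2);
 */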
  #endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmlist_lock)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmlist_lock)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (IS_ENABLED(CONFIG_NUMA)) {
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
	}
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
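
/*
 * Editorial note (not part of vmalloc.c): given the s_show() format
 * strings above, a /proc/vmallocinfo line looks roughly like the
 * hypothetical sample below; %pK prints zeros for unprivileged
 * readers when kptr_restrict is set, and the size column includes the
 * guard page, so 12288 bytes corresponds to pages=2.
 *
 *	0xffffc90000000000-0xffffc90000003000   12288 some_caller+0x32/0x80 pages=2 vmalloc N0=2
 */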
  #endif