  /*
   *  linux/mm/vmalloc.c
   *
   *  Copyright (C) 1993  Linus Torvalds
   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   *  Numa awareness, Christoph Lameter, SGI, June 2005
   */
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
  #include <linux/module.h>
  #include <linux/highmem.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/debugobjects.h>
  #include <linux/kallsyms.h>
  #include <linux/list.h>
  #include <linux/rbtree.h>
  #include <linux/radix-tree.h>
  #include <linux/rcupdate.h>
  #include <linux/pfn.h>
  #include <linux/kmemleak.h>
  #include <linux/atomic.h>
  #include <linux/compiler.h>
  #include <linux/llist.h>
  #include <linux/bitops.h>

  #include <asm/uaccess.h>
  #include <asm/tlbflush.h>
  #include <asm/shmparam.h>

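/*
 * vfree() may be called from contexts in which __vunmap() must not run
 * (e.g. interrupt context). Such requests are queued on a per-CPU llist
 * and drained from a workqueue by free_work() below.
 */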
  struct vfree_deferred {
  	struct llist_head list;
  	struct work_struct wq;
  };
  static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  
  static void __vunmap(const void *, int);
  
  static void free_work(struct work_struct *w)
  {
  	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  	struct llist_node *llnode = llist_del_all(&p->list);
  	while (llnode) {
  		void *p = llnode;
  		llnode = llist_next(llnode);
  		__vunmap(p, 1);
  	}
  }
  /*** Page table manipulation functions ***/

  static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  {
  	pte_t *pte;
  
  	pte = pte_offset_kernel(pmd, addr);
  	do {
  		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  }
  static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_clear_huge(pmd))
  			continue;
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		vunmap_pte_range(pmd, addr, next);
  	} while (pmd++, addr = next, addr != end);
  }
  static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_clear_huge(pud))
  			continue;
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		vunmap_pmd_range(pud, addr, next);
  	} while (pud++, addr = next, addr != end);
  }
  static void vunmap_page_range(unsigned long addr, unsigned long end)
  {
  	pgd_t *pgd;
  	unsigned long next;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		vunmap_pud_range(pgd, addr, next);
  	} while (pgd++, addr = next, addr != end);
  }
  
  static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pte_t *pte;
  	/*
  	 * nr is a running index into the array which helps higher level
  	 * callers keep track of where we're up to.
  	 */
  	pte = pte_alloc_kernel(pmd, addr);
  	if (!pte)
  		return -ENOMEM;
  	do {
  		struct page *page = pages[*nr];
  
  		if (WARN_ON(!pte_none(*pte)))
  			return -EBUSY;
  		if (WARN_ON(!page))
  			return -ENOMEM;
  		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
  		(*nr)++;
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	return 0;
  }
  static int vmap_pmd_range(pud_t *pud, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_alloc(&init_mm, pud, addr);
  	if (!pmd)
  		return -ENOMEM;
  	do {
  		next = pmd_addr_end(addr, end);
  		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
  			return -ENOMEM;
  	} while (pmd++, addr = next, addr != end);
  	return 0;
  }
  static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_alloc(&init_mm, pgd, addr);
  	if (!pud)
  		return -ENOMEM;
  	do {
  		next = pud_addr_end(addr, end);
  		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
  			return -ENOMEM;
  	} while (pud++, addr = next, addr != end);
  	return 0;
  }
  /*
   * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
   * will have pfns corresponding to the "pages" array.
   *
   * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
   */
  static int vmap_page_range_noflush(unsigned long start, unsigned long end,
  				   pgprot_t prot, struct page **pages)
  {
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long addr = start;
  	int err = 0;
  	int nr = 0;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset_k(addr);
  	do {
  		next = pgd_addr_end(addr, end);
  		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
  		if (err)
  			return err;
  	} while (pgd++, addr = next, addr != end);

  	return nr;
  }
  static int vmap_page_range(unsigned long start, unsigned long end,
  			   pgprot_t prot, struct page **pages)
  {
  	int ret;
  
  	ret = vmap_page_range_noflush(start, end, prot, pages);
  	flush_cache_vmap(start, end);
  	return ret;
  }
  int is_vmalloc_or_module_addr(const void *x)
  {
  	/*
  	 * ARM, x86-64 and sparc64 put modules in a special place,
  	 * and fall back on vmalloc() if that fails. Others
  	 * just put it in the vmalloc space.
  	 */
  #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
  	unsigned long addr = (unsigned long)x;
  	if (addr >= MODULES_VADDR && addr < MODULES_END)
  		return 1;
  #endif
  	return is_vmalloc_addr(x);
  }
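
/*
 * Example: a pointer into this file's static data yields 0 here, while
 * a pointer into a vmalloc'ed buffer (or, on the architectures above,
 * into module text) yields 1.
 */
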
  /*
   * Walk a vmap address to the struct page it maps.
   */
  struct page *vmalloc_to_page(const void *vmalloc_addr)
  {
  	unsigned long addr = (unsigned long) vmalloc_addr;
  	struct page *page = NULL;
  	pgd_t *pgd = pgd_offset_k(addr);

  	/*
  	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
  	 * architectures that do not vmalloc module space
  	 */
  	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

  	if (!pgd_none(*pgd)) {
  		pud_t *pud = pud_offset(pgd, addr);
  		if (!pud_none(*pud)) {
  			pmd_t *pmd = pmd_offset(pud, addr);
  			if (!pmd_none(*pmd)) {
  				pte_t *ptep, pte;
  				ptep = pte_offset_map(pmd, addr);
  				pte = *ptep;
  				if (pte_present(pte))
  					page = pte_page(pte);
  				pte_unmap(ptep);
  			}
  		}
  	}
  	return page;
  }
  EXPORT_SYMBOL(vmalloc_to_page);
  
  /*
   * Map a vmalloc()-space virtual address to the physical page frame number.
   */
  unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
  {
  	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
  }
  EXPORT_SYMBOL(vmalloc_to_pfn);
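
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * find the physical frame behind one page of a vmalloc'ed buffer.
 */
#if 0
static unsigned long example_buf_pfn(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);	/* hypothetical buffer */
	unsigned long pfn = 0;

	if (buf) {
		/* translate the third page of the buffer */
		pfn = vmalloc_to_pfn(buf + 2 * PAGE_SIZE);
		vfree(buf);
	}
	return pfn;
}
#endif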

  
  /*** Global kva allocator ***/
  
  #define VM_LAZY_FREE	0x01
  #define VM_LAZY_FREEING	0x02
  #define VM_VM_AREA	0x04
  static DEFINE_SPINLOCK(vmap_area_lock);
  /* Export for kexec only */
  LIST_HEAD(vmap_area_list);
  static struct rb_root vmap_area_root = RB_ROOT;
  
  /* The vmap cache globals are protected by vmap_area_lock */
  static struct rb_node *free_vmap_cache;
  static unsigned long cached_hole_size;
  static unsigned long cached_vstart;
  static unsigned long cached_align;
  static unsigned long vmap_area_pcpu_hole;
  
  static struct vmap_area *__find_vmap_area(unsigned long addr)
  {
  	struct rb_node *n = vmap_area_root.rb_node;
  
  	while (n) {
  		struct vmap_area *va;
  
  		va = rb_entry(n, struct vmap_area, rb_node);
  		if (addr < va->va_start)
  			n = n->rb_left;
  		else if (addr >= va->va_end)
  			n = n->rb_right;
  		else
  			return va;
  	}
  
  	return NULL;
  }
  
  static void __insert_vmap_area(struct vmap_area *va)
  {
  	struct rb_node **p = &vmap_area_root.rb_node;
  	struct rb_node *parent = NULL;
  	struct rb_node *tmp;
  
  	while (*p) {
  		struct vmap_area *tmp_va;
  
  		parent = *p;
  		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
  		if (va->va_start < tmp_va->va_end)
  			p = &(*p)->rb_left;
  		else if (va->va_end > tmp_va->va_start)
  			p = &(*p)->rb_right;
  		else
  			BUG();
  	}
  
  	rb_link_node(&va->rb_node, parent, p);
  	rb_insert_color(&va->rb_node, &vmap_area_root);
  	/* address-sort this list */
  	tmp = rb_prev(&va->rb_node);
  	if (tmp) {
  		struct vmap_area *prev;
  		prev = rb_entry(tmp, struct vmap_area, rb_node);
  		list_add_rcu(&va->list, &prev->list);
  	} else
  		list_add_rcu(&va->list, &vmap_area_list);
  }
  
  static void purge_vmap_area_lazy(void);
  
  /*
   * Allocate a region of KVA of the specified size and alignment, within the
   * vstart and vend.
   */
  static struct vmap_area *alloc_vmap_area(unsigned long size,
  				unsigned long align,
  				unsigned long vstart, unsigned long vend,
  				int node, gfp_t gfp_mask)
  {
  	struct vmap_area *va;
  	struct rb_node *n;
  	unsigned long addr;
  	int purged = 0;
  	struct vmap_area *first;

  	BUG_ON(!size);
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(!is_power_of_2(align));

  	va = kmalloc_node(sizeof(struct vmap_area),
  			gfp_mask & GFP_RECLAIM_MASK, node);
  	if (unlikely(!va))
  		return ERR_PTR(-ENOMEM);
  	/*
  	 * Only scan the relevant parts containing pointers to other objects
  	 * to avoid false negatives.
  	 */
  	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
  retry:
  	spin_lock(&vmap_area_lock);
  	/*
  	 * Invalidate cache if we have more permissive parameters.
  	 * cached_hole_size notes the largest hole noticed _below_
  	 * the vmap_area cached in free_vmap_cache: if size fits
  	 * into that hole, we want to scan from vstart to reuse
  	 * the hole instead of allocating above free_vmap_cache.
  	 * Note that __free_vmap_area may update free_vmap_cache
  	 * without updating cached_hole_size or cached_align.
  	 */
  	if (!free_vmap_cache ||
  			size < cached_hole_size ||
  			vstart < cached_vstart ||
  			align < cached_align) {
  nocache:
  		cached_hole_size = 0;
  		free_vmap_cache = NULL;
  	}
  	/* record if we encounter less permissive parameters */
  	cached_vstart = vstart;
  	cached_align = align;
  
  	/* find starting point for our search */
  	if (free_vmap_cache) {
  		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
  		addr = ALIGN(first->va_end, align);
  		if (addr < vstart)
  			goto nocache;
  		if (addr + size < addr)
  			goto overflow;
  
  	} else {
  		addr = ALIGN(vstart, align);
  		if (addr + size < addr)
  			goto overflow;
  
  		n = vmap_area_root.rb_node;
  		first = NULL;
  
  		while (n) {
  			struct vmap_area *tmp;
  			tmp = rb_entry(n, struct vmap_area, rb_node);
  			if (tmp->va_end >= addr) {
  				first = tmp;
  				if (tmp->va_start <= addr)
  					break;
  				n = n->rb_left;
  			} else
  				n = n->rb_right;
  		}
  
  		if (!first)
  			goto found;
  	}
  
  	/* from the starting point, walk areas until a suitable hole is found */
  	while (addr + size > first->va_start && addr + size <= vend) {
  		if (addr + cached_hole_size < first->va_start)
  			cached_hole_size = first->va_start - addr;
  		addr = ALIGN(first->va_end, align);
  		if (addr + size < addr)
  			goto overflow;
  		if (list_is_last(&first->list, &vmap_area_list))
  			goto found;
  
  		first = list_entry(first->list.next,
  				struct vmap_area, list);
  	}
  found:
  	if (addr + size > vend)
  		goto overflow;
  
  	va->va_start = addr;
  	va->va_end = addr + size;
  	va->flags = 0;
  	__insert_vmap_area(va);
  	free_vmap_cache = &va->rb_node;
  	spin_unlock(&vmap_area_lock);
  	BUG_ON(va->va_start & (align-1));
  	BUG_ON(va->va_start < vstart);
  	BUG_ON(va->va_end > vend);
  	return va;
  
  overflow:
  	spin_unlock(&vmap_area_lock);
  	if (!purged) {
  		purge_vmap_area_lazy();
  		purged = 1;
  		goto retry;
  	}
  	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
  	kfree(va);
  	return ERR_PTR(-EBUSY);
  }
  static void __free_vmap_area(struct vmap_area *va)
  {
  	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
  
  	if (free_vmap_cache) {
  		if (va->va_end < cached_vstart) {
  			free_vmap_cache = NULL;
  		} else {
  			struct vmap_area *cache;
  			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
  			if (va->va_start <= cache->va_start) {
  				free_vmap_cache = rb_prev(&va->rb_node);
  				/*
  				 * We don't try to update cached_hole_size or
  				 * cached_align, but it won't go very wrong.
  				 */
  			}
  		}
  	}
  	rb_erase(&va->rb_node, &vmap_area_root);
  	RB_CLEAR_NODE(&va->rb_node);
  	list_del_rcu(&va->list);
  	/*
  	 * Track the highest possible candidate for pcpu area
  	 * allocation.  Areas outside of vmalloc area can be returned
  	 * here too, consider only end addresses which fall inside
  	 * vmalloc area proper.
  	 */
  	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
  		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
  	kfree_rcu(va, rcu_head);
  }
  
  /*
   * Free a region of KVA allocated by alloc_vmap_area
   */
  static void free_vmap_area(struct vmap_area *va)
  {
  	spin_lock(&vmap_area_lock);
  	__free_vmap_area(va);
  	spin_unlock(&vmap_area_lock);
  }
  
  /*
   * Clear the pagetable entries of a given vmap_area
   */
  static void unmap_vmap_area(struct vmap_area *va)
  {
  	vunmap_page_range(va->va_start, va->va_end);
  }
  static void vmap_debug_free_range(unsigned long start, unsigned long end)
  {
  	/*
  	 * Unmap page tables and force a TLB flush immediately if
  	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
  	 * bugs similarly to those in linear kernel virtual address
  	 * space after a page has been freed.
  	 *
  	 * All the lazy freeing logic is still retained, in order to
  	 * minimise intrusiveness of this debugging feature.
  	 *
  	 * This is going to be *slow* (linear kernel virtual address
  	 * debugging doesn't do a broadcast TLB flush so it is a lot
  	 * faster).
  	 */
  #ifdef CONFIG_DEBUG_PAGEALLOC
  	vunmap_page_range(start, end);
  	flush_tlb_kernel_range(start, end);
  #endif
  }
  /*
   * lazy_max_pages is the maximum amount of virtual address space we gather up
   * before attempting to purge with a TLB flush.
   *
   * There is a tradeoff here: a larger number will cover more kernel page tables
   * and take slightly longer to purge, but it will linearly reduce the number of
   * global TLB flushes that must be performed. It would seem natural to scale
   * this number up linearly with the number of CPUs (because vmapping activity
   * could also scale linearly with the number of CPUs), however it is likely
   * that in practice, workloads might be constrained in other ways that mean
   * vmap activity will not scale linearly with CPUs. Also, I want to be
   * conservative and not introduce a big latency on huge systems, so go with
   * a less aggressive log scale. It will still be an improvement over the old
   * code, and it will be simple to change the scale factor if we find that it
   * becomes a problem on bigger systems.
   */
  static unsigned long lazy_max_pages(void)
  {
  	unsigned int log;
  
  	log = fls(num_online_cpus());
  
  	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
  }
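
/*
 * Example: with 64 online CPUs, fls(64) = 7, so up to 7 * 32MB = 224MB
 * worth of lazily freed area (57344 pages with 4K pages) may accumulate
 * before a purge is triggered.
 */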
  
  static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  /* for per-CPU blocks */
  static void purge_fragmented_blocks_allcpus(void);
  /*
   * called before a call to iounmap() if the caller wants vm_area_struct's
   * immediately freed.
   */
  void set_iounmap_nonlazy(void)
  {
  	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
  }
  
  /*
   * Purges all lazily-freed vmap areas.
   *
   * If sync is 0 then don't purge if there is already a purge in progress.
   * If force_flush is 1, then flush kernel TLBs between *start and *end even
   * if we found no lazy vmap areas to unmap (callers can use this to optimise
   * their own TLB flushing).
   * Returns with *start = min(*start, lowest purged address)
   *              *end = max(*end, highest purged address)
   */
  static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  					int sync, int force_flush)
  {
  	static DEFINE_SPINLOCK(purge_lock);
  	LIST_HEAD(valist);
  	struct vmap_area *va;
  	struct vmap_area *n_va;
  	int nr = 0;
  
  	/*
  	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
  	 * should not expect such behaviour. This just simplifies locking for
  	 * the case that isn't actually used at the moment anyway.
  	 */
  	if (!sync && !force_flush) {
  		if (!spin_trylock(&purge_lock))
  			return;
  	} else
  		spin_lock(&purge_lock);

  	if (sync)
  		purge_fragmented_blocks_allcpus();
  	rcu_read_lock();
  	list_for_each_entry_rcu(va, &vmap_area_list, list) {
  		if (va->flags & VM_LAZY_FREE) {
  			if (va->va_start < *start)
  				*start = va->va_start;
  			if (va->va_end > *end)
  				*end = va->va_end;
  			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
  			list_add_tail(&va->purge_list, &valist);
  			va->flags |= VM_LAZY_FREEING;
  			va->flags &= ~VM_LAZY_FREE;
  		}
  	}
  	rcu_read_unlock();
  	if (nr)
  		atomic_sub(nr, &vmap_lazy_nr);
  
  	if (nr || force_flush)
  		flush_tlb_kernel_range(*start, *end);
  
  	if (nr) {
  		spin_lock(&vmap_area_lock);
  		list_for_each_entry_safe(va, n_va, &valist, purge_list)
  			__free_vmap_area(va);
  		spin_unlock(&vmap_area_lock);
  	}
  	spin_unlock(&purge_lock);
  }
  
  /*
   * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
   * is already purging.
   */
  static void try_purge_vmap_area_lazy(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  
  	__purge_vmap_area_lazy(&start, &end, 0, 0);
  }
  
  /*
   * Kick off a purge of the outstanding lazy areas.
   */
  static void purge_vmap_area_lazy(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  	__purge_vmap_area_lazy(&start, &end, 1, 0);
  }
  
  /*
   * Free a vmap area, caller ensuring that the area has been unmapped
   * and flush_cache_vunmap had been called for the correct range
   * previously.
   */
  static void free_vmap_area_noflush(struct vmap_area *va)
  {
  	va->flags |= VM_LAZY_FREE;
  	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
  	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
  		try_purge_vmap_area_lazy();
  }
  /*
   * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
   * called for the correct range previously.
   */
  static void free_unmap_vmap_area_noflush(struct vmap_area *va)
  {
  	unmap_vmap_area(va);
  	free_vmap_area_noflush(va);
  }
  
  /*
   * Free and unmap a vmap area
   */
  static void free_unmap_vmap_area(struct vmap_area *va)
  {
  	flush_cache_vunmap(va->va_start, va->va_end);
  	free_unmap_vmap_area_noflush(va);
  }
  static struct vmap_area *find_vmap_area(unsigned long addr)
  {
  	struct vmap_area *va;
  
  	spin_lock(&vmap_area_lock);
  	va = __find_vmap_area(addr);
  	spin_unlock(&vmap_area_lock);
  
  	return va;
  }
  
  static void free_unmap_vmap_area_addr(unsigned long addr)
  {
  	struct vmap_area *va;
  
  	va = find_vmap_area(addr);
  	BUG_ON(!va);
  	free_unmap_vmap_area(va);
  }
  
  
  /*** Per cpu kva allocator ***/
  
  /*
   * vmap space is limited especially on 32 bit architectures. Ensure there is
   * room for at least 16 percpu vmap blocks per CPU.
   */
  /*
   * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
   * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
   * instead (we just need a rough idea)
   */
  #if BITS_PER_LONG == 32
  #define VMALLOC_SPACE		(128UL*1024*1024)
  #else
  #define VMALLOC_SPACE		(128UL*1024*1024*1024)
  #endif
  
  #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
  #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
  #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
  #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
  #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
  #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
  #define VMAP_BBMAP_BITS		\
  		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
  		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
  			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
  
  #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
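
/*
 * Example sizing: on a 32-bit machine (VMALLOC_SPACE guessed as 128MB
 * above) with 4K pages and NR_CPUS = 4, VMALLOC_PAGES = 32768, so
 * 32768 / 4 / 16 = 512; clamped to [VMAP_BBMAP_BITS_MIN = 64,
 * VMAP_BBMAP_BITS_MAX = 1024] this gives VMAP_BBMAP_BITS = 512 and
 * VMAP_BLOCK_SIZE = 2MB.
 */
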
  static bool vmap_initialized __read_mostly = false;
  struct vmap_block_queue {
  	spinlock_t lock;
  	struct list_head free;
  };
  
  struct vmap_block {
  	spinlock_t lock;
  	struct vmap_area *va;
  	unsigned long free, dirty;
  	unsigned long dirty_min, dirty_max; /*< dirty range */
  	struct list_head free_list;
  	struct rcu_head rcu_head;
  	struct list_head purge;
  };
  
  /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
  static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
  
  /*
   * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
   * in the free path. Could get rid of this if we change the API to return a
   * "cookie" from alloc, to be passed to free. But no big deal yet.
   */
  static DEFINE_SPINLOCK(vmap_block_tree_lock);
  static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
  
  /*
   * We should probably have a fallback mechanism to allocate virtual memory
   * out of partially filled vmap blocks. However vmap block sizing should be
   * fairly reasonable according to the vmalloc size, so it shouldn't be a
   * big problem.
   */
  
  static unsigned long addr_to_vb_idx(unsigned long addr)
  {
  	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
  	addr /= VMAP_BLOCK_SIZE;
  	return addr;
  }
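
/*
 * Example: assuming VMALLOC_START is VMAP_BLOCK_SIZE-aligned and
 * VMAP_BLOCK_SIZE is 2MB, every address in
 * [VMALLOC_START + 2MB, VMALLOC_START + 4MB) yields index 1, so all
 * allocations carved from the same block share one radix-tree slot.
 */
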
  static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
  {
  	unsigned long addr;
  
  	addr = va_start + (pages_off << PAGE_SHIFT);
  	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
  	return (void *)addr;
  }
  
  /**
   * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
   *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
   * @order:    how many 2^order pages should be occupied in newly allocated block
   * @gfp_mask: flags for the page level allocator
   *
   * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
   */
  static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
  {
  	struct vmap_block_queue *vbq;
  	struct vmap_block *vb;
  	struct vmap_area *va;
  	unsigned long vb_idx;
  	int node, err;
  	void *vaddr;
  
  	node = numa_node_id();
  
  	vb = kmalloc_node(sizeof(struct vmap_block),
  			gfp_mask & GFP_RECLAIM_MASK, node);
  	if (unlikely(!vb))
  		return ERR_PTR(-ENOMEM);
  
  	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
  					VMALLOC_START, VMALLOC_END,
  					node, gfp_mask);
  	if (IS_ERR(va)) {
  		kfree(vb);
  		return ERR_CAST(va);
  	}
  
  	err = radix_tree_preload(gfp_mask);
  	if (unlikely(err)) {
  		kfree(vb);
  		free_vmap_area(va);
  		return ERR_PTR(err);
  	}
  	vaddr = vmap_block_vaddr(va->va_start, 0);
  	spin_lock_init(&vb->lock);
  	vb->va = va;
  	/* At least something should be left free */
  	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
  	vb->free = VMAP_BBMAP_BITS - (1UL << order);
  	vb->dirty = 0;
  	vb->dirty_min = VMAP_BBMAP_BITS;
  	vb->dirty_max = 0;
  	INIT_LIST_HEAD(&vb->free_list);
  
  	vb_idx = addr_to_vb_idx(va->va_start);
  	spin_lock(&vmap_block_tree_lock);
  	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
  	spin_unlock(&vmap_block_tree_lock);
  	BUG_ON(err);
  	radix_tree_preload_end();
  
  	vbq = &get_cpu_var(vmap_block_queue);
  	spin_lock(&vbq->lock);
  	list_add_tail_rcu(&vb->free_list, &vbq->free);
  	spin_unlock(&vbq->lock);
  	put_cpu_var(vmap_block_queue);

  	return vaddr;
  }
  static void free_vmap_block(struct vmap_block *vb)
  {
  	struct vmap_block *tmp;
  	unsigned long vb_idx;
  	vb_idx = addr_to_vb_idx(vb->va->va_start);
  	spin_lock(&vmap_block_tree_lock);
  	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
  	spin_unlock(&vmap_block_tree_lock);
  	BUG_ON(tmp != vb);
  	free_vmap_area_noflush(vb->va);
  	kfree_rcu(vb, rcu_head);
  }
  static void purge_fragmented_blocks(int cpu)
  {
  	LIST_HEAD(purge);
  	struct vmap_block *vb;
  	struct vmap_block *n_vb;
  	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  
  	rcu_read_lock();
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  
  		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
  			continue;
  
  		spin_lock(&vb->lock);
  		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
  			vb->free = 0; /* prevent further allocs after releasing lock */
  			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
  			vb->dirty_min = 0;
  			vb->dirty_max = VMAP_BBMAP_BITS;
  			spin_lock(&vbq->lock);
  			list_del_rcu(&vb->free_list);
  			spin_unlock(&vbq->lock);
  			spin_unlock(&vb->lock);
  			list_add_tail(&vb->purge, &purge);
  		} else
  			spin_unlock(&vb->lock);
  	}
  	rcu_read_unlock();
  
  	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
  		list_del(&vb->purge);
  		free_vmap_block(vb);
  	}
  }
  static void purge_fragmented_blocks_allcpus(void)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		purge_fragmented_blocks(cpu);
  }
  static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
  {
  	struct vmap_block_queue *vbq;
  	struct vmap_block *vb;
  	void *vaddr = NULL;
  	unsigned int order;
  
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  	if (WARN_ON(size == 0)) {
  		/*
  		 * Allocating 0 bytes isn't what caller wants since
  		 * get_order(0) returns funny result. Just warn and terminate
  		 * early.
  		 */
  		return NULL;
  	}
  	order = get_order(size);
  	rcu_read_lock();
  	vbq = &get_cpu_var(vmap_block_queue);
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  		unsigned long pages_off;
  
  		spin_lock(&vb->lock);
  		if (vb->free < (1UL << order)) {
  			spin_unlock(&vb->lock);
  			continue;
  		}

  		pages_off = VMAP_BBMAP_BITS - vb->free;
  		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
  		vb->free -= 1UL << order;
  		if (vb->free == 0) {
  			spin_lock(&vbq->lock);
  			list_del_rcu(&vb->free_list);
  			spin_unlock(&vbq->lock);
  		}

  		spin_unlock(&vb->lock);
  		break;
  	}

  	put_cpu_var(vmap_block_queue);
  	rcu_read_unlock();
  	/* Allocate new block if nothing was found */
  	if (!vaddr)
  		vaddr = new_vmap_block(order, gfp_mask);

  	return vaddr;
  }
  
  static void vb_free(const void *addr, unsigned long size)
  {
  	unsigned long offset;
  	unsigned long vb_idx;
  	unsigned int order;
  	struct vmap_block *vb;
  
  	BUG_ON(size & ~PAGE_MASK);
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  
  	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
  	order = get_order(size);
  
  	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
  	offset >>= PAGE_SHIFT;
  
  	vb_idx = addr_to_vb_idx((unsigned long)addr);
  	rcu_read_lock();
  	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
  	rcu_read_unlock();
  	BUG_ON(!vb);
  	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
  	spin_lock(&vb->lock);
  
  	/* Expand dirty range */
  	vb->dirty_min = min(vb->dirty_min, offset);
  	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

  	vb->dirty += 1UL << order;
  	if (vb->dirty == VMAP_BBMAP_BITS) {
  		BUG_ON(vb->free);
  		spin_unlock(&vb->lock);
  		free_vmap_block(vb);
  	} else
  		spin_unlock(&vb->lock);
  }
  
  /**
   * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
   *
   * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
   * to amortize TLB flushing overheads. What this means is that any page you
   * have now, may, in a former life, have been mapped into kernel virtual
   * address by the vmap layer and so there might be some CPUs with TLB entries
   * still referencing that page (additional to the regular 1:1 kernel mapping).
   *
   * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
   * be sure that none of the pages we have control over will have any aliases
   * from the vmap layer.
   */
  void vm_unmap_aliases(void)
  {
  	unsigned long start = ULONG_MAX, end = 0;
  	int cpu;
  	int flush = 0;
  	if (unlikely(!vmap_initialized))
  		return;
  	for_each_possible_cpu(cpu) {
  		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  		struct vmap_block *vb;
  
  		rcu_read_lock();
  		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  			spin_lock(&vb->lock);
  			if (vb->dirty) {
  				unsigned long va_start = vb->va->va_start;
  				unsigned long s, e;

  				s = va_start + (vb->dirty_min << PAGE_SHIFT);
  				e = va_start + (vb->dirty_max << PAGE_SHIFT);

  				start = min(s, start);
  				end   = max(e, end);

  				flush = 1;
  			}
  			spin_unlock(&vb->lock);
  		}
  		rcu_read_unlock();
  	}
  
  	__purge_vmap_area_lazy(&start, &end, 1, flush);
  }
  EXPORT_SYMBOL_GPL(vm_unmap_aliases);
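
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * flush lazy aliases before changing the attributes of pages we own.
 * set_memory_ro() is just an example follow-up; addr and nr_pages are
 * assumptions.
 */
#if 0
	vm_unmap_aliases();
	set_memory_ro(addr, nr_pages);
#endif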
  
  /**
   * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
   * @mem: the pointer returned by vm_map_ram
   * @count: the count passed to that vm_map_ram call (cannot unmap partial)
   */
  void vm_unmap_ram(const void *mem, unsigned int count)
  {
  	unsigned long size = count << PAGE_SHIFT;
  	unsigned long addr = (unsigned long)mem;
  
  	BUG_ON(!addr);
  	BUG_ON(addr < VMALLOC_START);
  	BUG_ON(addr > VMALLOC_END);
  	BUG_ON(addr & (PAGE_SIZE-1));
  
  	debug_check_no_locks_freed(mem, size);
  	vmap_debug_free_range(addr, addr+size);
  
  	if (likely(count <= VMAP_MAX_ALLOC))
  		vb_free(mem, size);
  	else
  		free_unmap_vmap_area_addr(addr);
  }
  EXPORT_SYMBOL(vm_unmap_ram);
  
  /**
   * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
   * @pages: an array of pointers to the pages to be mapped
   * @count: number of pages
   * @node: prefer to allocate data structures on this node
   * @prot: memory protection to use. PAGE_KERNEL for regular RAM
   *
   * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
   * faster than vmap so it's good.  But if you mix long-life and short-life
   * objects with vm_map_ram(), it could consume lots of address space through
   * fragmentation (especially on a 32bit machine).  You could see failures in
   * the end.  Please use this function for short-lived objects.
   *
   * Returns: a pointer to the address that has been mapped, or %NULL on failure
   */
  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
  {
  	unsigned long size = count << PAGE_SHIFT;
  	unsigned long addr;
  	void *mem;
  
  	if (likely(count <= VMAP_MAX_ALLOC)) {
  		mem = vb_alloc(size, GFP_KERNEL);
  		if (IS_ERR(mem))
  			return NULL;
  		addr = (unsigned long)mem;
  	} else {
  		struct vmap_area *va;
  		va = alloc_vmap_area(size, PAGE_SIZE,
  				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
  		if (IS_ERR(va))
  			return NULL;
  
  		addr = va->va_start;
  		mem = (void *)addr;
  	}
  	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
  		vm_unmap_ram(mem, count);
  		return NULL;
  	}
  	return mem;
  }
  EXPORT_SYMBOL(vm_map_ram);
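
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * map a small, short-lived page array, use it, and unmap it again with
 * the same count that was passed to vm_map_ram().
 */
#if 0
static int example_use_pages(struct page **pages, unsigned int count)
{
	void *mem = vm_map_ram(pages, count, numa_node_id(), PAGE_KERNEL);

	if (!mem)
		return -ENOMEM;
	memset(mem, 0, count * PAGE_SIZE);	/* use the linear mapping */
	vm_unmap_ram(mem, count);
	return 0;
}
#endif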
  static struct vm_struct *vmlist __initdata;
  /**
   * vm_area_add_early - add vmap area early during boot
   * @vm: vm_struct to add
   *
   * This function is used to add fixed kernel vm area to vmlist before
   * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
   * should contain proper values and the other fields should be zero.
   *
   * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
   */
  void __init vm_area_add_early(struct vm_struct *vm)
  {
  	struct vm_struct *tmp, **p;
  
  	BUG_ON(vmap_initialized);
  	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
  		if (tmp->addr >= vm->addr) {
  			BUG_ON(tmp->addr < vm->addr + vm->size);
  			break;
  		} else
  			BUG_ON(tmp->addr + tmp->size > vm->addr);
  	}
  	vm->next = *p;
  	*p = vm;
  }
  
  /**
   * vm_area_register_early - register vmap area early during boot
   * @vm: vm_struct to register
   * @align: requested alignment
   *
   * This function is used to register kernel vm area before
   * vmalloc_init() is called.  @vm->size and @vm->flags should contain
   * proper values on entry and other fields should be zero.  On return,
   * vm->addr contains the allocated address.
   *
   * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
   */
  void __init vm_area_register_early(struct vm_struct *vm, size_t align)
  {
  	static size_t vm_init_off __initdata;
  	unsigned long addr;
  
  	addr = ALIGN(VMALLOC_START + vm_init_off, align);
  	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

  	vm->addr = (void *)addr;

  	vm_area_add_early(vm);
  }
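
/*
 * Illustrative sketch (hypothetical early-boot caller): carve out a
 * fixed 1MB area, e.g. from setup_arch(), before vmalloc_init() runs.
 */
#if 0
static struct vm_struct example_early_vm;

static void __init example_reserve(void)
{
	example_early_vm.flags = VM_IOREMAP;
	example_early_vm.size = 1UL << 20;
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the chosen address */
}
#endif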
  void __init vmalloc_init(void)
  {
  	struct vmap_area *va;
  	struct vm_struct *tmp;
  	int i;
  
  	for_each_possible_cpu(i) {
  		struct vmap_block_queue *vbq;
  		struct vfree_deferred *p;
  
  		vbq = &per_cpu(vmap_block_queue, i);
  		spin_lock_init(&vbq->lock);
  		INIT_LIST_HEAD(&vbq->free);
  		p = &per_cpu(vfree_deferred, i);
  		init_llist_head(&p->list);
  		INIT_WORK(&p->wq, free_work);
  	}

  	/* Import existing vmlist entries. */
  	for (tmp = vmlist; tmp; tmp = tmp->next) {
  		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
  		va->flags = VM_VM_AREA;
  		va->va_start = (unsigned long)tmp->addr;
  		va->va_end = va->va_start + tmp->size;
  		va->vm = tmp;
  		__insert_vmap_area(va);
  	}
  
  	vmap_area_pcpu_hole = VMALLOC_END;
  	vmap_initialized = true;
  }
  /**
   * map_kernel_range_noflush - map kernel VM area with the specified pages
   * @addr: start of the VM area to map
   * @size: size of the VM area to map
   * @prot: page protection flags to use
   * @pages: pages to map
   *
   * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
   * specify should have been allocated using get_vm_area() and its
   * friends.
   *
   * NOTE:
   * This function does NOT do any cache flushing.  The caller is
   * responsible for calling flush_cache_vmap() on to-be-mapped areas
   * before calling this function.
   *
   * RETURNS:
   * The number of pages mapped on success, -errno on failure.
   */
  int map_kernel_range_noflush(unsigned long addr, unsigned long size,
  			     pgprot_t prot, struct page **pages)
  {
  	return vmap_page_range_noflush(addr, addr + size, prot, pages);
  }
  
  /**
   * unmap_kernel_range_noflush - unmap kernel VM area
   * @addr: start of the VM area to unmap
   * @size: size of the VM area to unmap
   *
   * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
   * specify should have been allocated using get_vm_area() and its
   * friends.
   *
   * NOTE:
   * This function does NOT do any cache flushing.  The caller is
   * responsible for calling flush_cache_vunmap() on to-be-mapped areas
   * before calling this function and flush_tlb_kernel_range() after.
   */
  void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
  {
  	vunmap_page_range(addr, addr + size);
  }
  EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
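
/*
 * Illustrative sketch of the flushing protocol the two _noflush helpers
 * above document (hypothetical caller; area and pages are assumptions):
 */
#if 0
static void example_remap(struct vm_struct *area, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);

	flush_cache_vunmap(addr, addr + size);		/* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */

	flush_cache_vmap(addr, addr + size);		/* before mapping */
	map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
}
#endif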
  
  /**
   * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
   * @addr: start of the VM area to unmap
   * @size: size of the VM area to unmap
   *
   * Similar to unmap_kernel_range_noflush() but flushes vcache before
   * the unmapping and tlb after.
   */
  void unmap_kernel_range(unsigned long addr, unsigned long size)
  {
  	unsigned long end = addr + size;
f6fcba701   Tejun Heo   vmalloc: call flu...
1230
1231
  
  	flush_cache_vunmap(addr, end);
db64fe022   Nick Piggin   mm: rewrite vmap ...
1232
1233
1234
  	vunmap_page_range(addr, end);
  	flush_tlb_kernel_range(addr, end);
  }
93ef6d6ca   Minchan Kim   mm/vmalloc.c: exp...
1235
  EXPORT_SYMBOL_GPL(unmap_kernel_range);
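
#if 0
/*
 * Illustrative sketch only (not compiled in): how a caller might drive
 * map_kernel_range_noflush() while honouring the cache-flushing duty
 * described in the kernel-doc above.  "sketch_map_pages" and its
 * parameters are made-up names, not part of this file.
 */
static int sketch_map_pages(struct vm_struct *area, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);
	int ret;

	/* per the NOTE above, the cache flush is the caller's job */
	flush_cache_vmap(addr, addr + size);
	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;
}
#endif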

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
  }
  EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;
	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
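
#if 0
/*
 * Illustrative sketch only (not compiled in): reserving an area with
 * get_vm_area() and backing it with pages via map_vm_area() -- this is
 * essentially what vmap() later in this file does.  "sketch_vmap" is a
 * made-up name.
 */
static void *sketch_vmap(struct page **pages, unsigned int npages)
{
	struct vm_struct *area;

	area = get_vm_area(npages << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);	/* also releases the vm_struct */
		return NULL;
	}
	return area->addr;
}
#endif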

/**
 *	find_vm_area  -  find a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be a really bad idea)
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
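
#if 0
/*
 * Illustrative sketch only (not compiled in): a typical vmap()/vunmap()
 * round trip over two independently allocated pages.  All names are
 * made up for the example.
 */
static void sketch_vmap_usage(void)
{
	struct page *pages[2] = { NULL, NULL };
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (va) {
		memset(va, 0, 2 * PAGE_SIZE);	/* pages are now virtually contiguous */
		vunmap(va);	/* drops the mapping, does not free the pages */
	}
out:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
}
#endif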

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
		if (gfp_mask & __GFP_WAIT)
			cond_resched();
	}
	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}
  
  /**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
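
#if 0
/*
 * Illustrative sketch only (not compiled in): vmalloc() is the usual
 * choice when a buffer is too large for kmalloc()'s physically
 * contiguous pages.  "sketch_table" and the 1 MB size are made up.
 */
static int sketch_vmalloc_usage(void)
{
	unsigned long *sketch_table = vmalloc(1024 * 1024);

	if (!sketch_table)
		return -ENOMEM;
	memset(sketch_table, 0, 1024 * 1024);
	/* ... memory is virtually, not physically, contiguous ... */
	vfree(sketch_table);
	return 0;
}
#endif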

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
  
  /**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
  
  /**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
  /**
   * vzalloc_node - allocate memory on a specific node with zero fill
   * @size:	allocation size
   * @node:	numa node
   *
   * Allocate enough pages to cover @size from the page level
   * allocator and map them into contiguous kernel virtual space.
   * The memory allocated is set to zero.
   *
   * For tight control over page level allocator and protection flags
   * use __vmalloc_node() instead.
   */
  void *vzalloc_node(unsigned long size, int node)
  {
  	return __vmalloc_node_flags(size, node,
  			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
  }
  EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill with zero.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
  
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
  
/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns # of bytes by which addr and buf should be increased
 *	(same number as @count). Returns 0 if [addr...addr+count) doesn't
 *	include any intersection with a live vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from that area to a given buffer. If the given memory range
 *	of [addr...addr+count) includes some valid address, data is copied to
 *	the proper area of @buf. If there are memory holes, they'll be zero-filled.
 *	An IOREMAP area is treated as a memory hole and no copy is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with a live
 *	vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vread() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
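
#if 0
/*
 * Illustrative sketch only (not compiled in): a /dev/kmem-style reader
 * copying out of a possibly sparse vmalloc range with vread().  Holes
 * and IOREMAP ranges come back zero-filled.  Names are made up.
 */
static long sketch_vread(char *kbuf, unsigned long vm_addr, unsigned long len)
{
	/* kbuf must be a kernel buffer of at least len bytes */
	return vread(kbuf, (char *)vm_addr, len);
}
#endif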

/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	Returns # of bytes by which addr and buf should be increased
 *	(same number as @count).
 *	If [addr...addr+count) doesn't include any intersection with a valid
 *	vmalloc area, returns 0.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from a buffer to the given addr. If the specified range of
 *	[addr...addr+count) includes some valid address, data is copied from
 *	the proper area of @buf. If there are memory holes, no copy is done
 *	for the hole.  An IOREMAP area is treated as a memory hole and no
 *	copy is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with a live
 *	vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vwrite() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if those criteria aren't
 *	met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
  EXPORT_SYMBOL(remap_vmalloc_range_partial);
  
  /**
   *	remap_vmalloc_range  -  map vmalloc pages to userspace
   *	@vma:		vma to cover (map full range of vma)
   *	@addr:		vmalloc memory
   *	@pgoff:		number of pages into addr before first page to map
   *
   *	Returns:	0 for success, -Exxx on failure
   *
   *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	those criteria aren't met.
   *
   *	Similar to remap_pfn_range() (see mm/memory.c)
   */
  int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
  						unsigned long pgoff)
  {
  	return remap_vmalloc_range_partial(vma, vma->vm_start,
  					   addr + (pgoff << PAGE_SHIFT),
  					   vma->vm_end - vma->vm_start);
  }
  EXPORT_SYMBOL(remap_vmalloc_range);
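
#if 0
/*
 * Illustrative sketch only (not compiled in): the classic consumer of
 * remap_vmalloc_range() is a driver mmap() handler exposing a buffer
 * allocated with vmalloc_user(), which sets VM_USERMAP.  "sketch_buf"
 * and "sketch_mmap" are made-up names.
 */
static void *sketch_buf;	/* assume vmalloc_user() at init time */

static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, sketch_buf, vma->vm_pgoff);
}
#endif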

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
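
#if 0
/*
 * Illustrative sketch only (not compiled in): alloc_vm_area() hands back
 * both the reserved range and the PTEs backing it, so callers such as
 * Xen can install mappings themselves.  Names are made up.
 */
static void sketch_alloc_vm_area(void)
{
	pte_t *ptes[4];
	struct vm_struct *vm;

	vm = alloc_vm_area(4 * PAGE_SIZE, ptes);
	if (!vm)
		return;
	/* the caller may now point ptes[0..3] at, e.g., foreign pages */
	free_vm_area(vm);
}
#endif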
  #ifdef CONFIG_SMP
  static struct vmap_area *node_to_va(struct rb_node *n)
  {
  	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
  }
  
  /**
   * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
   * @end: target address
   * @pnext: out arg for the next vmap_area
   * @pprev: out arg for the previous vmap_area
   *
   * Returns: %true if either or both of next and prev are found,
   *	    %false if no vmap_area exists
   *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
   */
  static bool pvm_find_next_prev(unsigned long end,
  			       struct vmap_area **pnext,
  			       struct vmap_area **pprev)
  {
  	struct rb_node *n = vmap_area_root.rb_node;
  	struct vmap_area *va = NULL;
  
  	while (n) {
  		va = rb_entry(n, struct vmap_area, rb_node);
  		if (end < va->va_end)
  			n = n->rb_left;
  		else if (end > va->va_end)
  			n = n->rb_right;
  		else
  			break;
  	}
  
  	if (!va)
  		return false;
  
  	if (va->va_end > end) {
  		*pnext = va;
  		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
  	} else {
  		*pprev = va;
  		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
  	}
  	return true;
  }
  
  /**
   * pvm_determine_end - find the highest aligned address between two vmap_areas
   * @pnext: in/out arg for the next vmap_area
   * @pprev: in/out arg for the previous vmap_area
   * @align: alignment
   *
   * Returns: determined end address
   *
   * Find the highest aligned address between *@pnext and *@pprev below
   * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
   * down address is between the end addresses of the two vmap_areas.
   *
   * Please note that the address returned by this function may fall
   * inside *@pnext vmap_area.  The caller is responsible for checking
   * that.
   */
  static unsigned long pvm_determine_end(struct vmap_area **pnext,
  				       struct vmap_area **pprev,
  				       unsigned long align)
  {
  	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
  	unsigned long addr;
  
  	if (*pnext)
  		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
  	else
  		addr = vmalloc_end;
  
  	while (*pprev && (*pprev)->va_end > addr) {
  		*pnext = *pprev;
  		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
  	}
  
  	return addr;
  }
  
  /**
   * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
   * @offsets: array containing offset of each area
   * @sizes: array containing size of each area
   * @nr_vms: the number of areas to allocate
   * @align: alignment, all entries in @offsets and @sizes must be aligned to this
ca23e405e   Tejun Heo   vmalloc: implemen...
2267
2268
2269
2270
2271
2272
   *
   * Returns: kmalloc'd vm_struct pointer array pointing to allocated
   *	    vm_structs on success, %NULL on failure
   *
   * Percpu allocator wants to use congruent vm areas so that it can
   * maintain the offsets among percpu areas.  This function allocates
ec3f64fc9   David Rientjes   mm: remove gfp ma...
2273
2274
2275
2276
   * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
   * be scattered pretty far, distance between two areas easily going up
   * to gigabytes.  To avoid interacting with regular vmallocs, these
   * areas are allocated from top.
ca23e405e   Tejun Heo   vmalloc: implemen...
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
   *
   * Despite its complicated look, this allocator is rather simple.  It
   * does everything top-down and scans areas from the end looking for
   * matching slot.  While scanning, if any of the areas overlaps with
   * existing vmap_area, the base address is pulled down to fit the
   * area.  Scanning is repeated till all the areas fit and then all
   * necessary data structres are inserted and the result is returned.
   */
  struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
  				     const size_t *sizes, int nr_vms,
ec3f64fc9   David Rientjes   mm: remove gfp ma...
2287
  				     size_t align)
ca23e405e   Tejun Heo   vmalloc: implemen...
2288
2289
2290
2291
2292
2293
2294
2295
  {
  	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
  	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
  	struct vmap_area **vas, *prev, *next;
  	struct vm_struct **vms;
  	int area, area2, last_area, term_area;
  	unsigned long base, start, end, last_end;
  	bool purged = false;
  	/* verify parameters and allocate data structures */
  	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
  	for (last_area = 0, area = 0; area < nr_vms; area++) {
  		start = offsets[area];
  		end = start + sizes[area];
  
  		/* is everything aligned properly? */
  		BUG_ON(!IS_ALIGNED(offsets[area], align));
  		BUG_ON(!IS_ALIGNED(sizes[area], align));
  
  		/* detect the area with the highest address */
  		if (start > offsets[last_area])
  			last_area = area;
  
  		for (area2 = 0; area2 < nr_vms; area2++) {
  			unsigned long start2 = offsets[area2];
  			unsigned long end2 = start2 + sizes[area2];
  
  			if (area2 == area)
  				continue;
  
  			/* the requested areas must not overlap each other */
  			BUG_ON(start2 >= start && start2 < end);
  			BUG_ON(end2 <= end && end2 > start);
  		}
  	}
  	last_end = offsets[last_area] + sizes[last_area];
  
  	if (vmalloc_end - vmalloc_start < last_end) {
  		WARN_ON(true);
  		return NULL;
  	}
  	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
  	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
  	if (!vas || !vms)
  		goto err_free2;
  
  	for (area = 0; area < nr_vms; area++) {
  		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
  		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
  		if (!vas[area] || !vms[area])
  			goto err_free;
  	}
  retry:
  	spin_lock(&vmap_area_lock);
  
  	/* start scanning - we scan from the top, begin with the last area */
  	area = term_area = last_area;
  	start = offsets[area];
  	end = start + sizes[area];
  
  	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
  		base = vmalloc_end - last_end;
  		goto found;
  	}
  	base = pvm_determine_end(&next, &prev, align) - end;
  
  	while (true) {
  		BUG_ON(next && next->va_end <= base + end);
  		BUG_ON(prev && prev->va_end > base + end);
  
  		/*
  		 * base might have underflowed, add last_end before
  		 * comparing.
  		 */
  		if (base + last_end < vmalloc_start + last_end) {
  			spin_unlock(&vmap_area_lock);
  			if (!purged) {
  				purge_vmap_area_lazy();
  				purged = true;
  				goto retry;
  			}
  			goto err_free;
  		}
  
  		/*
  		 * If next overlaps, move base downwards so that it's
  		 * right below next and then recheck.
  		 */
  		if (next && next->va_start < base + end) {
  			base = pvm_determine_end(&next, &prev, align) - end;
  			term_area = area;
  			continue;
  		}
  
  		/*
  		 * If prev overlaps, shift down next and prev and move
  		 * base so that it's right below new next and then
  		 * recheck.
  		 */
  		if (prev && prev->va_end > base + start)  {
  			next = prev;
  			prev = node_to_va(rb_prev(&next->rb_node));
  			base = pvm_determine_end(&next, &prev, align) - end;
  			term_area = area;
  			continue;
  		}
  
  		/*
  		 * This area fits, move on to the previous one.  If
  		 * the previous one is the terminal one, we're done.
  		 */
  		area = (area + nr_vms - 1) % nr_vms;
  		if (area == term_area)
  			break;
  		start = offsets[area];
  		end = start + sizes[area];
  		pvm_find_next_prev(base + end, &next, &prev);
  	}
  found:
  	/* we've found a fitting base, insert all va's */
  	for (area = 0; area < nr_vms; area++) {
  		struct vmap_area *va = vas[area];
  
  		va->va_start = base + offsets[area];
  		va->va_end = va->va_start + sizes[area];
  		__insert_vmap_area(va);
  	}
  
  	vmap_area_pcpu_hole = base + offsets[last_area];
  
  	spin_unlock(&vmap_area_lock);
  
  	/* insert all vm's */
  	for (area = 0; area < nr_vms; area++)
  		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
  				 pcpu_get_vm_areas);
  
  	kfree(vas);
  	return vms;
  
  err_free:
  	for (area = 0; area < nr_vms; area++) {
  		kfree(vas[area]);
  		kfree(vms[area]);
  	}
  err_free2:
  	kfree(vas);
  	kfree(vms);
  	return NULL;
  }
  
  /**
   * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
   * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
   * @nr_vms: the number of allocated areas
   *
   * Free vm_structs and the array allocated by pcpu_get_vm_areas().
   */
  void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
  {
  	int i;
  
  	for (i = 0; i < nr_vms; i++)
  		free_vm_area(vms[i]);
  	kfree(vms);
  }
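
  /*
   * Hedged usage sketch (not part of the original file): how a caller like
   * the percpu allocator might pair pcpu_get_vm_areas() with
   * pcpu_free_vm_areas().  The offsets, sizes and function name below are
   * illustrative assumptions only.
   */
  #if 0
  static int example_congruent_areas(void)
  {
  	/* two areas, 1MiB apart within the chunk, one page each */
  	const unsigned long offsets[] = { 0, 1UL << 20 };
  	const size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };
  	struct vm_struct **vms;

  	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
  	if (!vms)
  		return -ENOMEM;

  	/* vms[1]->addr - vms[0]->addr == 1MiB; map pages, use, then free */
  	pcpu_free_vm_areas(vms, 2);
  	return 0;
  }
  #endif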
  #endif	/* CONFIG_SMP */
  
  #ifdef CONFIG_PROC_FS
  static void *s_start(struct seq_file *m, loff_t *pos)
  	__acquires(&vmap_area_lock)
  {
  	loff_t n = *pos;
  	struct vmap_area *va;

  	spin_lock(&vmap_area_lock);
  	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
  	while (n > 0 && &va->list != &vmap_area_list) {
  		n--;
  		va = list_entry(va->list.next, typeof(*va), list);
  	}
  	if (!n && &va->list != &vmap_area_list)
  		return va;
  
  	return NULL;
  }
  
  static void *s_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	struct vmap_area *va = p, *next;

  	++*pos;
  	next = list_entry(va->list.next, typeof(*va), list);
  	if (&next->list != &vmap_area_list)
  		return next;
  
  	return NULL;
  }
  
  static void s_stop(struct seq_file *m, void *p)
  	__releases(&vmap_area_lock)
  {
  	spin_unlock(&vmap_area_lock);
  }
  static void show_numa_info(struct seq_file *m, struct vm_struct *v)
  {
  	if (IS_ENABLED(CONFIG_NUMA)) {
  		unsigned int nr, *counters = m->private;
  
  		if (!counters)
  			return;
  		if (v->flags & VM_UNINITIALIZED)
  			return;
  		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
  		smp_rmb();

  		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
  
  		for (nr = 0; nr < v->nr_pages; nr++)
  			counters[page_to_nid(v->pages[nr])]++;
  
  		for_each_node_state(nr, N_HIGH_MEMORY)
  			if (counters[nr])
  				seq_printf(m, " N%u=%u", nr, counters[nr]);
  	}
  }
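
  /*
   * Illustrative note (an assumption, not from the original source): for an
   * area whose pages sit on two NUMA nodes, show_numa_info() would append
   * something like " N0=3 N1=1" to the area's /proc/vmallocinfo line.
   */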
  static int s_show(struct seq_file *m, void *p)
  {
  	struct vmap_area *va = p;
  	struct vm_struct *v;

  	/*
  	 * s_show can race with remove_vm_area(): a clear VM_VM_AREA flag
  	 * means the vmap area is being torn down or belongs to a
  	 * vm_map_ram allocation.
  	 */
  	if (!(va->flags & VM_VM_AREA))
  		return 0;

  	v = va->vm;

  	seq_printf(m, "0x%pK-0x%pK %7ld",
  		v->addr, v->addr + v->size, v->size);

  	if (v->caller)
  		seq_printf(m, " %pS", v->caller);

  	if (v->nr_pages)
  		seq_printf(m, " pages=%d", v->nr_pages);

  	if (v->phys_addr)
  		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

  	if (v->flags & VM_IOREMAP)
  		seq_puts(m, " ioremap");

  	if (v->flags & VM_ALLOC)
  		seq_puts(m, " vmalloc");

  	if (v->flags & VM_MAP)
  		seq_puts(m, " vmap");

  	if (v->flags & VM_USERMAP)
  		seq_puts(m, " user");

  	if (v->flags & VM_VPAGES)
  		seq_puts(m, " vpages");

  	show_numa_info(m, v);
  	seq_putc(m, '\n');
  	return 0;
  }
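
  /*
   * Illustrative sketch (addresses and caller are made up): a line emitted
   * by s_show() for a 4-page VM_ALLOC area might look like
   *
   *   0xffffc90000004000-0xffffc90000009000   20480 alloc_thing+0x24/0x90 pages=4 vmalloc N0=4
   *
   * i.e. start-end, size in bytes (including the guard page), caller,
   * page count, type flags and the per-node counts from show_numa_info().
   */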
  static const struct seq_operations vmalloc_op = {
  	.start = s_start,
  	.next = s_next,
  	.stop = s_stop,
  	.show = s_show,
  };
  
  static int vmalloc_open(struct inode *inode, struct file *file)
  {
  	if (IS_ENABLED(CONFIG_NUMA))
  		return seq_open_private(file, &vmalloc_op,
  					nr_node_ids * sizeof(unsigned int));
  	else
  		return seq_open(file, &vmalloc_op);
  }
  
  static const struct file_operations proc_vmalloc_operations = {
  	.open		= vmalloc_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release_private,
  };
  
  static int __init proc_vmalloc_init(void)
  {
  	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
  	return 0;
  }
  module_init(proc_vmalloc_init);
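
  /*
   * Usage note (not in the original source): the entry is created with
   * S_IRUSR, so /proc/vmallocinfo is readable by root only, e.g. via
   * "sudo cat /proc/vmallocinfo".
   */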
  
  void get_vmalloc_info(struct vmalloc_info *vmi)
  {
  	struct vmap_area *va;
  	unsigned long free_area_size;
  	unsigned long prev_end;

  	vmi->used = 0;
  	vmi->largest_chunk = 0;

  	prev_end = VMALLOC_START;

  	rcu_read_lock();

  	if (list_empty(&vmap_area_list)) {
  		vmi->largest_chunk = VMALLOC_TOTAL;
  		goto out;
  	}

  	list_for_each_entry_rcu(va, &vmap_area_list, list) {
  		unsigned long addr = va->va_start;

  		/*
  		 * Some archs keep another range for modules in vmalloc space
  		 */
  		if (addr < VMALLOC_START)
  			continue;
  		if (addr >= VMALLOC_END)
  			break;

  		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
  			continue;

  		vmi->used += (va->va_end - va->va_start);

  		free_area_size = addr - prev_end;
  		if (vmi->largest_chunk < free_area_size)
  			vmi->largest_chunk = free_area_size;

  		prev_end = va->va_end;
  	}

  	if (VMALLOC_END - prev_end > vmi->largest_chunk)
  		vmi->largest_chunk = VMALLOC_END - prev_end;

  out:
  	rcu_read_unlock();
  }
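
  /*
   * Worked sketch (not in the original source; values are made up): the
   * same used/largest_chunk accounting as get_vmalloc_info(), run over two
   * hard-coded areas inside [start, end).
   */
  #if 0
  static void vmalloc_info_example(void)
  {
  	const unsigned long start = 0, end = 16UL << 20;
  	const unsigned long va_start[] = { 1UL << 20, 5UL << 20 };
  	const unsigned long va_end[] = { 2UL << 20, 6UL << 20 };
  	unsigned long used = 0, largest = 0, prev_end = start;
  	int i;

  	for (i = 0; i < 2; i++) {
  		used += va_end[i] - va_start[i];
  		if (va_start[i] - prev_end > largest)
  			largest = va_start[i] - prev_end;
  		prev_end = va_end[i];
  	}
  	if (end - prev_end > largest)
  		largest = end - prev_end;

  	BUG_ON(used != 2UL << 20);	/* 1MiB + 1MiB used */
  	BUG_ON(largest != 10UL << 20);	/* tail gap from 6MiB to 16MiB */
  }
  #endif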
  #endif