drivers/xen/balloon.c

/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/e820.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
#define inc_totalhigh_pages() do {} while (0)
#define dec_totalhigh_pages() do {} while (0)
#endif

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
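
/*
 * Zero a page's contents before the underlying frame is handed back to
 * Xen, so no stale data leaves this domain. Compiled out unless
 * CONFIG_XEN_SCRUB_PAGES is enabled.
 */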
  
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	if (PageHighMem(page))
		dec_totalhigh_pages();
	totalram_pages--;
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page)) {
		balloon_stats.balloon_high--;
		inc_totalhigh_pages();
	} else
		balloon_stats.balloon_low--;
	totalram_pages++;

	return page;
}
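
/*
 * Iteration helpers used by increase_reservation(): walk the list of
 * ballooned pages from its head without removing any entries.
 */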
  
static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}
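
/*
 * Work out when balloon_process() should run next. A successful pass
 * (BP_DONE) resets the back-off state; an error doubles schedule_delay
 * (capped at max_schedule_delay) and, once retry_count exceeds
 * max_retry_count (unless retries are unlimited), gives up with
 * BP_ECANCELED.
 */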
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}
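
/*
 * Two flavours of the credit/inflation helpers follow: with
 * CONFIG_XEN_BALLOON_MEMORY_HOTPLUG the balloon can grow the domain by
 * hot-adding a new memory section; without it the target is simply capped
 * at the memory the balloon already knows about.
 */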
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages -
		balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
			balloon_stats.balloon_hotplug)
		return true;
	else
		return false;
}

/*
 * reserve_additional_memory() adds a memory region of size >= credit above
 * max_pfn. The new region is section aligned and its size is rounded up to a
 * multiple of the section size. These properties allow optimal use of the
 * address space and establish proper alignment when this function is called
 * for the first time after boot (the last section not fully populated at boot
 * time contains unused memory pages with the PG_reserved bit not set;
 * online_pages_range() does not allow onlining pages in the whole range if the
 * first onlined page does not have the PG_reserved bit set). The real size of
 * the added memory is established at the page onlining stage.
 */

static enum bp_state reserve_additional_memory(long credit)
{
	int nid, rc;
	u64 hotplug_start_paddr;
	unsigned long balloon_hotplug = credit;

	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

	if (rc) {
		pr_info("xen_balloon: %s: add_memory() failed: %i\n", __func__, rc);
		return BP_EAGAIN;
	}

	balloon_hotplug -= credit;

	balloon_stats.hotplug_pages += credit;
	balloon_stats.balloon_hotplug = balloon_hotplug;

	return BP_DONE;
}
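
/*
 * Callback (registered via set_online_page_callback) invoked for each
 * hot-added page as it is onlined: instead of going straight to the page
 * allocator the page is parked in the balloon and the hotplug accounting
 * is updated.
 */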
  
static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	if (balloon_stats.hotplug_pages)
		--balloon_stats.hotplug_pages;
	else
		--balloon_stats.balloon_hotplug;

	mutex_unlock(&balloon_mutex);
}
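
/*
 * Memory hotplug notifier: once a newly added section goes online, kick
 * the worker so the parked pages can actually be claimed back.
 */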
  
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
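
/*
 * Without memory hotplug the target can never exceed what the balloon can
 * supply (current pages plus the ballooned-out low/high pages), so clamp
 * it before working out the credit.
 */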
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high)
		return true;
	else
		return false;
}

static enum bp_state reserve_additional_memory(long credit)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
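
/*
 * Balloon in (grow the domain): ask Xen to populate up to nr_pages of the
 * currently ballooned frames via XENMEM_populate_physmap, restore their
 * P2M entries and kernel mappings, and release them to the page allocator.
 */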
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long  pfn, i;
	struct page   *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
		balloon_stats.hotplug_pages += nr_pages;
		balloon_stats.balloon_hotplug -= nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (xen_pv_domain() && !PageHighMem(page)) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}
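
/*
 * Balloon out (shrink the domain): allocate pages from the kernel, scrub
 * and unmap them, invalidate their P2M entries, park them on the balloon
 * list and return the underlying frames to Xen with
 * XENMEM_decrease_reservation.
 */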
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long  pfn, i;
	struct page   *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (balloon_stats.hotplug_pages) {
		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
		balloon_stats.hotplug_pages -= nr_pages;
		balloon_stats.balloon_hotplug += nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(gfp)) == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (xen_pv_domain() && !PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}

	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}
  
/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory(credit);
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}
  
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	int pgno = 0;
	struct page *page;
	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(highmem);
		if (page && (highmem || !PageHighMem(page))) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;
			if (page)
				balloon_append(page);
			st = decrease_reservation(nr_pages - pgno,
					highmem ? GFP_HIGHUSER : GFP_USER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);
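
/*
 * Called at boot to seed the balloon with the pages of an "extra memory"
 * region: the pages start out ballooned out and can be claimed later by
 * increase_reservation().
 */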
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/* totalram_pages and totalhigh_pages do not
		   include the boot-time balloon extension, so
		   don't subtract from it. */
		__balloon_append(page);
	}
}
  
static int __init balloon_init(void)
{
	int i;

	if (!xen_domain())
		return -ENODEV;

	pr_info("xen/balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: max_pfn;
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	balloon_stats.hotplug_pages = 0;
	balloon_stats.balloon_hotplug = 0;

	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].size)
			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
					   PFN_DOWN(xen_extra_mem[i].size));

	return 0;
}

subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");