mm/zsmalloc.c

  /*
   * zsmalloc memory allocator
   *
   * Copyright (C) 2011  Nitin Gupta
   * Copyright (C) 2012, 2013 Minchan Kim
   *
   * This code is released using a dual license strategy: BSD/GPL
   * You can choose the license that better fits your requirements.
   *
   * Released under the terms of 3-clause BSD License
   * Released under the terms of GNU General Public License Version 2.0
   */
  
  /*
   * This allocator is designed for use with zram. Thus, the allocator is
   * supposed to work well under low memory conditions. In particular, it
   * never attempts higher order page allocation which is very likely to
   * fail under memory pressure. On the other hand, if we just use single
   * (0-order) pages, it would suffer from very high fragmentation --
   * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
   * This was one of the major issues with its predecessor (xvmalloc).
   *
   * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
   * and links them together using various 'struct page' fields. These linked
   * pages act as a single higher-order page i.e. an object can span 0-order
   * page boundaries. The code refers to these linked pages as a single entity
   * called zspage.
   *
   * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
   * since this satisfies the requirements of all its current users (in the
   * worst case, page is incompressible and is thus stored "as-is" i.e. in
   * uncompressed form). For allocation requests larger than this size, failure
   * is returned (see zs_malloc).
   *
   * Additionally, zs_malloc() does not return a dereferenceable pointer.
   * Instead, it returns an opaque handle (unsigned long) which encodes actual
   * location of the allocated object. The reason for this indirection is that
   * zsmalloc does not keep zspages permanently mapped since that would cause
   * issues on 32-bit systems where the VA region for kernel space mappings
   * is very small. So, before using the allocated memory, the object has to
   * be mapped using zs_map_object() to get a usable pointer and subsequently
   * unmapped using zs_unmap_object() (see the usage sketch after this comment).
   *
   * Following is how we use various fields and flags of underlying
   * struct page(s) to form a zspage.
   *
   * Usage of struct page fields:
   *	page->first_page: points to the first component (0-order) page
   *	page->index (union with page->freelist): offset of the first object
   *		starting in this page. For the first page, this is
   *		always 0, so we use this field (aka freelist) to point
   *		to the first free object in zspage.
   *	page->lru: links together all component pages (except the first page)
   *		of a zspage
   *
   *	For _first_ page only:
   *
   *	page->private (union with page->first_page): refers to the
   *		component page after the first page
   *	page->freelist: points to the first free object in zspage.
   *		Free objects are linked together using in-place
   *		metadata.
   *	page->objects: maximum number of objects we can store in this
   *		zspage (class->zspage_order * PAGE_SIZE / class->size)
   *	page->lru: links together first pages of various zspages.
   *		Basically forming list of zspages in a fullness group.
   *	page->mapping: class index and fullness group of the zspage
   *
   * Usage of struct page flags:
   *	PG_private: identifies the first component page
   *	PG_private2: identifies the last component page
   *
   */
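  
  /*
   * Illustrative usage sketch (editor's example, not part of the original
   * source): allocate a handle, map it only for the duration of the copy,
   * then unmap it again.  'src' and 'len' are assumed to be supplied by the
   * caller (e.g. a compressed page); the NULL/0 returns of zs_create_pool()
   * and zs_malloc() must of course be checked in real code.
   *
   *	struct zs_pool *pool = zs_create_pool(GFP_KERNEL);
   *	unsigned long handle = zs_malloc(pool, len);
   *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
   *
   *	memcpy(dst, src, len);
   *	zs_unmap_object(pool, handle);
   *	...
   *	zs_free(pool, handle);
   *	zs_destroy_pool(pool);
   */
  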
  #ifdef CONFIG_ZSMALLOC_DEBUG
  #define DEBUG
  #endif
  
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/bitops.h>
  #include <linux/errno.h>
  #include <linux/highmem.h>
  #include <linux/string.h>
  #include <linux/slab.h>
  #include <asm/tlbflush.h>
  #include <asm/pgtable.h>
  #include <linux/cpumask.h>
  #include <linux/cpu.h>
  #include <linux/vmalloc.h>
  #include <linux/hardirq.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>
  #include <linux/zsmalloc.h>
  
  /*
   * This must be power of 2 and greater than or equal to sizeof(link_free).
   * These two conditions ensure that any 'struct link_free' itself doesn't
   * span more than 1 page which avoids complex case of mapping 2 pages simply
   * to restore link_free pointer values.
   */
  #define ZS_ALIGN		8
  
  /*
   * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
   * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
   */
  #define ZS_MAX_ZSPAGE_ORDER 2
  #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
  
  /*
   * Object location (<PFN>, <obj_idx>) is encoded as
   * a single (unsigned long) handle value.
   *
   * Note that object index <obj_idx> is relative to system
   * page <PFN> it is stored in, so for each sub-page belonging
   * to a zspage, obj_idx starts with 0.
   *
   * This is made more complicated by various memory models and PAE.
   */
  
  #ifndef MAX_PHYSMEM_BITS
  #ifdef CONFIG_HIGHMEM64G
  #define MAX_PHYSMEM_BITS 36
  #else /* !CONFIG_HIGHMEM64G */
  /*
   * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
   * be PAGE_SHIFT
   */
  #define MAX_PHYSMEM_BITS BITS_PER_LONG
  #endif
  #endif
  #define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
  #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
  #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
  
  #define MAX(a, b) ((a) >= (b) ? (a) : (b))
  /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
  #define ZS_MIN_ALLOC_SIZE \
  	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
  #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
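  
  /*
   * Worked example (editor's note): assuming the MAX_PHYSMEM_BITS fallback of
   * BITS_PER_LONG == 64 and 4K pages (PAGE_SHIFT == 12):
   *	_PFN_BITS = 52, OBJ_INDEX_BITS = 12, OBJ_INDEX_MASK = 0xfff
   *	ZS_MIN_ALLOC_SIZE = MAX(32, (4 << 12) >> 12) = 32
   * i.e. a handle carries the PFN in its upper 52 bits and the (adjusted)
   * object index in its lower 12 bits.
   */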
  
  /*
   * On systems with 4K page size, this gives 255 size classes! There is a
   * trade-off here:
   *  - Large number of size classes is potentially wasteful as free pages are
   *    spread across these classes
   *  - Small number of size classes causes large internal fragmentation
   *  - Probably it's better to use specific size classes (empirically
   *    determined). NOTE: all those class sizes must be set as multiple of
   *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
   *
   *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
   *  (reason above)
   */
  #define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
  #define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
  					ZS_SIZE_CLASS_DELTA + 1)
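  
  /*
   * Worked example (editor's note): with 4K pages, ZS_SIZE_CLASS_DELTA is
   * 4096 >> 8 = 16, so ZS_SIZE_CLASSES = (4096 - 32) / 16 + 1 = 255,
   * matching the "255 size classes" mentioned above.
   */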
  
  /*
   * We do not maintain any list for completely empty or full pages
   */
  enum fullness_group {
  	ZS_ALMOST_FULL,
  	ZS_ALMOST_EMPTY,
  	_ZS_NR_FULLNESS_GROUPS,
  
  	ZS_EMPTY,
  	ZS_FULL
  };
  
  /*
   * We assign a page to ZS_ALMOST_EMPTY fullness group when:
   *	n <= N / f, where
   * n = number of allocated objects
   * N = total number of objects zspage can store
   * f = fullness_threshold_frac
   *
   * Similarly, we assign zspage to:
   *	ZS_ALMOST_FULL	when n > N / f
   *	ZS_EMPTY	when n == 0
   *	ZS_FULL		when n == N
   *
   * (see: fix_fullness_group())
   */
  static const int fullness_threshold_frac = 4;
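  
  /*
   * Example (editor's note): for a zspage that can hold N = 16 objects and
   * f = 4, get_fullness_group() below yields ZS_EMPTY for n == 0,
   * ZS_ALMOST_EMPTY for 1 <= n <= 4, ZS_ALMOST_FULL for 5 <= n <= 15 and
   * ZS_FULL for n == 16.
   */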
  
  struct size_class {
  	/*
  	 * Size of objects stored in this class. Must be multiple
  	 * of ZS_ALIGN.
  	 */
  	int size;
  	unsigned int index;
  
  	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
  	int pages_per_zspage;
  
  	spinlock_t lock;
  
  	/* stats */
  	u64 pages_allocated;
  
  	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
  };
  
  /*
   * Placed within free objects to form a singly linked list.
   * For every zspage, first_page->freelist gives head of this list.
   *
   * This must be power of 2 and less than or equal to ZS_ALIGN
   */
  struct link_free {
  	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
  	void *next;
  };
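  
  /*
   * Example (editor's note): for a 1024-byte size class on 4K pages, each
   * page of a zspage holds objects at offsets 0, 1024, 2048 and 3072; the
   * link_free embedded in every free object stores the handle of the next
   * free object, and first_page->freelist points at the first one (see
   * init_zspage()).
   */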
  
  struct zs_pool {
  	struct size_class size_class[ZS_SIZE_CLASSES];
  
  	gfp_t flags;	/* allocation flags used when growing pool */
  };
  
  /*
   * A zspage's class index and fullness group
   * are encoded in its (first)page->mapping
   */
  #define CLASS_IDX_BITS	28
  #define FULLNESS_BITS	4
  #define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
  #define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
  struct mapping_area {
  #ifdef CONFIG_PGTABLE_MAPPING
  	struct vm_struct *vm; /* vm area for mapping objects that span pages */
  #else
  	char *vm_buf; /* copy buffer for objects that span pages */
  #endif
  	char *vm_addr; /* address of kmap_atomic()'ed pages */
  	enum zs_mapmode vm_mm; /* mapping mode */
  };
  
  /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
  static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
  
  static int is_first_page(struct page *page)
  {
  	return PagePrivate(page);
  }
  
  static int is_last_page(struct page *page)
  {
  	return PagePrivate2(page);
  }
  
  static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
  				enum fullness_group *fullness)
  {
  	unsigned long m;
  	BUG_ON(!is_first_page(page));
  
  	m = (unsigned long)page->mapping;
  	*fullness = m & FULLNESS_MASK;
  	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
  }
  
  static void set_zspage_mapping(struct page *page, unsigned int class_idx,
  				enum fullness_group fullness)
  {
  	unsigned long m;
  	BUG_ON(!is_first_page(page));
  
  	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
  			(fullness & FULLNESS_MASK);
  	page->mapping = (struct address_space *)m;
  }
  
  /*
   * zsmalloc divides the pool into various size classes where each
   * class maintains a list of zspages where each zspage is divided
   * into equal sized chunks. Each allocation falls into one of these
   * classes depending on its size. This function returns index of the
   * size class which has chunk size big enough to hold the given size.
   */
  static int get_size_class_index(int size)
  {
  	int idx = 0;
  
  	if (likely(size > ZS_MIN_ALLOC_SIZE))
  		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
  				ZS_SIZE_CLASS_DELTA);
  
  	return idx;
  }
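  
  /*
   * Example (editor's note): with 4K pages, get_size_class_index(100)
   * returns DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class whose chunk
   * size is 32 + 5 * 16 = 112 bytes, the smallest class that fits the
   * request.
   */
  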
  /*
   * For each size class, zspages are divided into different groups
   * depending on how "full" they are. This was done so that we could
   * easily find empty or nearly empty zspages when we try to shrink
   * the pool (not yet implemented). This function returns fullness
   * status of the given page.
   */
  static enum fullness_group get_fullness_group(struct page *page)
  {
  	int inuse, max_objects;
  	enum fullness_group fg;
  	BUG_ON(!is_first_page(page));
  
  	inuse = page->inuse;
  	max_objects = page->objects;
  
  	if (inuse == 0)
  		fg = ZS_EMPTY;
  	else if (inuse == max_objects)
  		fg = ZS_FULL;
  	else if (inuse <= max_objects / fullness_threshold_frac)
  		fg = ZS_ALMOST_EMPTY;
  	else
  		fg = ZS_ALMOST_FULL;
  
  	return fg;
  }
  
  /*
   * Each size class maintains various freelists and zspages are assigned
   * to one of these freelists based on the number of live objects they
   * have. This function inserts the given zspage into the freelist
   * identified by <class, fullness_group>.
   */
  static void insert_zspage(struct page *page, struct size_class *class,
  				enum fullness_group fullness)
  {
  	struct page **head;
  
  	BUG_ON(!is_first_page(page));
  
  	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
  		return;
  
  	head = &class->fullness_list[fullness];
  	if (*head)
  		list_add_tail(&page->lru, &(*head)->lru);
  
  	*head = page;
  }
  
  /*
   * This function removes the given zspage from the freelist identified
   * by <class, fullness_group>.
   */
  static void remove_zspage(struct page *page, struct size_class *class,
  				enum fullness_group fullness)
  {
  	struct page **head;
  
  	BUG_ON(!is_first_page(page));
  
  	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
  		return;
  
  	head = &class->fullness_list[fullness];
  	BUG_ON(!*head);
  	if (list_empty(&(*head)->lru))
  		*head = NULL;
  	else if (*head == page)
  		*head = (struct page *)list_entry((*head)->lru.next,
  					struct page, lru);
  
  	list_del_init(&page->lru);
  }
  
  /*
   * Each size class maintains zspages in different fullness groups depending
   * on the number of live objects they contain. When allocating or freeing
   * objects, the fullness status of the page can change, say, from ALMOST_FULL
   * to ALMOST_EMPTY when freeing an object. This function checks if such
   * a status change has occurred for the given page and accordingly moves the
   * page from the freelist of the old fullness group to that of the new
   * fullness group.
   */
  static enum fullness_group fix_fullness_group(struct zs_pool *pool,
  						struct page *page)
  {
  	int class_idx;
  	struct size_class *class;
  	enum fullness_group currfg, newfg;
  
  	BUG_ON(!is_first_page(page));
  
  	get_zspage_mapping(page, &class_idx, &currfg);
  	newfg = get_fullness_group(page);
  	if (newfg == currfg)
  		goto out;
  
  	class = &pool->size_class[class_idx];
  	remove_zspage(page, class, currfg);
  	insert_zspage(page, class, newfg);
  	set_zspage_mapping(page, class_idx, newfg);
  
  out:
  	return newfg;
  }
  
  /*
   * We have to decide on how many pages to link together
   * to form a zspage for each size class. This is important
   * to reduce wastage due to unusable space left at end of
   * each zspage which is given as:
   *	wastage = Zp % class_size
   * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
   *
   * For example, for size class of 3/8 * PAGE_SIZE, we should
   * link together 3 PAGE_SIZE sized pages to form a zspage
   * since then we can perfectly fit in 8 such objects.
   */
  static int get_pages_per_zspage(int class_size)
  {
  	int i, max_usedpc = 0;
  	/* zspage order which gives maximum used size per KB */
  	int max_usedpc_order = 1;
  
  	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
  		int zspage_size;
  		int waste, usedpc;
  
  		zspage_size = i * PAGE_SIZE;
  		waste = zspage_size % class_size;
  		usedpc = (zspage_size - waste) * 100 / zspage_size;
  
  		if (usedpc > max_usedpc) {
  			max_usedpc = usedpc;
  			max_usedpc_order = i;
  		}
  	}
  
  	return max_usedpc_order;
  }
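  
  /*
   * Worked example (editor's note): for class_size = 3/8 * PAGE_SIZE
   * (1536 bytes with 4K pages, as in the comment above), the loop sees
   * usedpc = 75, 93, 100 and 93 for i = 1..4, so 3 pages per zspage is
   * chosen.
   */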
  
  /*
   * A single 'zspage' is composed of many system pages which are
   * linked together using fields in struct page. This function finds
   * the first/head page, given any component page of a zspage.
   */
  static struct page *get_first_page(struct page *page)
  {
  	if (is_first_page(page))
  		return page;
  	else
  		return page->first_page;
  }
  
  static struct page *get_next_page(struct page *page)
  {
  	struct page *next;
  
  	if (is_last_page(page))
  		next = NULL;
  	else if (is_first_page(page))
  		next = (struct page *)page_private(page);
  	else
  		next = list_entry(page->lru.next, struct page, lru);
  
  	return next;
  }
  
  /*
   * Encode <page, obj_idx> as a single handle value.
   * On hardware platforms with physical memory starting at 0x0 the pfn
   * could be 0 so we ensure that the handle will never be 0 by adjusting the
   * encoded obj_idx value before encoding.
   */
  static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
  {
  	unsigned long handle;
  
  	if (!page) {
  		BUG_ON(obj_idx);
  		return NULL;
  	}
  
  	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
  	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);
  
  	return (void *)handle;
  }
  
  /*
   * Decode <page, obj_idx> pair from the given object handle. We adjust the
   * decoded obj_idx back to its original value since it was adjusted in
   * obj_location_to_handle().
   */
  static void obj_handle_to_location(unsigned long handle, struct page **page,
  				unsigned long *obj_idx)
  {
  	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
  	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
  }
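  
  /*
   * Example (editor's note): with OBJ_INDEX_BITS == 12, a page with
   * PFN 0x12345 and obj_idx 7 encodes to (0x12345 << 12) | (7 + 1)
   * = 0x12345008; decoding shifts the PFN back out and recovers
   * obj_idx = (0x008 & 0xfff) - 1 = 7.
   */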
  
  static unsigned long obj_idx_to_offset(struct page *page,
  				unsigned long obj_idx, int class_size)
  {
  	unsigned long off = 0;
  
  	if (!is_first_page(page))
  		off = page->index;
  
  	return off + obj_idx * class_size;
  }
  
  static void reset_page(struct page *page)
  {
  	clear_bit(PG_private, &page->flags);
  	clear_bit(PG_private_2, &page->flags);
  	set_page_private(page, 0);
  	page->mapping = NULL;
  	page->freelist = NULL;
  	page_mapcount_reset(page);
  }
  
  static void free_zspage(struct page *first_page)
  {
  	struct page *nextp, *tmp, *head_extra;
  
  	BUG_ON(!is_first_page(first_page));
  	BUG_ON(first_page->inuse);
  	head_extra = (struct page *)page_private(first_page);
  
  	reset_page(first_page);
  	__free_page(first_page);
  
  	/* zspage with only 1 system page */
  	if (!head_extra)
  		return;
  	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
  		list_del(&nextp->lru);
  		reset_page(nextp);
  		__free_page(nextp);
  	}
  	reset_page(head_extra);
  	__free_page(head_extra);
  }
  
  /* Initialize a newly allocated zspage */
  static void init_zspage(struct page *first_page, struct size_class *class)
  {
  	unsigned long off = 0;
  	struct page *page = first_page;
  
  	BUG_ON(!is_first_page(first_page));
  	while (page) {
  		struct page *next_page;
  		struct link_free *link;
  		unsigned int i, objs_on_page;
  
  		/*
  		 * page->index stores offset of first object starting
  		 * in the page. For the first page, this is always 0,
  		 * so we use first_page->index (aka ->freelist) to store
  		 * head of corresponding zspage's freelist.
  		 */
  		if (page != first_page)
  			page->index = off;
  
  		link = (struct link_free *)kmap_atomic(page) +
  						off / sizeof(*link);
  		objs_on_page = (PAGE_SIZE - off) / class->size;
  
  		for (i = 1; i <= objs_on_page; i++) {
  			off += class->size;
  			if (off < PAGE_SIZE) {
  				link->next = obj_location_to_handle(page, i);
  				link += class->size / sizeof(*link);
  			}
  		}
  
  		/*
  		 * We now come to the last (full or partial) object on this
  		 * page, which must point to the first object on the next
  		 * page (if present)
  		 */
  		next_page = get_next_page(page);
  		link->next = obj_location_to_handle(next_page, 0);
  		kunmap_atomic(link);
  		page = next_page;
  		off = (off + class->size) % PAGE_SIZE;
  	}
  }
  
  /*
   * Allocate a zspage for the given size class
   */
  static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
  {
  	int i, error;
  	struct page *first_page = NULL, *uninitialized_var(prev_page);
  
  	/*
  	 * Allocate individual pages and link them together as:
  	 * 1. first page->private = first sub-page
  	 * 2. all sub-pages are linked together using page->lru
  	 * 3. each sub-page is linked to the first page using page->first_page
  	 *
  	 * For each size class, First/Head pages are linked together using
  	 * page->lru. Also, we set PG_private to identify the first page
  	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
  	 * identify the last page.
  	 */
  	error = -ENOMEM;
  	for (i = 0; i < class->pages_per_zspage; i++) {
  		struct page *page;
  
  		page = alloc_page(flags);
  		if (!page)
  			goto cleanup;
  
  		INIT_LIST_HEAD(&page->lru);
  		if (i == 0) {	/* first page */
  			SetPagePrivate(page);
  			set_page_private(page, 0);
  			first_page = page;
  			first_page->inuse = 0;
  		}
  		if (i == 1)
  			set_page_private(first_page, (unsigned long)page);
  		if (i >= 1)
  			page->first_page = first_page;
  		if (i >= 2)
  			list_add(&page->lru, &prev_page->lru);
  		if (i == class->pages_per_zspage - 1)	/* last page */
  			SetPagePrivate2(page);
  		prev_page = page;
  	}
  
  	init_zspage(first_page, class);
  
  	first_page->freelist = obj_location_to_handle(first_page, 0);
  	/* Maximum number of objects we can store in this zspage */
  	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
  
  	error = 0; /* Success */
  
  cleanup:
  	if (unlikely(error) && first_page) {
  		free_zspage(first_page);
  		first_page = NULL;
  	}
  
  	return first_page;
  }
  
  static struct page *find_get_zspage(struct size_class *class)
  {
  	int i;
  	struct page *page;
  
  	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
  		page = class->fullness_list[i];
  		if (page)
  			break;
  	}
  
  	return page;
  }
  
  #ifdef CONFIG_PGTABLE_MAPPING
  static inline int __zs_cpu_up(struct mapping_area *area)
  {
  	/*
  	 * Make sure we don't leak memory if a cpu UP notification
  	 * and zs_init() race and both call zs_cpu_up() on the same cpu
  	 */
  	if (area->vm)
  		return 0;
  	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
  	if (!area->vm)
  		return -ENOMEM;
  	return 0;
  }
  
  static inline void __zs_cpu_down(struct mapping_area *area)
  {
  	if (area->vm)
  		free_vm_area(area->vm);
  	area->vm = NULL;
  }
  
  static inline void *__zs_map_object(struct mapping_area *area,
  				struct page *pages[2], int off, int size)
  {
  	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
  	area->vm_addr = area->vm->addr;
  	return area->vm_addr + off;
  }
  
  static inline void __zs_unmap_object(struct mapping_area *area,
  				struct page *pages[2], int off, int size)
  {
  	unsigned long addr = (unsigned long)area->vm_addr;
  
  	unmap_kernel_range(addr, PAGE_SIZE * 2);
  }
  
  #else /* CONFIG_PGTABLE_MAPPING */
  
  static inline int __zs_cpu_up(struct mapping_area *area)
  {
  	/*
  	 * Make sure we don't leak memory if a cpu UP notification
  	 * and zs_init() race and both call zs_cpu_up() on the same cpu
  	 */
  	if (area->vm_buf)
  		return 0;
  	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
  	if (!area->vm_buf)
  		return -ENOMEM;
  	return 0;
  }
  
  static inline void __zs_cpu_down(struct mapping_area *area)
  {
  	if (area->vm_buf)
  		free_page((unsigned long)area->vm_buf);
  	area->vm_buf = NULL;
  }
  
  static void *__zs_map_object(struct mapping_area *area,
  			struct page *pages[2], int off, int size)
  {
  	int sizes[2];
  	void *addr;
  	char *buf = area->vm_buf;
  
  	/* disable page faults to match kmap_atomic() return conditions */
  	pagefault_disable();
  
  	/* no read fastpath */
  	if (area->vm_mm == ZS_MM_WO)
  		goto out;
  
  	sizes[0] = PAGE_SIZE - off;
  	sizes[1] = size - sizes[0];
  	/* copy object to per-cpu buffer */
  	addr = kmap_atomic(pages[0]);
  	memcpy(buf, addr + off, sizes[0]);
  	kunmap_atomic(addr);
  	addr = kmap_atomic(pages[1]);
  	memcpy(buf + sizes[0], addr, sizes[1]);
  	kunmap_atomic(addr);
  out:
  	return area->vm_buf;
  }
  
  static void __zs_unmap_object(struct mapping_area *area,
  			struct page *pages[2], int off, int size)
  {
  	int sizes[2];
  	void *addr;
  	char *buf = area->vm_buf;
  
  	/* no write fastpath */
  	if (area->vm_mm == ZS_MM_RO)
  		goto out;
  
  	sizes[0] = PAGE_SIZE - off;
  	sizes[1] = size - sizes[0];
  
  	/* copy per-cpu buffer to object */
  	addr = kmap_atomic(pages[0]);
  	memcpy(addr + off, buf, sizes[0]);
  	kunmap_atomic(addr);
  	addr = kmap_atomic(pages[1]);
  	memcpy(addr, buf + sizes[0], sizes[1]);
  	kunmap_atomic(addr);
  
  out:
  	/* enable page faults to match kunmap_atomic() return conditions */
  	pagefault_enable();
  }

  #endif /* CONFIG_PGTABLE_MAPPING */

  static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
  				void *pcpu)
  {
  	int ret, cpu = (long)pcpu;
  	struct mapping_area *area;
  
  	switch (action) {
  	case CPU_UP_PREPARE:
  		area = &per_cpu(zs_map_area, cpu);
  		ret = __zs_cpu_up(area);
  		if (ret)
  			return notifier_from_errno(ret);
  		break;
  	case CPU_DEAD:
  	case CPU_UP_CANCELED:
  		area = &per_cpu(zs_map_area, cpu);
  		__zs_cpu_down(area);
  		break;
  	}
  
  	return NOTIFY_OK;
  }
  
  static struct notifier_block zs_cpu_nb = {
  	.notifier_call = zs_cpu_notifier
  };
  
  static void zs_exit(void)
  {
  	int cpu;
  	cpu_notifier_register_begin();
  	for_each_online_cpu(cpu)
  		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
  	__unregister_cpu_notifier(&zs_cpu_nb);
  
  	cpu_notifier_register_done();
  }
  
  static int zs_init(void)
  {
  	int cpu, ret;
  	cpu_notifier_register_begin();
  
  	__register_cpu_notifier(&zs_cpu_nb);
  	for_each_online_cpu(cpu) {
  		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
  		if (notifier_to_errno(ret)) {
  			cpu_notifier_register_done();
  			goto fail;
  		}
  	}
  
  	cpu_notifier_register_done();
  	return 0;
  fail:
  	zs_exit();
  	return notifier_to_errno(ret);
  }
  
  /**
   * zs_create_pool - Creates an allocation pool to work from.
   * @flags: allocation flags used to allocate pool metadata
   *
   * This function must be called before anything when using
   * the zsmalloc allocator.
   *
   * On success, a pointer to the newly created pool is returned,
   * otherwise NULL.
   */
  struct zs_pool *zs_create_pool(gfp_t flags)
  {
  	int i, ovhd_size;
  	struct zs_pool *pool;
  
  	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
  	pool = kzalloc(ovhd_size, GFP_KERNEL);
  	if (!pool)
  		return NULL;
  
  	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
  		int size;
  		struct size_class *class;
  
  		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
  		if (size > ZS_MAX_ALLOC_SIZE)
  			size = ZS_MAX_ALLOC_SIZE;
  
  		class = &pool->size_class[i];
  		class->size = size;
  		class->index = i;
  		spin_lock_init(&class->lock);
  		class->pages_per_zspage = get_pages_per_zspage(size);
  
  	}
  	pool->flags = flags;

  	return pool;
  }
  EXPORT_SYMBOL_GPL(zs_create_pool);
  
  void zs_destroy_pool(struct zs_pool *pool)
  {
  	int i;
  
  	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
  		int fg;
  		struct size_class *class = &pool->size_class[i];
  
  		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
  			if (class->fullness_list[fg]) {
  				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
  					class->size, fg);
  			}
  		}
  	}
  	kfree(pool);
  }
  EXPORT_SYMBOL_GPL(zs_destroy_pool);
  
  /**
   * zs_malloc - Allocate block of given size from pool.
   * @pool: pool to allocate from
   * @size: size of block to allocate
   *
   * On success, handle to the allocated object is returned,
   * otherwise 0.
   * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
   */
  unsigned long zs_malloc(struct zs_pool *pool, size_t size)
  {
  	unsigned long obj;
  	struct link_free *link;
  	int class_idx;
  	struct size_class *class;
  
  	struct page *first_page, *m_page;
  	unsigned long m_objidx, m_offset;
  
  	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
  		return 0;
  
  	class_idx = get_size_class_index(size);
  	class = &pool->size_class[class_idx];
  	BUG_ON(class_idx != class->index);
  
  	spin_lock(&class->lock);
  	first_page = find_get_zspage(class);
  
  	if (!first_page) {
  		spin_unlock(&class->lock);
  		first_page = alloc_zspage(class, pool->flags);
  		if (unlikely(!first_page))
  			return 0;
  
  		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
  		spin_lock(&class->lock);
  		class->pages_allocated += class->pages_per_zspage;
  	}
  
  	obj = (unsigned long)first_page->freelist;
  	obj_handle_to_location(obj, &m_page, &m_objidx);
  	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
  
  	link = (struct link_free *)kmap_atomic(m_page) +
  					m_offset / sizeof(*link);
  	first_page->freelist = link->next;
  	memset(link, POISON_INUSE, sizeof(*link));
  	kunmap_atomic(link);
  
  	first_page->inuse++;
  	/* Now move the zspage to another fullness group, if required */
  	fix_fullness_group(pool, first_page);
  	spin_unlock(&class->lock);
  
  	return obj;
  }
  EXPORT_SYMBOL_GPL(zs_malloc);
  
  void zs_free(struct zs_pool *pool, unsigned long obj)
  {
  	struct link_free *link;
  	struct page *first_page, *f_page;
  	unsigned long f_objidx, f_offset;
  
  	int class_idx;
  	struct size_class *class;
  	enum fullness_group fullness;
  
  	if (unlikely(!obj))
  		return;
  
  	obj_handle_to_location(obj, &f_page, &f_objidx);
  	first_page = get_first_page(f_page);
  
  	get_zspage_mapping(first_page, &class_idx, &fullness);
  	class = &pool->size_class[class_idx];
  	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
  
  	spin_lock(&class->lock);
  
  	/* Insert this object in containing zspage's freelist */
  	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
  							+ f_offset);
  	link->next = first_page->freelist;
  	kunmap_atomic(link);
  	first_page->freelist = (void *)obj;
  
  	first_page->inuse--;
  	fullness = fix_fullness_group(pool, first_page);
  
  	if (fullness == ZS_EMPTY)
  		class->pages_allocated -= class->pages_per_zspage;
  
  	spin_unlock(&class->lock);
  
  	if (fullness == ZS_EMPTY)
  		free_zspage(first_page);
  }
  EXPORT_SYMBOL_GPL(zs_free);
  
  /**
   * zs_map_object - get address of allocated object from handle.
   * @pool: pool from which the object was allocated
   * @handle: handle returned from zs_malloc
   *
   * Before using an object allocated from zs_malloc, it must be mapped using
   * this function. When done with the object, it must be unmapped using
   * zs_unmap_object.
   *
   * Only one object can be mapped per cpu at a time. There is no protection
   * against nested mappings.
   *
   * This function returns with preemption and page faults disabled.
   */
  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
  			enum zs_mapmode mm)
  {
  	struct page *page;
  	unsigned long obj_idx, off;
  
  	unsigned int class_idx;
  	enum fullness_group fg;
  	struct size_class *class;
  	struct mapping_area *area;
  	struct page *pages[2];
  
  	BUG_ON(!handle);
  	/*
  	 * Because we use per-cpu mapping areas shared among the
  	 * pools/users, we can't allow mapping in interrupt context
  	 * because it can corrupt another user's mappings.
  	 */
  	BUG_ON(in_interrupt());
  	obj_handle_to_location(handle, &page, &obj_idx);
  	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
  	class = &pool->size_class[class_idx];
  	off = obj_idx_to_offset(page, obj_idx, class->size);
  
  	area = &get_cpu_var(zs_map_area);
  	area->vm_mm = mm;
  	if (off + class->size <= PAGE_SIZE) {
  		/* this object is contained entirely within a page */
  		area->vm_addr = kmap_atomic(page);
  		return area->vm_addr + off;
  	}
  	/* this object spans two pages */
  	pages[0] = page;
  	pages[1] = get_next_page(page);
  	BUG_ON(!pages[1]);
  
  	return __zs_map_object(area, pages, off, class->size);
  }
  EXPORT_SYMBOL_GPL(zs_map_object);
  
  void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
  {
  	struct page *page;
  	unsigned long obj_idx, off;
  
  	unsigned int class_idx;
  	enum fullness_group fg;
  	struct size_class *class;
  	struct mapping_area *area;
  
  	BUG_ON(!handle);
  
  	obj_handle_to_location(handle, &page, &obj_idx);
  	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
  	class = &pool->size_class[class_idx];
  	off = obj_idx_to_offset(page, obj_idx, class->size);
  	area = this_cpu_ptr(&zs_map_area);
  	if (off + class->size <= PAGE_SIZE)
  		kunmap_atomic(area->vm_addr);
  	else {
  		struct page *pages[2];
  
  		pages[0] = page;
  		pages[1] = get_next_page(page);
  		BUG_ON(!pages[1]);
  
  		__zs_unmap_object(area, pages, off, class->size);
  	}
  	put_cpu_var(zs_map_area);
  }
  EXPORT_SYMBOL_GPL(zs_unmap_object);
  
  u64 zs_get_total_size_bytes(struct zs_pool *pool)
  {
  	int i;
  	u64 npages = 0;
  
  	for (i = 0; i < ZS_SIZE_CLASSES; i++)
  		npages += pool->size_class[i].pages_allocated;
  
  	return npages << PAGE_SHIFT;
  }
  EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
  
  module_init(zs_init);
  module_exit(zs_exit);
  
  MODULE_LICENSE("Dual BSD/GPL");
  MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");