mm/slob.c

  /*
   * SLOB Allocator: Simple List Of Blocks
   *
   * Matt Mackall <mpm@selenic.com> 12/30/03
   *
   * NUMA support by Paul Mundt, 2007.
   *
   * How SLOB works:
   *
   * The core of SLOB is a traditional K&R style heap allocator, with
   * support for returning aligned objects. The granularity of this
   * allocator is as little as 2 bytes, however typically most architectures
   * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
   *
   * The slob heap is a set of linked lists of pages from alloc_pages(),
   * and within each page, there is a singly-linked list of free blocks
   * (slob_t). The heap is grown on demand. To reduce fragmentation,
   * heap pages are segregated into three lists, with objects less than
   * 256 bytes, objects less than 1024 bytes, and all other objects.
   *
   * Allocation from heap involves first searching for a page with
   * sufficient free blocks (using a next-fit-like approach) followed by
   * a first-fit scan of the page. Deallocation inserts objects back
   * into the free list in address order, so this is effectively an
   * address-ordered first fit.
   *
   * Above this is an implementation of kmalloc/kfree. Blocks returned
   * from kmalloc are prepended with a 4-byte header with the kmalloc size.
   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
   * alloc_pages() directly, allocating compound pages so the page order
   * does not have to be separately tracked.
   * These objects are detected in kfree() because PageSlab()
   * is false for them.
   *
   * SLAB is emulated on top of SLOB by simply calling constructors and
   * destructors for every SLAB allocation. Objects are returned with the
   * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
   * case the low-level allocator will fragment blocks to create the proper
   * alignment. Again, objects of page-size or greater are allocated by
   * calling alloc_pages(). As SLAB objects know their size, no separate
   * size bookkeeping is necessary and there is essentially no allocation
   * space overhead, and compound pages aren't needed for multi-page
   * allocations.
   *
   * NUMA support in SLOB is fairly simplistic, pushing most of the real
   * logic down to the page allocator, and simply doing the node accounting
   * on the upper levels. In the event that a node id is explicitly
   * provided, __alloc_pages_node() with the specified node id is used
   * instead. The common case (or when the node id isn't explicitly provided)
   * will default to the current node, as per numa_node_id().
   *
   * Node aware pages are still inserted into the global freelist, and
   * these are scanned for by matching against the node id encoded in the
   * page flags. As a result, block allocations that can be satisfied from
   * the freelist will only be done so on pages residing on the same node,
   * in order to prevent random node placement.
   */
  #include <linux/kernel.h>
  #include <linux/slab.h>

  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/cache.h>
  #include <linux/init.h>
  #include <linux/export.h>
  #include <linux/rcupdate.h>
  #include <linux/list.h>
  #include <linux/kmemleak.h>
  
  #include <trace/events/kmem.h>
  #include <linux/atomic.h>

  #include "slab.h"
  /*
   * slob_block has a field 'units', which indicates size of block if +ve,
   * or offset of next block if -ve (in SLOB_UNITs).
   *
   * Free blocks of size 1 unit simply contain the offset of the next block.
   * Those with larger size contain their size in the first SLOB_UNIT of
   * memory, and the offset of the next free block in the second SLOB_UNIT.
   */
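  /*
   * Worked example with illustrative values, assuming 2-byte SLOB_UNITs:
   * a free block spanning 5 units whose next free block starts 40 units
   * into the page is stored as s[0].units = 5, s[1].units = 40 (see
   * set_slob() below). A 1-unit free block with the same successor keeps
   * only s[0].units = -40, and slob_next() recovers the successor by
   * negating it.
   */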
  #if PAGE_SIZE <= (32767 * 2)
  typedef s16 slobidx_t;
  #else
  typedef s32 slobidx_t;
  #endif
  struct slob_block {
  	slobidx_t units;
  };
  typedef struct slob_block slob_t;
  /*
   * All partially free slob pages go on these lists.
   */
  #define SLOB_BREAK1 256
  #define SLOB_BREAK2 1024
  static LIST_HEAD(free_slob_small);
  static LIST_HEAD(free_slob_medium);
  static LIST_HEAD(free_slob_large);
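  /*
   * Rough illustration of the size classes: a 200-byte request is served
   * from free_slob_small, a 512-byte request from free_slob_medium, and a
   * 2000-byte request from free_slob_large; page-sized and larger requests
   * never reach these lists and go straight to the page allocator (see
   * __do_kmalloc_node() and slob_alloc_node() below).
   */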
  
  /*
   * slob_page_free: true for pages on one of the free_slob_* lists.
   */
  static inline int slob_page_free(struct page *sp)
  {
  	return PageSlobFree(sp);
  }
  static void set_slob_page_free(struct page *sp, struct list_head *list)
  {
  	list_add(&sp->lru, list);
  	__SetPageSlobFree(sp);
  }
  static inline void clear_slob_page_free(struct page *sp)
  {
  	list_del(&sp->lru);
  	__ClearPageSlobFree(sp);
  }
  #define SLOB_UNIT sizeof(slob_t)
  #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

  /*
   * struct slob_rcu is inserted at the tail of allocated slob blocks, which
   * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
   * the block using call_rcu.
   */
  struct slob_rcu {
  	struct rcu_head head;
  	int size;
  };
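  /*
   * Sketch of the lifetime, assuming a cache created with object size 64:
   * __kmem_cache_create() grows c->size to 64 + sizeof(struct slob_rcu),
   * so every object carries room for the footer. kmem_cache_free() then
   * fills in the footer at offset 64 and hands it to call_rcu(); after the
   * grace period, kmem_rcu_free() walks back from the footer to the start
   * of the block and releases it via __kmem_cache_free().
   */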
  /*
   * slob_lock protects all slob allocator structures.
   */
  static DEFINE_SPINLOCK(slob_lock);

  /*
   * Encode the given size and next info into a free slob block s.
   */
  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t offset = next - base;

  	if (size > 1) {
  		s[0].units = size;
  		s[1].units = offset;
  	} else
  		s[0].units = -offset;
  }

  /*
   * Return the size of a slob block.
   */
  static slobidx_t slob_units(slob_t *s)
  {
  	if (s->units > 0)
  		return s->units;
  	return 1;
  }
  
  /*
   * Return the next free slob block pointer after this one.
   */
  static slob_t *slob_next(slob_t *s)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t next;
  
  	if (s[0].units < 0)
  		next = -s[0].units;
  	else
  		next = s[1].units;
  	return base+next;
  }
  
  /*
   * Returns true if s is the last free block in its page.
   */
  static int slob_last(slob_t *s)
  {
  	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
  }
  static void *slob_new_pages(gfp_t gfp, int order, int node)
  {
  	void *page;
  
  #ifdef CONFIG_NUMA
  	if (node != NUMA_NO_NODE)
  		page = __alloc_pages_node(node, gfp, order);
  	else
  #endif
  		page = alloc_pages(gfp, order);
  
  	if (!page)
  		return NULL;
  
  	return page_address(page);
  }
  static void slob_free_pages(void *b, int order)
  {
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += 1 << order;
  	free_pages((unsigned long)b, order);
  }
  /*
   * Allocate a slob block within a given slob_page sp.
   */
  static void *slob_page_alloc(struct page *sp, size_t size, int align)
  {
  	slob_t *prev, *cur, *aligned = NULL;
  	int delta = 0, units = SLOB_UNITS(size);

  	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
  		slobidx_t avail = slob_units(cur);
  		if (align) {
  			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
  			delta = aligned - cur;
  		}
  		if (avail >= units + delta) { /* room enough? */
  			slob_t *next;
  			if (delta) { /* need to fragment head to align? */
  				next = slob_next(cur);
  				set_slob(aligned, avail - delta, next);
  				set_slob(cur, delta, aligned);
  				prev = cur;
  				cur = aligned;
  				avail = slob_units(cur);
  			}
  			next = slob_next(cur);
  			if (avail == units) { /* exact fit? unlink. */
  				if (prev)
  					set_slob(prev, slob_units(prev), next);
  				else
  					sp->freelist = next;
  			} else { /* fragment */
  				if (prev)
  					set_slob(prev, slob_units(prev), cur + units);
  				else
  					sp->freelist = cur + units;
  				set_slob(cur + units, avail - units, next);
  			}
  			sp->units -= units;
  			if (!sp->units)
  				clear_slob_page_free(sp);
  			return cur;
  		}
  		if (slob_last(cur))
  			return NULL;
  	}
  }

  /*
   * slob_alloc: entry point into the slob allocator.
   */
  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  {
  	struct page *sp;
  	struct list_head *prev;
  	struct list_head *slob_list;
  	slob_t *b = NULL;
  	unsigned long flags;

  	if (size < SLOB_BREAK1)
  		slob_list = &free_slob_small;
  	else if (size < SLOB_BREAK2)
  		slob_list = &free_slob_medium;
  	else
  		slob_list = &free_slob_large;
  	spin_lock_irqsave(&slob_lock, flags);
  	/* Iterate through each partially free page, try to find room */
  	list_for_each_entry(sp, slob_list, lru) {
  #ifdef CONFIG_NUMA
  		/*
  		 * If there's a node specification, search for a partial
  		 * page with a matching node id in the freelist.
  		 */
  		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
  			continue;
  #endif
  		/* Enough room on this page? */
  		if (sp->units < SLOB_UNITS(size))
  			continue;

  		/* Attempt to alloc */
  		prev = sp->lru.prev;
  		b = slob_page_alloc(sp, size, align);
  		if (!b)
  			continue;
  
  		/* Improve fragment distribution and reduce our average
  		 * search time by starting our next search here. (see
  		 * Knuth vol 1, sec 2.5, pg 449) */
  		if (prev != slob_list->prev &&
  				slob_list->next != prev->next)
  			list_move_tail(slob_list, prev->next);
  		break;
  	}
  	spin_unlock_irqrestore(&slob_lock, flags);
  
  	/* Not enough space: must allocate a new page */
  	if (!b) {
  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
  		if (!b)
  			return NULL;
  		sp = virt_to_page(b);
  		__SetPageSlab(sp);
  
  		spin_lock_irqsave(&slob_lock, flags);
  		sp->units = SLOB_UNITS(PAGE_SIZE);
  		sp->freelist = b;
  		INIT_LIST_HEAD(&sp->lru);
  		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
  		set_slob_page_free(sp, slob_list);
  		b = slob_page_alloc(sp, size, align);
  		BUG_ON(!b);
  		spin_unlock_irqrestore(&slob_lock, flags);
  	}
  	if (unlikely((gfp & __GFP_ZERO) && b))
  		memset(b, 0, size);
  	return b;
  }
  /*
   * slob_free: entry point for returning blocks to the slob allocator.
   */
  static void slob_free(void *block, int size)
  {
  	struct page *sp;
  	slob_t *prev, *next, *b = (slob_t *)block;
  	slobidx_t units;
  	unsigned long flags;
  	struct list_head *slob_list;

  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	BUG_ON(!size);

  	sp = virt_to_page(block);
  	units = SLOB_UNITS(size);

  	spin_lock_irqsave(&slob_lock, flags);

  	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
  		/* Go directly to page allocator. Do not pass slob allocator */
  		if (slob_page_free(sp))
  			clear_slob_page_free(sp);
  		spin_unlock_irqrestore(&slob_lock, flags);
  		__ClearPageSlab(sp);
  		page_mapcount_reset(sp);
  		slob_free_pages(b, 0);
  		return;
  	}

  	if (!slob_page_free(sp)) {
  		/* This slob page is about to become partially free. Easy! */
  		sp->units = units;
  		sp->freelist = b;
  		set_slob(b, units,
  			(void *)((unsigned long)(b +
  					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
  		if (size < SLOB_BREAK1)
  			slob_list = &free_slob_small;
  		else if (size < SLOB_BREAK2)
  			slob_list = &free_slob_medium;
  		else
  			slob_list = &free_slob_large;
  		set_slob_page_free(sp, slob_list);
  		goto out;
  	}
  
  	/*
  	 * Otherwise the page is already partially free, so find reinsertion
  	 * point.
  	 */
  	sp->units += units;

  	if (b < (slob_t *)sp->freelist) {
  		if (b + units == sp->freelist) {
  			units += slob_units(sp->freelist);
  			sp->freelist = slob_next(sp->freelist);
  		}
  		set_slob(b, units, sp->freelist);
  		sp->freelist = b;
  	} else {
  		prev = sp->freelist;
  		next = slob_next(prev);
  		while (b > next) {
  			prev = next;
  			next = slob_next(prev);
  		}

  		if (!slob_last(prev) && b + units == next) {
  			units += slob_units(next);
  			set_slob(b, units, slob_next(next));
  		} else
  			set_slob(b, units, next);
  
  		if (prev + slob_units(prev) == b) {
  			units = slob_units(b) + slob_units(prev);
  			set_slob(prev, units, slob_next(b));
  		} else
  			set_slob(prev, slob_units(prev), b);
  	}
  out:
  	spin_unlock_irqrestore(&slob_lock, flags);
  }
  /*
   * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
   */
  static __always_inline void *
  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
  {
  	unsigned int *m;
  	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  	void *ret;

  	gfp &= gfp_allowed_mask;
  	lockdep_trace_alloc(gfp);

  	if (size < PAGE_SIZE - align) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  		m = slob_alloc(size + align, gfp, align, node);

  		if (!m)
  			return NULL;
  		*m = size;
  		ret = (void *)m + align;
  		trace_kmalloc_node(caller, ret,
  				   size, size + align, gfp, node);
  	} else {
  		unsigned int order = get_order(size);

  		if (likely(order))
  			gfp |= __GFP_COMP;
  		ret = slob_new_pages(gfp, order, node);

  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << order, gfp, node);
  	}

  	kmemleak_alloc(ret, size, 1, gfp);
  	return ret;
  }
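  /*
   * Worked example with illustrative numbers: on a configuration where the
   * minimum alignment works out to 8, kmalloc(100, GFP_KERNEL) becomes
   * slob_alloc(108, ...); the requested size (100) is stored in the first
   * four bytes of the block and the caller receives the address 8 bytes in.
   * kfree() steps back by the same alignment to find that header, and
   * ksize() reports the stored size rounded up to whole SLOB_UNITs.
   */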

  void *__kmalloc(size_t size, gfp_t gfp)
  {
  	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
  }
  EXPORT_SYMBOL(__kmalloc);

  void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
  {
  	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
  }
  
  #ifdef CONFIG_NUMA
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
  					int node, unsigned long caller)
  {
  	return __do_kmalloc_node(size, gfp, node, caller);
  }
  #endif

  void kfree(const void *block)
  {
  	struct page *sp;

  	trace_kfree(_RET_IP_, block);
  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	kmemleak_free(block);

  	sp = virt_to_page(block);
  	if (PageSlab(sp)) {
  		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  		unsigned int *m = (unsigned int *)(block - align);
  		slob_free(m, *m + align);
  	} else
  		__free_pages(sp, compound_order(sp));
  }
  EXPORT_SYMBOL(kfree);
  /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
  size_t ksize(const void *block)
  {
  	struct page *sp;
  	int align;
  	unsigned int *m;

  	BUG_ON(!block);
  	if (unlikely(block == ZERO_SIZE_PTR))
  		return 0;
  	sp = virt_to_page(block);
  	if (unlikely(!PageSlab(sp)))
  		return PAGE_SIZE << compound_order(sp);
  	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  	m = (unsigned int *)(block - align);
  	return SLOB_UNITS(*m) * SLOB_UNIT;
  }
  EXPORT_SYMBOL(ksize);

  int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
  {
  	if (flags & SLAB_DESTROY_BY_RCU) {
  		/* leave room for rcu footer at the end of object */
  		c->size += sizeof(struct slob_rcu);
  	}
  	c->flags = flags;
  	return 0;
  }

  static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
  {
  	void *b;
  	flags &= gfp_allowed_mask;
  
  	lockdep_trace_alloc(flags);
  	if (c->size < PAGE_SIZE) {
  		b = slob_alloc(c->size, flags, c->align, node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
  					    SLOB_UNITS(c->size) * SLOB_UNIT,
  					    flags, node);
  	} else {
  		b = slob_new_pages(flags, get_order(c->size), node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
  					    PAGE_SIZE << get_order(c->size),
  					    flags, node);
  	}

  	if (b && c->ctor)
  		c->ctor(b);

  	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
  	return b;
  }
  
  void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  {
  	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
  
  #ifdef CONFIG_NUMA
  void *__kmalloc_node(size_t size, gfp_t gfp, int node)
  {
  	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
  }
  EXPORT_SYMBOL(__kmalloc_node);
  
  void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
  {
  	return slob_alloc_node(cachep, gfp, node);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
  #endif

  static void __kmem_cache_free(void *b, int size)
  {
  	if (size < PAGE_SIZE)
  		slob_free(b, size);
  	else
  		slob_free_pages(b, get_order(size));
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
  {
  	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
  	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
  	__kmem_cache_free(b, slob_rcu->size);
  }
  
  void kmem_cache_free(struct kmem_cache *c, void *b)
  {
  	kmemleak_free_recursive(b, c->flags);
  	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
  		struct slob_rcu *slob_rcu;
  		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
  		slob_rcu->size = c->size;
  		call_rcu(&slob_rcu->head, kmem_rcu_free);
  	} else {
  		__kmem_cache_free(b, c->size);
  	}

  	trace_kmem_cache_free(_RET_IP_, b);
  }
  EXPORT_SYMBOL(kmem_cache_free);
  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
  {
  	__kmem_cache_free_bulk(s, size, p);
  }
  EXPORT_SYMBOL(kmem_cache_free_bulk);
  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  								void **p)
  {
  	return __kmem_cache_alloc_bulk(s, flags, size, p);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  int __kmem_cache_shutdown(struct kmem_cache *c)
  {
  	/* No way to check for remaining objects */
  	return 0;
  }
  void __kmem_cache_release(struct kmem_cache *c)
  {
  }
  int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
  {
  	return 0;
  }

  struct kmem_cache kmem_cache_boot = {
  	.name = "kmem_cache",
  	.size = sizeof(struct kmem_cache),
  	.flags = SLAB_PANIC,
  	.align = ARCH_KMALLOC_MINALIGN,
  };
  void __init kmem_cache_init(void)
  {
  	kmem_cache = &kmem_cache_boot;
  	slab_state = UP;
  }
  
  void __init kmem_cache_init_late(void)
  {
  	slab_state = FULL;
  }