mm/slob.c

  /*
   * SLOB Allocator: Simple List Of Blocks
   *
   * Matt Mackall <mpm@selenic.com> 12/30/03
   *
   * NUMA support by Paul Mundt, 2007.
   *
   * How SLOB works:
   *
   * The core of SLOB is a traditional K&R style heap allocator, with
   * support for returning aligned objects. The granularity of this
   * allocator is as little as 2 bytes; however, most architectures will
   * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
   *
   * The slob heap is a set of linked lists of pages from alloc_pages(),
   * and within each page, there is a singly-linked list of free blocks
   * (slob_t). The heap is grown on demand. To reduce fragmentation,
   * heap pages are segregated into three lists, with objects less than
   * 256 bytes, objects less than 1024 bytes, and all other objects.
   *
   * Allocation from the heap involves first searching for a page with
   * sufficient free blocks (using a next-fit-like approach) followed by
   * a first-fit scan of the page. Deallocation inserts objects back
   * into the free list in address order, so this is effectively an
   * address-ordered first fit.
   *
   * Above this is an implementation of kmalloc/kfree. Blocks returned
   * from kmalloc are prepended with a 4-byte header with the kmalloc size.
   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
   * alloc_pages() directly, allocating compound pages so the page order
   * does not have to be separately tracked, and also stores the exact
   * allocation size in page->private so that it can be used to accurately
   * provide ksize(). These objects are detected in kfree() because slob_page()
   * is false for them.
   *
   * SLAB is emulated on top of SLOB by simply calling constructors and
   * destructors for every SLAB allocation. Objects are returned with the
   * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
   * case the low-level allocator will fragment blocks to create the proper
   * alignment. Again, objects of page-size or greater are allocated by
   * calling alloc_pages(). As SLAB objects know their size, no separate
   * size bookkeeping is necessary and there is essentially no allocation
   * space overhead, and compound pages aren't needed for multi-page
   * allocations.
   *
   * NUMA support in SLOB is fairly simplistic, pushing most of the real
   * logic down to the page allocator, and simply doing the node accounting
   * on the upper levels. In the event that a node id is explicitly
   * provided, alloc_pages_exact_node() with the specified node id is used
   * instead. The common case (or when the node id isn't explicitly provided)
   * will default to the current node, as per numa_node_id().
   *
   * Node-aware pages are still inserted into the global freelist, and
   * these are scanned for by matching against the node id encoded in the
   * page flags. As a result, block allocations that can be satisfied from
   * the freelist will only be done on pages residing on the same node,
   * in order to prevent random node placement.
   */
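
  /*
   * Illustrative example: assuming 4 KB pages and an 8-byte kmalloc
   * header and alignment, a kmalloc(100) request is carved from a page
   * on the "small" list (108 bytes < 256), kmalloc(600) from the
   * "medium" list (608 < 1024), kmalloc(3000) from the "large" list,
   * and kmalloc(5000) skips the slob lists and goes straight to
   * alloc_pages().
   */
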
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/cache.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/rcupdate.h>
  #include <linux/list.h>
  #include <linux/kmemleak.h>
  
  #include <trace/events/kmem.h>
  #include <asm/atomic.h>
  /*
   * slob_block has a field 'units', which indicates size of block if +ve,
   * or offset of next block if -ve (in SLOB_UNITs).
   *
   * Free blocks of size 1 unit simply contain the offset of the next block.
   * Those with larger size contain their size in the first SLOB_UNIT of
   * memory, and the offset of the next free block in the second SLOB_UNIT.
   */
  #if PAGE_SIZE <= (32767 * 2)
  typedef s16 slobidx_t;
  #else
  typedef s32 slobidx_t;
  #endif
  struct slob_block {
  	slobidx_t units;
  };
  typedef struct slob_block slob_t;
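
  /*
   * Worked example of the encoding above: with a 4-byte slob_t, a free
   * block spanning 3 units whose next free block sits 64 units from the
   * start of the page is stored as s[0].units == 3, s[1].units == 64.
   * A 1-unit free block has no room for both fields, so it stores only
   * s[0].units == -64.
   */
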
  /*
   * We use struct page fields to manage some slob allocation aspects,
   * however to avoid the horrible mess in include/linux/mm_types.h, we'll
   * just define our own struct page type variant here.
   */
  struct slob_page {
  	union {
  		struct {
  			unsigned long flags;	/* mandatory */
  			atomic_t _count;	/* mandatory */
  			slobidx_t units;	/* free units left in page */
  			unsigned long pad[2];
  			slob_t *free;		/* first free slob_t in page */
  			struct list_head list;	/* linked list of free pages */
  		};
  		struct page page;
  	};
  };
  static inline void struct_slob_page_wrong_size(void)
  { BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
  
  /*
   * free_slob_page: call before a slob_page is returned to the page allocator.
   */
  static inline void free_slob_page(struct slob_page *sp)
  {
  	reset_page_mapcount(&sp->page);
  	sp->page.mapping = NULL;
  }
  
  /*
   * All partially free slob pages go on these lists.
   */
  #define SLOB_BREAK1 256
  #define SLOB_BREAK2 1024
  static LIST_HEAD(free_slob_small);
  static LIST_HEAD(free_slob_medium);
  static LIST_HEAD(free_slob_large);
  
  /*
   * is_slob_page: True for all slob pages (false for bigblock pages)
   */
  static inline int is_slob_page(struct slob_page *sp)
  {
  	return PageSlab((struct page *)sp);
  }
  
  static inline void set_slob_page(struct slob_page *sp)
  {
  	__SetPageSlab((struct page *)sp);
  }
  
  static inline void clear_slob_page(struct slob_page *sp)
  {
  	__ClearPageSlab((struct page *)sp);
  }
  static inline struct slob_page *slob_page(const void *addr)
  {
  	return (struct slob_page *)virt_to_page(addr);
  }
  /*
   * slob_page_free: true for pages on one of the free_slob_* lists.
   */
  static inline int slob_page_free(struct slob_page *sp)
  {
  	return PageSlobFree((struct page *)sp);
  }
  static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
  {
  	list_add(&sp->list, list);
  	__SetPageSlobFree((struct page *)sp);
  }
  
  static inline void clear_slob_page_free(struct slob_page *sp)
  {
  	list_del(&sp->list);
  	__ClearPageSlobFree((struct page *)sp);
  }
  #define SLOB_UNIT sizeof(slob_t)
  #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
  #define SLOB_ALIGN L1_CACHE_BYTES
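  /* For example, with a 4-byte slob_t, SLOB_UNITS(10) rounds up to 3 units. */
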
  /*
   * struct slob_rcu is inserted at the tail of allocated slob blocks, which
   * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
   * the block using call_rcu.
   */
  struct slob_rcu {
  	struct rcu_head head;
  	int size;
  };
  /*
   * slob_lock protects all slob allocator structures.
   */
  static DEFINE_SPINLOCK(slob_lock);

  /*
   * Encode the given size and next info into a free slob block s.
   */
  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t offset = next - base;

  	if (size > 1) {
  		s[0].units = size;
  		s[1].units = offset;
  	} else
  		s[0].units = -offset;
  }

  /*
   * Return the size of a slob block.
   */
  static slobidx_t slob_units(slob_t *s)
  {
  	if (s->units > 0)
  		return s->units;
  	return 1;
  }
  
  /*
   * Return the next free slob block pointer after this one.
   */
  static slob_t *slob_next(slob_t *s)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t next;
  
  	if (s[0].units < 0)
  		next = -s[0].units;
  	else
  		next = s[1].units;
  	return base+next;
  }
  
  /*
   * Returns true if s is the last free block in its page.
   */
  static int slob_last(slob_t *s)
  {
  	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
  }
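
  /*
   * slob_new_pages: grab a fresh page (or higher-order block) from the
   * page allocator, honouring an explicit NUMA node when one is given.
   */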
  static void *slob_new_pages(gfp_t gfp, int order, int node)
  {
  	void *page;
  
  #ifdef CONFIG_NUMA
  	if (node != -1)
  		page = alloc_pages_exact_node(node, gfp, order);
  	else
  #endif
  		page = alloc_pages(gfp, order);
  
  	if (!page)
  		return NULL;
  
  	return page_address(page);
  }
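
  /*
   * slob_free_pages: hand pages back to the page allocator, crediting
   * current->reclaim_state when freeing on behalf of page reclaim.
   */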
  static void slob_free_pages(void *b, int order)
  {
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += 1 << order;
  	free_pages((unsigned long)b, order);
  }
  /*
   * Allocate a slob block within a given slob_page sp.
   */
  static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  {
  	slob_t *prev, *cur, *aligned = NULL;
  	int delta = 0, units = SLOB_UNITS(size);

  	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
  		slobidx_t avail = slob_units(cur);
  		if (align) {
  			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
  			delta = aligned - cur;
  		}
  		if (avail >= units + delta) { /* room enough? */
  			slob_t *next;
  			if (delta) { /* need to fragment head to align? */
  				next = slob_next(cur);
  				set_slob(aligned, avail - delta, next);
  				set_slob(cur, delta, aligned);
  				prev = cur;
  				cur = aligned;
  				avail = slob_units(cur);
  			}
  			next = slob_next(cur);
  			if (avail == units) { /* exact fit? unlink. */
  				if (prev)
  					set_slob(prev, slob_units(prev), next);
  				else
  					sp->free = next;
  			} else { /* fragment */
  				if (prev)
  					set_slob(prev, slob_units(prev), cur + units);
  				else
  					sp->free = cur + units;
  				set_slob(cur + units, avail - units, next);
  			}
  			sp->units -= units;
  			if (!sp->units)
  				clear_slob_page_free(sp);
  			return cur;
  		}
  		if (slob_last(cur))
  			return NULL;
  	}
  }

  /*
   * slob_alloc: entry point into the slob allocator.
   */
  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  {
  	struct slob_page *sp;
  	struct list_head *prev;
  	struct list_head *slob_list;
  	slob_t *b = NULL;
  	unsigned long flags;

  	if (size < SLOB_BREAK1)
  		slob_list = &free_slob_small;
  	else if (size < SLOB_BREAK2)
  		slob_list = &free_slob_medium;
  	else
  		slob_list = &free_slob_large;
  	spin_lock_irqsave(&slob_lock, flags);
  	/* Iterate through each partially free page, try to find room */
  	list_for_each_entry(sp, slob_list, list) {
  #ifdef CONFIG_NUMA
  		/*
  		 * If there's a node specification, search for a partial
  		 * page with a matching node id in the freelist.
  		 */
  		if (node != -1 && page_to_nid(&sp->page) != node)
  			continue;
  #endif
  		/* Enough room on this page? */
  		if (sp->units < SLOB_UNITS(size))
  			continue;

  		/* Attempt to alloc */
  		prev = sp->list.prev;
  		b = slob_page_alloc(sp, size, align);
  		if (!b)
  			continue;
  
  		/* Improve fragment distribution and reduce our average
  		 * search time by starting our next search here. (see
  		 * Knuth vol 1, sec 2.5, pg 449) */
  		if (prev != slob_list->prev &&
  				slob_list->next != prev->next)
  			list_move_tail(slob_list, prev->next);
  		break;
  	}
  	spin_unlock_irqrestore(&slob_lock, flags);
  
  	/* Not enough space: must allocate a new page */
  	if (!b) {
  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
  		if (!b)
  			return NULL;
  		sp = slob_page(b);
  		set_slob_page(sp);
  
  		spin_lock_irqsave(&slob_lock, flags);
  		sp->units = SLOB_UNITS(PAGE_SIZE);
  		sp->free = b;
  		INIT_LIST_HEAD(&sp->list);
  		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
  		set_slob_page_free(sp, slob_list);
  		b = slob_page_alloc(sp, size, align);
  		BUG_ON(!b);
  		spin_unlock_irqrestore(&slob_lock, flags);
  	}
  	if (unlikely((gfp & __GFP_ZERO) && b))
  		memset(b, 0, size);
  	return b;
  }
  /*
   * slob_free: return a block to the slob allocator.
   */
  static void slob_free(void *block, int size)
  {
  	struct slob_page *sp;
  	slob_t *prev, *next, *b = (slob_t *)block;
  	slobidx_t units;
  	unsigned long flags;
  	struct list_head *slob_list;

  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	BUG_ON(!size);

  	sp = slob_page(block);
  	units = SLOB_UNITS(size);

  	spin_lock_irqsave(&slob_lock, flags);

  	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
  		/* Go directly to page allocator. Do not pass slob allocator */
  		if (slob_page_free(sp))
  			clear_slob_page_free(sp);
  		spin_unlock_irqrestore(&slob_lock, flags);
  		clear_slob_page(sp);
  		free_slob_page(sp);
  		slob_free_pages(b, 0);
  		return;
  	}

  	if (!slob_page_free(sp)) {
  		/* This slob page is about to become partially free. Easy! */
  		sp->units = units;
  		sp->free = b;
  		set_slob(b, units,
  			(void *)((unsigned long)(b +
  					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
  		if (size < SLOB_BREAK1)
  			slob_list = &free_slob_small;
  		else if (size < SLOB_BREAK2)
  			slob_list = &free_slob_medium;
  		else
  			slob_list = &free_slob_large;
  		set_slob_page_free(sp, slob_list);
  		goto out;
  	}
  
  	/*
  	 * Otherwise the page is already partially free, so find reinsertion
  	 * point.
  	 */
  	sp->units += units;

  	if (b < sp->free) {
  		if (b + units == sp->free) {
  			units += slob_units(sp->free);
  			sp->free = slob_next(sp->free);
  		}
  		set_slob(b, units, sp->free);
  		sp->free = b;
  	} else {
  		prev = sp->free;
  		next = slob_next(prev);
  		while (b > next) {
  			prev = next;
  			next = slob_next(prev);
  		}

  		if (!slob_last(prev) && b + units == next) {
  			units += slob_units(next);
  			set_slob(b, units, slob_next(next));
  		} else
  			set_slob(b, units, next);
  
  		if (prev + slob_units(prev) == b) {
  			units = slob_units(b) + slob_units(prev);
  			set_slob(prev, units, slob_next(b));
  		} else
  			set_slob(prev, slob_units(prev), b);
  	}
  out:
  	spin_unlock_irqrestore(&slob_lock, flags);
  }
  /*
   * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
   */
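
  /*
   * __kmalloc_node: kmalloc() backend.  Requests smaller than
   * PAGE_SIZE - align come from slob_alloc(), with the requested size
   * stored in an aligned header just before the returned pointer (for
   * example, with an 8-byte minimum alignment, kmalloc(100) consumes
   * 108 bytes and returns the address 8 bytes past the stored size).
   * Larger requests go straight to the page allocator, with the size
   * kept in page->private for ksize().
   */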
  void *__kmalloc_node(size_t size, gfp_t gfp, int node)
  {
  	unsigned int *m;
  	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  	void *ret;

  	lockdep_trace_alloc(gfp);

  	if (size < PAGE_SIZE - align) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  		m = slob_alloc(size + align, gfp, align, node);

  		if (!m)
  			return NULL;
  		*m = size;
  		ret = (void *)m + align;
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, size + align, gfp, node);
  	} else {
  		unsigned int order = get_order(size);

  		if (likely(order))
  			gfp |= __GFP_COMP;
  		ret = slob_new_pages(gfp, order, node);
  		if (ret) {
  			struct page *page;
  			page = virt_to_page(ret);
  			page->private = size;
  		}

  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << order, gfp, node);
  	}

  	kmemleak_alloc(ret, size, 1, gfp);
  	return ret;
  }
  EXPORT_SYMBOL(__kmalloc_node);
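
  /*
   * kfree: release a block obtained from kmalloc.  Slob-managed blocks
   * step back over the size header and go through slob_free(); big-block
   * allocations are released by dropping the compound page's reference.
   */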
  
  void kfree(const void *block)
  {
  	struct slob_page *sp;

  	trace_kfree(_RET_IP_, block);
  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	kmemleak_free(block);

  	sp = slob_page(block);
  	if (is_slob_page(sp)) {
  		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  		unsigned int *m = (unsigned int *)(block - align);
  		slob_free(m, *m + align);
  	} else
  		put_page(&sp->page);
  }
  EXPORT_SYMBOL(kfree);
  /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
  size_t ksize(const void *block)
  {
  	struct slob_page *sp;

  	BUG_ON(!block);
  	if (unlikely(block == ZERO_SIZE_PTR))
  		return 0;
  	sp = slob_page(block);
  	if (is_slob_page(sp)) {
  		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  		unsigned int *m = (unsigned int *)(block - align);
  		return SLOB_UNITS(*m) * SLOB_UNIT;
  	} else
  		return sp->page.private;
  }
  EXPORT_SYMBOL(ksize);
  
  struct kmem_cache {
  	unsigned int size, align;
  	unsigned long flags;
  	const char *name;
  	void (*ctor)(void *);
  };
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  	size_t align, unsigned long flags, void (*ctor)(void *))
  {
  	struct kmem_cache *c;
  	c = slob_alloc(sizeof(struct kmem_cache),
  		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
  
  	if (c) {
  		c->name = name;
  		c->size = size;
  		if (flags & SLAB_DESTROY_BY_RCU) {
  			/* leave room for rcu footer at the end of object */
  			c->size += sizeof(struct slob_rcu);
  		}
  		c->flags = flags;
  		c->ctor = ctor;
  		/* ignore alignment unless it's forced */
  		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
  		if (c->align < ARCH_SLAB_MINALIGN)
  			c->align = ARCH_SLAB_MINALIGN;
  		if (c->align < align)
  			c->align = align;
  	} else if (flags & SLAB_PANIC)
  		panic("Cannot create slab cache %s\n", name);

  	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
  	return c;
  }
  EXPORT_SYMBOL(kmem_cache_create);
  void kmem_cache_destroy(struct kmem_cache *c)
  {
  	kmemleak_free(c);
  	if (c->flags & SLAB_DESTROY_BY_RCU)
  		rcu_barrier();
  	slob_free(c, sizeof(struct kmem_cache));
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
  {
  	void *b;
  	if (c->size < PAGE_SIZE) {
  		b = slob_alloc(c->size, flags, c->align, node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
  					    SLOB_UNITS(c->size) * SLOB_UNIT,
  					    flags, node);
  	} else {
  		b = slob_new_pages(flags, get_order(c->size), node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
  					    PAGE_SIZE << get_order(c->size),
  					    flags, node);
  	}
  
  	if (c->ctor)
  		c->ctor(b);

  	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
  	return b;
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);

  static void __kmem_cache_free(void *b, int size)
  {
  	if (size < PAGE_SIZE)
  		slob_free(b, size);
  	else
  		slob_free_pages(b, get_order(size));
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
  {
  	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
  	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
  	__kmem_cache_free(b, slob_rcu->size);
  }
  
  void kmem_cache_free(struct kmem_cache *c, void *b)
  {
  	kmemleak_free_recursive(b, c->flags);
  	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
  		struct slob_rcu *slob_rcu;
  		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
  		slob_rcu->size = c->size;
  		call_rcu(&slob_rcu->head, kmem_rcu_free);
  	} else {
  		__kmem_cache_free(b, c->size);
  	}

  	trace_kmem_cache_free(_RET_IP_, b);
  }
  EXPORT_SYMBOL(kmem_cache_free);
  
  unsigned int kmem_cache_size(struct kmem_cache *c)
  {
  	return c->size;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  int kmem_cache_shrink(struct kmem_cache *d)
  {
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
  static unsigned int slob_ready __read_mostly;
  
  int slab_is_available(void)
  {
  	return slob_ready;
  }
  void __init kmem_cache_init(void)
  {
  	slob_ready = 1;
  }
  
  void __init kmem_cache_init_late(void)
  {
  	/* Nothing to do */
  }