mm/slob.c

  /*
   * SLOB Allocator: Simple List Of Blocks
   *
   * Matt Mackall <mpm@selenic.com> 12/30/03
   *
   * NUMA support by Paul Mundt, 2007.
   *
   * How SLOB works:
   *
   * The core of SLOB is a traditional K&R style heap allocator, with
   * support for returning aligned objects. The granularity of this
   * allocator is as little as 2 bytes, though typically most architectures
   * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
   *
   * The slob heap is a set of linked lists of pages from alloc_pages(),
   * and within each page, there is a singly-linked list of free blocks
   * (slob_t). The heap is grown on demand. To reduce fragmentation,
   * heap pages are segregated into three lists, with objects less than
   * 256 bytes, objects less than 1024 bytes, and all other objects.
   *
   * Allocation from the heap involves first searching for a page with
   * sufficient free blocks (using a next-fit-like approach) followed by
   * a first-fit scan of the page. Deallocation inserts objects back
   * into the free list in address order, so this is effectively an
   * address-ordered first fit.
   *
   * Above this is an implementation of kmalloc/kfree. Blocks returned
   * from kmalloc are prepended with a 4-byte header with the kmalloc size.
   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
   * alloc_pages() directly, allocating compound pages so the page order
   * does not have to be separately tracked, and also stores the exact
   * allocation size in page->private so that it can be used to accurately
   * provide ksize(). These objects are detected in kfree() because slob_page()
   * is false for them.
   *
   * SLAB is emulated on top of SLOB by simply calling constructors and
   * destructors for every SLAB allocation. Objects are returned with
   * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
   * case the low-level allocator will fragment blocks to create the proper
   * alignment. Again, objects of page-size or greater are allocated by
   * calling alloc_pages(). As SLAB objects know their size, no separate
   * size bookkeeping is necessary and there is essentially no allocation
   * space overhead, and compound pages aren't needed for multi-page
   * allocations.
   *
   * NUMA support in SLOB is fairly simplistic, pushing most of the real
   * logic down to the page allocator, and simply doing the node accounting
   * on the upper levels. In the event that a node id is explicitly
   * provided, alloc_pages_exact_node() with the specified node id is used
   * instead. The common case (or when the node id isn't explicitly provided)
   * will default to the current node, as per numa_node_id().
   *
   * Node aware pages are still inserted into the global freelist, and
   * these are scanned for by matching against the node id encoded in the
   * page flags. As a result, block allocations that can be satisfied from
   * the freelist will only be served from pages residing on the same node,
   * in order to prevent random node placement.
   */
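  /*
   * Illustrative sketch (not part of the original source): roughly how a
   * small kmalloc() request is laid out, assuming a 4-byte minimum
   * alignment (the real value depends on ARCH_KMALLOC_MINALIGN and
   * ARCH_SLAB_MINALIGN):
   *
   *	p = kmalloc(100, GFP_KERNEL);
   *
   *	slob block:   [ 4-byte header: 100 ][ 100-byte object ... ]
   *	                                     ^ p returned to the caller
   *
   * slob_alloc() is asked for size + align bytes, the requested size is
   * stored in the header, and kfree(p) later reads it back from p - align
   * to know how much to return to the free list. Requests of roughly
   * PAGE_SIZE or more skip all of this and go straight to alloc_pages(),
   * with the size kept in page->private instead.
   */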
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/cache.h>
  #include <linux/init.h>
  #include <linux/export.h>
  #include <linux/rcupdate.h>
  #include <linux/list.h>
  #include <linux/kmemleak.h>
  
  #include <trace/events/kmem.h>
  #include <linux/atomic.h>

  /*
   * slob_block has a field 'units', which indicates size of block if +ve,
   * or offset of next block if -ve (in SLOB_UNITs).
   *
   * Free blocks of size 1 unit simply contain the offset of the next block.
   * Those with larger size contain their size in the first SLOB_UNIT of
   * memory, and the offset of the next free block in the second SLOB_UNIT.
   */
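  /*
   * Worked example (illustrative, not from the original source), assuming
   * the common case of a 2-byte slob_t (s16 slobidx_t) on a 4K page, i.e.
   * 2048 units per page:
   *
   *	free block of 5 units at unit index 10, next free block at index 40:
   *		s[0].units =  5;	(size in units)
   *		s[1].units = 40;	(offset of next, from the page base)
   *
   *	free block of 1 unit at index 100, next free block at index 200:
   *		s[0].units = -200;	(negative: only the next offset fits)
   *
   * slob_next() turns the stored offset back into a pointer by adding it to
   * the page base; a next pointer that lands on a page boundary marks the
   * last free block, which is what slob_last() checks for.
   */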
  #if PAGE_SIZE <= (32767 * 2)
  typedef s16 slobidx_t;
  #else
  typedef s32 slobidx_t;
  #endif
  struct slob_block {
  	slobidx_t units;
  };
  typedef struct slob_block slob_t;
  /*
   * We use struct page fields to manage some slob allocation aspects,
   * however to avoid the horrible mess in include/linux/mm_types.h, we'll
   * just define our own struct page type variant here.
   */
  struct slob_page {
  	union {
  		struct {
  			unsigned long flags;	/* mandatory */
  			atomic_t _count;	/* mandatory */
  			slobidx_t units;	/* free units left in page */
  			unsigned long pad[2];
  			slob_t *free;		/* first free slob_t in page */
  			struct list_head list;	/* linked list of free pages */
  		};
  		struct page page;
  	};
  };
  static inline void struct_slob_page_wrong_size(void)
  { BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
  
  /*
   * free_slob_page: call before a slob_page is returned to the page allocator.
   */
  static inline void free_slob_page(struct slob_page *sp)
  {
  	reset_page_mapcount(&sp->page);
  	sp->page.mapping = NULL;
  }
  
  /*
   * All partially free slob pages go on these lists.
   */
  #define SLOB_BREAK1 256
  #define SLOB_BREAK2 1024
  static LIST_HEAD(free_slob_small);
  static LIST_HEAD(free_slob_medium);
  static LIST_HEAD(free_slob_large);
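  /*
   * Examples of how requests map onto these lists (illustrative only):
   * 64-byte and 200-byte allocations come from free_slob_small, a 512-byte
   * allocation from free_slob_medium, and a 2000-byte allocation from
   * free_slob_large. Requests close to PAGE_SIZE or larger never reach
   * these lists; they are handed to the page allocator directly.
   */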
  
  /*
   * is_slob_page: True for all slob pages (false for bigblock pages)
   */
  static inline int is_slob_page(struct slob_page *sp)
  {
  	return PageSlab((struct page *)sp);
  }
  
  static inline void set_slob_page(struct slob_page *sp)
  {
  	__SetPageSlab((struct page *)sp);
  }
  
  static inline void clear_slob_page(struct slob_page *sp)
  {
  	__ClearPageSlab((struct page *)sp);
  }
  static inline struct slob_page *slob_page(const void *addr)
  {
  	return (struct slob_page *)virt_to_page(addr);
  }
  /*
   * slob_page_free: true for pages on one of the free_slob_* lists.
   */
  static inline int slob_page_free(struct slob_page *sp)
  {
  	return PageSlobFree((struct page *)sp);
  }
  static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
  {
  	list_add(&sp->list, list);
  	__SetPageSlobFree((struct page *)sp);
  }
  
  static inline void clear_slob_page_free(struct slob_page *sp)
  {
  	list_del(&sp->list);
  	__ClearPageSlobFree((struct page *)sp);
  }
  #define SLOB_UNIT sizeof(slob_t)
  #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
  #define SLOB_ALIGN L1_CACHE_BYTES
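  /*
   * Example of the unit arithmetic (illustrative): with the common 2-byte
   * slob_t, SLOB_UNIT == 2 and
   *
   *	SLOB_UNITS(1)         == 1	(every object takes at least one unit)
   *	SLOB_UNITS(100)       == 50
   *	SLOB_UNITS(PAGE_SIZE) == 2048	(on 4K pages)
   *
   * i.e. sizes are always rounded up to a whole number of slob_t units.
   */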
  /*
   * struct slob_rcu is inserted at the tail of allocated slob blocks, which
   * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
   * the block using call_rcu.
   */
  struct slob_rcu {
  	struct rcu_head head;
  	int size;
  };
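  /*
   * Layout sketch (illustrative): for a SLAB_DESTROY_BY_RCU cache,
   * kmem_cache_create() grows c->size by sizeof(struct slob_rcu), so each
   * object carries a hidden footer:
   *
   *	[ user-visible object ...... ][ struct slob_rcu ]
   *	^ pointer handed to the caller
   *
   * kmem_cache_free() fills in the footer and queues it with call_rcu();
   * the memory is only returned to the allocator in kmem_rcu_free(), after
   * a grace period has elapsed.
   */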
  /*
   * slob_lock protects all slob allocator structures.
   */
  static DEFINE_SPINLOCK(slob_lock);

  /*
   * Encode the given size and next info into a free slob block s.
   */
  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t offset = next - base;

  	if (size > 1) {
  		s[0].units = size;
  		s[1].units = offset;
  	} else
  		s[0].units = -offset;
  }

  /*
   * Return the size of a slob block.
   */
  static slobidx_t slob_units(slob_t *s)
  {
  	if (s->units > 0)
  		return s->units;
  	return 1;
  }
  
  /*
   * Return the next free slob block pointer after this one.
   */
  static slob_t *slob_next(slob_t *s)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t next;
  
  	if (s[0].units < 0)
  		next = -s[0].units;
  	else
  		next = s[1].units;
  	return base+next;
  }
  
  /*
   * Returns true if s is the last free block in its page.
   */
  static int slob_last(slob_t *s)
  {
  	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
  }
  static void *slob_new_pages(gfp_t gfp, int order, int node)
  {
  	void *page;
  
  #ifdef CONFIG_NUMA
  	if (node != -1)
  		page = alloc_pages_exact_node(node, gfp, order);
  	else
  #endif
  		page = alloc_pages(gfp, order);
  
  	if (!page)
  		return NULL;
  
  	return page_address(page);
  }
  static void slob_free_pages(void *b, int order)
  {
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += 1 << order;
  	free_pages((unsigned long)b, order);
  }
  /*
   * Allocate a slob block within a given slob_page sp.
   */
  static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  {
  	slob_t *prev, *cur, *aligned = NULL;
  	int delta = 0, units = SLOB_UNITS(size);

  	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
  		slobidx_t avail = slob_units(cur);
  		if (align) {
  			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
  			delta = aligned - cur;
  		}
  		if (avail >= units + delta) { /* room enough? */
  			slob_t *next;
  			if (delta) { /* need to fragment head to align? */
  				next = slob_next(cur);
  				set_slob(aligned, avail - delta, next);
  				set_slob(cur, delta, aligned);
  				prev = cur;
  				cur = aligned;
  				avail = slob_units(cur);
  			}
  			next = slob_next(cur);
  			if (avail == units) { /* exact fit? unlink. */
  				if (prev)
  					set_slob(prev, slob_units(prev), next);
  				else
  					sp->free = next;
  			} else { /* fragment */
  				if (prev)
  					set_slob(prev, slob_units(prev), cur + units);
  				else
  					sp->free = cur + units;
  				set_slob(cur + units, avail - units, next);
  			}
  			sp->units -= units;
  			if (!sp->units)
  				clear_slob_page_free(sp);
  			return cur;
  		}
  		if (slob_last(cur))
  			return NULL;
  	}
  }

  /*
   * slob_alloc: entry point into the slob allocator.
   */
  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  {
  	struct slob_page *sp;
  	struct list_head *prev;
  	struct list_head *slob_list;
  	slob_t *b = NULL;
  	unsigned long flags;

  	if (size < SLOB_BREAK1)
  		slob_list = &free_slob_small;
  	else if (size < SLOB_BREAK2)
  		slob_list = &free_slob_medium;
  	else
  		slob_list = &free_slob_large;
  	spin_lock_irqsave(&slob_lock, flags);
  	/* Iterate through each partially free page, try to find room */
  	list_for_each_entry(sp, slob_list, list) {
  #ifdef CONFIG_NUMA
  		/*
  		 * If there's a node specification, search for a partial
  		 * page with a matching node id in the freelist.
  		 */
  		if (node != -1 && page_to_nid(&sp->page) != node)
  			continue;
  #endif
  		/* Enough room on this page? */
  		if (sp->units < SLOB_UNITS(size))
  			continue;

  		/* Attempt to alloc */
  		prev = sp->list.prev;
  		b = slob_page_alloc(sp, size, align);
  		if (!b)
  			continue;
  
  		/* Improve fragment distribution and reduce our average
  		 * search time by starting our next search here. (see
  		 * Knuth vol 1, sec 2.5, pg 449) */
  		if (prev != slob_list->prev &&
  				slob_list->next != prev->next)
  			list_move_tail(slob_list, prev->next);
  		break;
  	}
  	spin_unlock_irqrestore(&slob_lock, flags);
  
  	/* Not enough space: must allocate a new page */
  	if (!b) {
  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
  		if (!b)
  			return NULL;
  		sp = slob_page(b);
  		set_slob_page(sp);
  
  		spin_lock_irqsave(&slob_lock, flags);
  		sp->units = SLOB_UNITS(PAGE_SIZE);
  		sp->free = b;
  		INIT_LIST_HEAD(&sp->list);
  		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
  		set_slob_page_free(sp, slob_list);
  		b = slob_page_alloc(sp, size, align);
  		BUG_ON(!b);
  		spin_unlock_irqrestore(&slob_lock, flags);
  	}
  	if (unlikely((gfp & __GFP_ZERO) && b))
  		memset(b, 0, size);
  	return b;
  }
  /*
   * slob_free: entry point into the slob allocator.
   */
  static void slob_free(void *block, int size)
  {
  	struct slob_page *sp;
  	slob_t *prev, *next, *b = (slob_t *)block;
  	slobidx_t units;
  	unsigned long flags;
  	struct list_head *slob_list;

  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	BUG_ON(!size);

  	sp = slob_page(block);
  	units = SLOB_UNITS(size);

  	spin_lock_irqsave(&slob_lock, flags);

  	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
  		/* Go directly to page allocator. Do not pass slob allocator */
  		if (slob_page_free(sp))
  			clear_slob_page_free(sp);
  		spin_unlock_irqrestore(&slob_lock, flags);
  		clear_slob_page(sp);
  		free_slob_page(sp);
  		slob_free_pages(b, 0);
  		return;
  	}

  	if (!slob_page_free(sp)) {
  		/* This slob page is about to become partially free. Easy! */
  		sp->units = units;
  		sp->free = b;
  		set_slob(b, units,
  			(void *)((unsigned long)(b +
  					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
  		if (size < SLOB_BREAK1)
  			slob_list = &free_slob_small;
  		else if (size < SLOB_BREAK2)
  			slob_list = &free_slob_medium;
  		else
  			slob_list = &free_slob_large;
  		set_slob_page_free(sp, slob_list);
  		goto out;
  	}
  
  	/*
  	 * Otherwise the page is already partially free, so find reinsertion
  	 * point.
  	 */
  	sp->units += units;

  	if (b < sp->free) {
  		if (b + units == sp->free) {
  			units += slob_units(sp->free);
  			sp->free = slob_next(sp->free);
  		}
  		set_slob(b, units, sp->free);
  		sp->free = b;
  	} else {
  		prev = sp->free;
  		next = slob_next(prev);
  		while (b > next) {
  			prev = next;
  			next = slob_next(prev);
  		}

  		if (!slob_last(prev) && b + units == next) {
  			units += slob_units(next);
  			set_slob(b, units, slob_next(next));
  		} else
  			set_slob(b, units, next);
  
  		if (prev + slob_units(prev) == b) {
  			units = slob_units(b) + slob_units(prev);
  			set_slob(prev, units, slob_next(b));
  		} else
  			set_slob(prev, slob_units(prev), b);
  	}
  out:
  	spin_unlock_irqrestore(&slob_lock, flags);
  }
  /*
   * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
   */
  void *__kmalloc_node(size_t size, gfp_t gfp, int node)
  {
  	unsigned int *m;
  	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  	void *ret;

  	gfp &= gfp_allowed_mask;
  	lockdep_trace_alloc(gfp);

  	if (size < PAGE_SIZE - align) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  		m = slob_alloc(size + align, gfp, align, node);

  		if (!m)
  			return NULL;
  		*m = size;
  		ret = (void *)m + align;
  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, size + align, gfp, node);
  	} else {
  		unsigned int order = get_order(size);

  		if (likely(order))
  			gfp |= __GFP_COMP;
  		ret = slob_new_pages(gfp, order, node);
  		if (ret) {
  			struct page *page;
  			page = virt_to_page(ret);
  			page->private = size;
  		}

  		trace_kmalloc_node(_RET_IP_, ret,
  				   size, PAGE_SIZE << order, gfp, node);
  	}

  	kmemleak_alloc(ret, size, 1, gfp);
  	return ret;
  }
  EXPORT_SYMBOL(__kmalloc_node);
  
  void kfree(const void *block)
  {
  	struct slob_page *sp;

  	trace_kfree(_RET_IP_, block);
  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	kmemleak_free(block);

  	sp = slob_page(block);
  	if (is_slob_page(sp)) {
  		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  		unsigned int *m = (unsigned int *)(block - align);
  		slob_free(m, *m + align);
  	} else
  		put_page(&sp->page);
  }
  EXPORT_SYMBOL(kfree);
  /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
  size_t ksize(const void *block)
  {
  	struct slob_page *sp;

  	BUG_ON(!block);
  	if (unlikely(block == ZERO_SIZE_PTR))
  		return 0;
  	sp = slob_page(block);
  	if (is_slob_page(sp)) {
  		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  		unsigned int *m = (unsigned int *)(block - align);
  		return SLOB_UNITS(*m) * SLOB_UNIT;
  	} else
  		return sp->page.private;
  }
  EXPORT_SYMBOL(ksize);
  
  struct kmem_cache {
  	unsigned int size, align;
  	unsigned long flags;
  	const char *name;
  	void (*ctor)(void *);
  };
  
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  	size_t align, unsigned long flags, void (*ctor)(void *))
  {
  	struct kmem_cache *c;
  	c = slob_alloc(sizeof(struct kmem_cache),
  		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
  
  	if (c) {
  		c->name = name;
  		c->size = size;
  		if (flags & SLAB_DESTROY_BY_RCU) {
  			/* leave room for rcu footer at the end of object */
  			c->size += sizeof(struct slob_rcu);
  		}
  		c->flags = flags;
  		c->ctor = ctor;
  		/* ignore alignment unless it's forced */
  		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
  		if (c->align < ARCH_SLAB_MINALIGN)
  			c->align = ARCH_SLAB_MINALIGN;
  		if (c->align < align)
  			c->align = align;
  	} else if (flags & SLAB_PANIC)
  		panic("Cannot create slab cache %s
  ", name);

  	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
  	return c;
  }
  EXPORT_SYMBOL(kmem_cache_create);
  void kmem_cache_destroy(struct kmem_cache *c)
  {
  	kmemleak_free(c);
  	if (c->flags & SLAB_DESTROY_BY_RCU)
  		rcu_barrier();
  	slob_free(c, sizeof(struct kmem_cache));
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
  {
  	void *b;
  	flags &= gfp_allowed_mask;
  
  	lockdep_trace_alloc(flags);
  	if (c->size < PAGE_SIZE) {
  		b = slob_alloc(c->size, flags, c->align, node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
  					    SLOB_UNITS(c->size) * SLOB_UNIT,
  					    flags, node);
  	} else {
  		b = slob_new_pages(flags, get_order(c->size), node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
  					    PAGE_SIZE << get_order(c->size),
  					    flags, node);
  	}
  
  	if (c->ctor)
  		c->ctor(b);

  	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
  	return b;
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);

  static void __kmem_cache_free(void *b, int size)
  {
  	if (size < PAGE_SIZE)
  		slob_free(b, size);
  	else
  		slob_free_pages(b, get_order(size));
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
  {
  	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
  	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
  	__kmem_cache_free(b, slob_rcu->size);
  }
  
  void kmem_cache_free(struct kmem_cache *c, void *b)
  {
  	kmemleak_free_recursive(b, c->flags);
  	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
  		struct slob_rcu *slob_rcu;
  		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
  		slob_rcu->size = c->size;
  		call_rcu(&slob_rcu->head, kmem_rcu_free);
  	} else {
  		__kmem_cache_free(b, c->size);
  	}

  	trace_kmem_cache_free(_RET_IP_, b);
  }
  EXPORT_SYMBOL(kmem_cache_free);
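  /*
   * Minimal usage sketch of the cache API implemented above (illustrative
   * only; "foo" and struct foo are made up for the example):
   *
   *	static struct kmem_cache *foo_cache;
   *
   *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
   *				      SLAB_HWCACHE_ALIGN, NULL);
   *	p = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, -1);
   *	...
   *	kmem_cache_free(foo_cache, p);
   *	kmem_cache_destroy(foo_cache);
   *
   * Under SLOB the "cache" is only a small descriptor: objects smaller than
   * PAGE_SIZE come from slob_alloc(), larger ones from the page allocator.
   */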
  
  unsigned int kmem_cache_size(struct kmem_cache *c)
  {
  	return c->size;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  int kmem_cache_shrink(struct kmem_cache *d)
  {
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
  static unsigned int slob_ready __read_mostly;
  
  int slab_is_available(void)
  {
  	return slob_ready;
  }
  void __init kmem_cache_init(void)
  {
  	slob_ready = 1;
  }
  
  void __init kmem_cache_init_late(void)
  {
  	/* Nothing to do */
  }