mm/slob.c

// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures typically
 * require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, holding objects of less
 * than 256 bytes, objects of less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free space (using a next-fit-like approach), followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header holding the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be tracked separately. These objects are detected
 * in kfree() because PageSlab() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (when the node id isn't explicitly provided)
 * defaults to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist are only made from pages residing on the requested node,
 * in order to prevent random node placement.
 */
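
/*
 * Worked example of the kmalloc layout described above (illustrative
 * only; it assumes an 8-byte ARCH_KMALLOC_MINALIGN and 4 KB pages,
 * both of which are architecture-dependent):
 *
 *   p = kmalloc(100, GFP_KERNEL);
 *
 * slob_alloc() is asked for 100 + 8 = 108 bytes. The first 4 bytes
 * store the requested size (100), the next 4 bytes are alignment
 * padding, and p points 8 bytes past the header:
 *
 *   [ size = 100 | pad ][ 100-byte payload ... ]
 *   ^                   ^
 *   m                   p = (void *)m + align
 *
 * kfree(p) reads the header at p - 8 and hands size + align back to
 * slob_free(). A request of PAGE_SIZE or more would instead go to
 * alloc_pages() and be freed with __free_pages().
 */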

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>
#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
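
/*
 * A small worked example of the encoding above (illustrative only; it
 * assumes 4 KB pages, so slobidx_t is s16 and SLOB_UNIT is 2 bytes):
 *
 *   A free block of 3 units whose next free block lives at unit
 *   offset 100 within the same page is stored as
 *
 *     s[0].units =  3;	// size in SLOB_UNITs
 *     s[1].units = 100;	// offset of next free block from page base
 *
 *   A 1-unit free block has no room for both fields, so it stores the
 *   negated offset instead:
 *
 *     s[0].units = -100;	// next free block at unit offset 100
 *
 * set_slob(), slob_units() and slob_next() below implement exactly this
 * encoding.
 */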

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->lru, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->lru);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
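
/*
 * Sketch of the resulting object layout for a SLAB_TYPESAFE_BY_RCU cache
 * (illustrative; see __kmem_cache_create() and kmem_cache_free() below):
 *
 *     |<---------------- c->size ----------------->|
 *     [ object payload           ][ struct slob_rcu ]
 *     ^                           ^
 *     b                           b + c->size - sizeof(struct slob_rcu)
 *
 * __kmem_cache_create() grows c->size by sizeof(struct slob_rcu) so the
 * footer always fits, and kmem_cache_free() fills it in before handing
 * it to call_rcu().
 */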

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
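
/*
 * Note on list termination (illustrative): the last free block in a page
 * stores a next offset of SLOB_UNITS(PAGE_SIZE), so slob_next() lands
 * exactly on the following page boundary. slob_last() detects this by
 * checking that the resulting pointer has no offset bits inside the
 * page. A freshly initialised page is set up this way in slob_alloc():
 *
 *     set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 */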

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
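
/*
 * Illustrative walk-through of the alignment path above (not exhaustive):
 * suppose the current free block 'cur' starts 2 units before an 8-byte
 * boundary and the caller asked for units = 10 with align = 8. Then
 * delta = 2, and provided avail >= 12 the block is split in two: a
 * 2-unit head starting at 'cur' (still free, linked to 'aligned') and
 * the remainder starting at 'aligned', from which the 10 units are then
 * carved by the exact-fit/fragment logic that follows.
 */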

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
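
/*
 * A note on the list rotation above (illustrative): after a successful
 * allocation from page 'sp', list_move_tail() repositions the list head
 * just before 'prev->next', so the next search resumes near the page we
 * just used instead of rescanning from the front of the list. This is
 * the next-fit behaviour referred to in the header comment (and in the
 * Knuth reference); the two-pointer check skips the move when it would
 * be a no-op.
 */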

/*
 * slob_free: entry point into the slob allocator's free path.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);
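
/*
 * Worked example for ksize() (illustrative; assumes 2-byte SLOB_UNITs
 * and an 8-byte minimum alignment): kmalloc(100) stores 100 in its
 * header, so ksize() reports SLOB_UNITS(100) * SLOB_UNIT = 100 bytes,
 * while kmalloc(101) reports SLOB_UNITS(101) * SLOB_UNIT = 102 bytes,
 * i.e. the requested size rounded up to a whole number of SLOB_UNITs.
 * For a non-PageSlab (page-allocator backed) allocation it is simply
 * PAGE_SIZE << compound_order(sp).
 */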

int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}