mm/slob.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * SLOB Allocator: Simple List Of Blocks
   *
   * Matt Mackall <mpm@selenic.com> 12/30/03
   *
   * NUMA support by Paul Mundt, 2007.
   *
   * How SLOB works:
   *
   * The core of SLOB is a traditional K&R style heap allocator, with
   * support for returning aligned objects. The granularity of this
   * allocator is as little as 2 bytes; however, most architectures will
   * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
   *
   * The slob heap is a set of linked lists of pages from alloc_pages(),
   * and within each page, there is a singly-linked list of free blocks
   * (slob_t). The heap is grown on demand. To reduce fragmentation,
   * heap pages are segregated into three lists, with objects less than
   * 256 bytes, objects less than 1024 bytes, and all other objects.
   *
   * Allocation from the heap involves first searching for a page with
   * sufficient free blocks (using a next-fit-like approach) followed by
   * a first-fit scan of the page. Deallocation inserts objects back
   * into the free list in address order, so this is effectively an
   * address-ordered first fit.
   *
   * Above this is an implementation of kmalloc/kfree. Blocks returned
   * from kmalloc are prepended with a 4-byte header with the kmalloc size.
   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
   * alloc_pages() directly, allocating compound pages so the page order
   * does not have to be separately tracked.
   * These objects are detected in kfree() because PageSlab()
   * is false for them.
   *
   * SLAB is emulated on top of SLOB by simply calling constructors and
   * destructors for every SLAB allocation. Objects are returned with
   * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
   * case the low-level allocator will fragment blocks to create the proper
   * alignment. Again, objects of page-size or greater are allocated by
   * calling alloc_pages(). As SLAB objects know their size, no separate
   * size bookkeeping is necessary and there is essentially no allocation
   * space overhead, and compound pages aren't needed for multi-page
   * allocations.
   *
   * NUMA support in SLOB is fairly simplistic, pushing most of the real
   * logic down to the page allocator, and simply doing the node accounting
   * on the upper levels. In the event that a node id is explicitly
   * provided, __alloc_pages_node() with the specified node id is used
   * instead. The common case (or when the node id isn't explicitly provided)
   * will default to the current node, as per numa_node_id().
   *
   * Node-aware pages are still inserted into the global freelist, and
   * these are scanned for by matching against the node id encoded in the
   * page flags. As a result, block allocations that can be satisfied from
   * the freelist will only be done on pages residing on the same node,
   * in order to prevent random node placement.
   */
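
  /*
   * Illustrative layout of a kmalloc() block as described above (not used by
   * the code itself): the size header occupies the architecture's minimum
   * kmalloc/slab alignment (at least 4 bytes); see __do_kmalloc_node() and
   * kfree().
   *
   *   [ size header ][ object ... ]
   *                  ^ pointer returned to the caller
   */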
  #include <linux/kernel.h>
  #include <linux/slab.h>

  #include <linux/mm.h>
  #include <linux/swap.h> /* struct reclaim_state */
  #include <linux/cache.h>
  #include <linux/init.h>
  #include <linux/export.h>
  #include <linux/rcupdate.h>
  #include <linux/list.h>
  #include <linux/kmemleak.h>
  
  #include <trace/events/kmem.h>
  #include <linux/atomic.h>

  #include "slab.h"
  /*
   * slob_block has a field 'units', which indicates size of block if +ve,
   * or offset of next block if -ve (in SLOB_UNITs).
   *
   * Free blocks of size 1 unit simply contain the offset of the next block.
   * Those with larger size contain their size in the first SLOB_UNIT of
   * memory, and the offset of the next free block in the second SLOB_UNIT.
   */
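  /*
   * Worked example of the encoding above (illustrative only): with a 4-byte
   * slob_t, a free block of 3 units whose next free block starts at unit
   * offset 100 of the page is stored as s[0].units = 3, s[1].units = 100;
   * a 1-unit free block with the same successor stores s[0].units = -100.
   */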
  #if PAGE_SIZE <= (32767 * 2)
  typedef s16 slobidx_t;
  #else
  typedef s32 slobidx_t;
  #endif
  struct slob_block {
  	slobidx_t units;
  };
  typedef struct slob_block slob_t;
  /*
   * All partially free slob pages go on these lists.
   */
  #define SLOB_BREAK1 256
  #define SLOB_BREAK2 1024
  static LIST_HEAD(free_slob_small);
  static LIST_HEAD(free_slob_medium);
  static LIST_HEAD(free_slob_large);
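  /*
   * For example, a 100-byte allocation is served from free_slob_small, a
   * 500-byte allocation from free_slob_medium and a 2000-byte allocation
   * from free_slob_large (see slob_alloc()).
   */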
  
  /*
   * slob_page_free: true for pages on one of the free_slob_* lists.
   */
  static inline int slob_page_free(struct page *sp)
  {
  	return PageSlobFree(sp);
  }
  static void set_slob_page_free(struct page *sp, struct list_head *list)
  {
  	list_add(&sp->slab_list, list);
  	__SetPageSlobFree(sp);
  }
  static inline void clear_slob_page_free(struct page *sp)
  {
  	list_del(&sp->slab_list);
  	__ClearPageSlobFree(sp);
  }
  #define SLOB_UNIT sizeof(slob_t)
  #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
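  /*
   * Example: with a 4-byte slob_t, SLOB_UNITS(100) == 25; sizes are always
   * rounded up to a whole number of units.
   */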

  /*
   * struct slob_rcu is inserted at the tail of allocated slob blocks, which
   * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
   * the block using call_rcu.
   */
  struct slob_rcu {
  	struct rcu_head head;
  	int size;
  };
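  /*
   * An object from a SLAB_TYPESAFE_BY_RCU cache is therefore laid out as
   * [ object | struct slob_rcu ]: __kmem_cache_create() reserves the extra
   * space and kmem_cache_free() fills it in before calling call_rcu().
   */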
  /*
   * slob_lock protects all slob allocator structures.
   */
  static DEFINE_SPINLOCK(slob_lock);

  /*
   * Encode the given size and next info into a free slob block s.
   */
  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t offset = next - base;

  	if (size > 1) {
  		s[0].units = size;
  		s[1].units = offset;
  	} else
  		s[0].units = -offset;
  }

  /*
   * Return the size of a slob block.
   */
  static slobidx_t slob_units(slob_t *s)
  {
  	if (s->units > 0)
  		return s->units;
  	return 1;
  }
  
  /*
   * Return the next free slob block pointer after this one.
   */
  static slob_t *slob_next(slob_t *s)
  {
  	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
  	slobidx_t next;
  
  	if (s[0].units < 0)
  		next = -s[0].units;
  	else
  		next = s[1].units;
  	return base+next;
  }
  
  /*
   * Returns true if s is the last free block in its page.
   */
  static int slob_last(slob_t *s)
  {
  	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
  }
  static void *slob_new_pages(gfp_t gfp, int order, int node)
  {
  	struct page *page;
  
  #ifdef CONFIG_NUMA
  	if (node != NUMA_NO_NODE)
  		page = __alloc_pages_node(node, gfp, order);
  	else
  #endif
  		page = alloc_pages(gfp, order);
  
  	if (!page)
  		return NULL;
  	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
  			    PAGE_SIZE << order);
  	return page_address(page);
  }
  static void slob_free_pages(void *b, int order)
  {
  	struct page *sp = virt_to_page(b);
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += 1 << order;

  	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
  			    -(PAGE_SIZE << order));
  	__free_pages(sp, order);
  }
  /*
   * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
   * @sp: Page to look in.
   * @size: Size of the allocation.
   * @align: Allocation alignment.
   * @align_offset: Offset in the allocated block that will be aligned.
   * @page_removed_from_list: Return parameter.
   *
   * Tries to find a chunk of memory at least @size bytes big within @sp.
   *
   * Return: Pointer to memory if allocated, %NULL otherwise.  If the
   *         allocation fills up @sp then the page is removed from the
   *         freelist; in this case @page_removed_from_list will be set to
   *         true (set to false otherwise).
   */
  static void *slob_page_alloc(struct page *sp, size_t size, int align,
  			      int align_offset, bool *page_removed_from_list)
  {
  	slob_t *prev, *cur, *aligned = NULL;
  	int delta = 0, units = SLOB_UNITS(size);

  	*page_removed_from_list = false;
  	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
  		slobidx_t avail = slob_units(cur);
  		/*
  		 * 'aligned' will hold the address of the slob block so that the
  		 * address 'aligned'+'align_offset' is aligned according to the
  		 * 'align' parameter. This is for kmalloc() which prepends the
  		 * allocated block with its size, so that the block itself is
  		 * aligned when needed.
  		 */
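  		/*
  		 * For instance (illustrative values): with align = 64 and
  		 * align_offset = 8, 'aligned' is chosen so that aligned + 8
  		 * falls on a 64-byte boundary.
  		 */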
  		if (align) {
  			aligned = (slob_t *)
  				(ALIGN((unsigned long)cur + align_offset, align)
  				 - align_offset);
  			delta = aligned - cur;
  		}
  		if (avail >= units + delta) { /* room enough? */
  			slob_t *next;
  			if (delta) { /* need to fragment head to align? */
  				next = slob_next(cur);
  				set_slob(aligned, avail - delta, next);
  				set_slob(cur, delta, aligned);
  				prev = cur;
  				cur = aligned;
  				avail = slob_units(cur);
  			}
  			next = slob_next(cur);
  			if (avail == units) { /* exact fit? unlink. */
  				if (prev)
  					set_slob(prev, slob_units(prev), next);
  				else
  					sp->freelist = next;
  			} else { /* fragment */
  				if (prev)
  					set_slob(prev, slob_units(prev), cur + units);
  				else
  					sp->freelist = cur + units;
  				set_slob(cur + units, avail - units, next);
  			}
  			sp->units -= units;
  			if (!sp->units) {
  				clear_slob_page_free(sp);
  				*page_removed_from_list = true;
  			}
  			return cur;
  		}
  		if (slob_last(cur))
  			return NULL;
  	}
  }

  /*
   * slob_alloc: entry point into the slob allocator.
   */
  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  							int align_offset)
  {
  	struct page *sp;
  	struct list_head *slob_list;
  	slob_t *b = NULL;
  	unsigned long flags;
  	bool _unused;

  	if (size < SLOB_BREAK1)
  		slob_list = &free_slob_small;
  	else if (size < SLOB_BREAK2)
  		slob_list = &free_slob_medium;
  	else
  		slob_list = &free_slob_large;
  	spin_lock_irqsave(&slob_lock, flags);
  	/* Iterate through each partially free page, try to find room */
  	list_for_each_entry(sp, slob_list, slab_list) {
  		bool page_removed_from_list = false;
  #ifdef CONFIG_NUMA
  		/*
  		 * If there's a node specification, search for a partial
  		 * page with a matching node id in the freelist.
  		 */
  		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
  			continue;
  #endif
  		/* Enough room on this page? */
  		if (sp->units < SLOB_UNITS(size))
  			continue;

  		b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
  		if (!b)
  			continue;
  		/*
  		 * If slob_page_alloc() removed sp from the list then we
  		 * cannot call list functions on sp.  If so allocation
  		 * did not fragment the page anyway so optimisation is
  		 * unnecessary.
  		 */
  		if (!page_removed_from_list) {
  			/*
  			 * Improve fragment distribution and reduce our average
  			 * search time by starting our next search here. (see
  			 * Knuth vol 1, sec 2.5, pg 449)
  			 */
  			if (!list_is_first(&sp->slab_list, slob_list))
  				list_rotate_to_front(&sp->slab_list, slob_list);
  		}
  		break;
  	}
  	spin_unlock_irqrestore(&slob_lock, flags);
  
  	/* Not enough space: must allocate a new page */
  	if (!b) {
  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
  		if (!b)
  			return NULL;
  		sp = virt_to_page(b);
  		__SetPageSlab(sp);
  
  		spin_lock_irqsave(&slob_lock, flags);
  		sp->units = SLOB_UNITS(PAGE_SIZE);
  		sp->freelist = b;
  		INIT_LIST_HEAD(&sp->slab_list);
  		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
  		set_slob_page_free(sp, slob_list);
  		b = slob_page_alloc(sp, size, align, align_offset, &_unused);
  		BUG_ON(!b);
  		spin_unlock_irqrestore(&slob_lock, flags);
  	}
  	if (unlikely(gfp & __GFP_ZERO))
  		memset(b, 0, size);
  	return b;
  }
  /*
   * slob_free: entry point into the slob allocator.
   */
  static void slob_free(void *block, int size)
  {
  	struct page *sp;
  	slob_t *prev, *next, *b = (slob_t *)block;
  	slobidx_t units;
  	unsigned long flags;
  	struct list_head *slob_list;

  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	BUG_ON(!size);

  	sp = virt_to_page(block);
  	units = SLOB_UNITS(size);

  	spin_lock_irqsave(&slob_lock, flags);

  	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
  		/* Go directly to page allocator. Do not pass slob allocator */
  		if (slob_page_free(sp))
  			clear_slob_page_free(sp);
  		spin_unlock_irqrestore(&slob_lock, flags);
  		__ClearPageSlab(sp);
  		page_mapcount_reset(sp);
  		slob_free_pages(b, 0);
  		return;
  	}

  	if (!slob_page_free(sp)) {
  		/* This slob page is about to become partially free. Easy! */
  		sp->units = units;
  		sp->freelist = b;
  		set_slob(b, units,
  			(void *)((unsigned long)(b +
  					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
  		if (size < SLOB_BREAK1)
  			slob_list = &free_slob_small;
  		else if (size < SLOB_BREAK2)
  			slob_list = &free_slob_medium;
  		else
  			slob_list = &free_slob_large;
  		set_slob_page_free(sp, slob_list);
  		goto out;
  	}
  
  	/*
  	 * Otherwise the page is already partially free, so find reinsertion
  	 * point.
  	 */
  	sp->units += units;

  	if (b < (slob_t *)sp->freelist) {
  		if (b + units == sp->freelist) {
  			units += slob_units(sp->freelist);
  			sp->freelist = slob_next(sp->freelist);
  		}
  		set_slob(b, units, sp->freelist);
  		sp->freelist = b;
  	} else {
  		prev = sp->freelist;
  		next = slob_next(prev);
  		while (b > next) {
  			prev = next;
  			next = slob_next(prev);
  		}

  		if (!slob_last(prev) && b + units == next) {
  			units += slob_units(next);
  			set_slob(b, units, slob_next(next));
  		} else
  			set_slob(b, units, next);
  
  		if (prev + slob_units(prev) == b) {
  			units = slob_units(b) + slob_units(prev);
  			set_slob(prev, units, slob_next(b));
  		} else
  			set_slob(prev, slob_units(prev), b);
  	}
  out:
  	spin_unlock_irqrestore(&slob_lock, flags);
  }
  /*
   * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
   */
  static __always_inline void *
  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
  {
  	unsigned int *m;
  	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
  	void *ret;

  	gfp &= gfp_allowed_mask;
  	fs_reclaim_acquire(gfp);
  	fs_reclaim_release(gfp);
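  	/*
  	 * The fs_reclaim_acquire()/fs_reclaim_release() pair above is only a
  	 * lockdep annotation: it records that this allocation may enter
  	 * filesystem reclaim, so lock-ordering problems can be reported even
  	 * when reclaim does not actually happen here.
  	 */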

  	if (size < PAGE_SIZE - minalign) {
  		int align = minalign;
  
  		/*
  		 * For power of two sizes, guarantee natural alignment for
  		 * kmalloc()'d objects.
  		 */
  		if (is_power_of_2(size))
  			align = max(minalign, (int) size);
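  		/* e.g. a 64-byte kmalloc() is returned 64-byte aligned. */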
  		if (!size)
  			return ZERO_SIZE_PTR;
  		m = slob_alloc(size + minalign, gfp, align, node, minalign);

  		if (!m)
  			return NULL;
  		*m = size;
  		ret = (void *)m + minalign;

  		trace_kmalloc_node(caller, ret,
  				   size, size + minalign, gfp, node);
  	} else {
  		unsigned int order = get_order(size);

  		if (likely(order))
  			gfp |= __GFP_COMP;
  		ret = slob_new_pages(gfp, order, node);

  		trace_kmalloc_node(caller, ret,
  				   size, PAGE_SIZE << order, gfp, node);
  	}

  	kmemleak_alloc(ret, size, 1, gfp);
  	return ret;
  }

  void *__kmalloc(size_t size, gfp_t gfp)
  {
  	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
  }
  EXPORT_SYMBOL(__kmalloc);

  void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
  {
  	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
  }
  EXPORT_SYMBOL(__kmalloc_track_caller);
  
  #ifdef CONFIG_NUMA
  void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
  					int node, unsigned long caller)
  {
  	return __do_kmalloc_node(size, gfp, node, caller);
  }
  EXPORT_SYMBOL(__kmalloc_node_track_caller);
  #endif

  void kfree(const void *block)
  {
  	struct page *sp;

  	trace_kfree(_RET_IP_, block);
  	if (unlikely(ZERO_OR_NULL_PTR(block)))
  		return;
  	kmemleak_free(block);

  	sp = virt_to_page(block);
  	if (PageSlab(sp)) {
  		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
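  		/* The kmalloc size header sits 'align' bytes before the object. */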
  		unsigned int *m = (unsigned int *)(block - align);
  		slob_free(m, *m + align);
  	} else {
  		unsigned int order = compound_order(sp);
  		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
  				    -(PAGE_SIZE << order));
  		__free_pages(sp, order);
  
  	}
  }
  EXPORT_SYMBOL(kfree);
  /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
  size_t __ksize(const void *block)
  {
  	struct page *sp;
  	int align;
  	unsigned int *m;

  	BUG_ON(!block);
  	if (unlikely(block == ZERO_SIZE_PTR))
  		return 0;
  	sp = virt_to_page(block);
  	if (unlikely(!PageSlab(sp)))
  		return page_size(sp);

  	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
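  	/* Return the stored request size, rounded up to whole SLOB units. */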
  	m = (unsigned int *)(block - align);
  	return SLOB_UNITS(*m) * SLOB_UNIT;
  }
  EXPORT_SYMBOL(__ksize);

  int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
  {
  	if (flags & SLAB_TYPESAFE_BY_RCU) {
  		/* leave room for rcu footer at the end of object */
  		c->size += sizeof(struct slob_rcu);
  	}
  	c->flags = flags;
  	return 0;
  }

  static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
  {
  	void *b;
  	flags &= gfp_allowed_mask;
  	fs_reclaim_acquire(flags);
  	fs_reclaim_release(flags);

  	if (c->size < PAGE_SIZE) {
  		b = slob_alloc(c->size, flags, c->align, node, 0);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
  					    SLOB_UNITS(c->size) * SLOB_UNIT,
  					    flags, node);
  	} else {
  		b = slob_new_pages(flags, get_order(c->size), node);
  		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
  					    PAGE_SIZE << get_order(c->size),
  					    flags, node);
  	}

  	if (b && c->ctor) {
  		WARN_ON_ONCE(flags & __GFP_ZERO);
  		c->ctor(b);
  	}

  	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
  	return b;
  }
  
  void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  {
  	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
  
  #ifdef CONFIG_NUMA
  void *__kmalloc_node(size_t size, gfp_t gfp, int node)
  {
  	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
  }
  EXPORT_SYMBOL(__kmalloc_node);
  
  void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
  {
  	return slob_alloc_node(cachep, gfp, node);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_node);
  #endif

  static void __kmem_cache_free(void *b, int size)
  {
  	if (size < PAGE_SIZE)
  		slob_free(b, size);
  	else
  		slob_free_pages(b, get_order(size));
  }
  
  static void kmem_rcu_free(struct rcu_head *head)
  {
  	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
  	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
  	__kmem_cache_free(b, slob_rcu->size);
  }
  
  void kmem_cache_free(struct kmem_cache *c, void *b)
  {
  	kmemleak_free_recursive(b, c->flags);
  	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
  		struct slob_rcu *slob_rcu;
  		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
  		slob_rcu->size = c->size;
  		call_rcu(&slob_rcu->head, kmem_rcu_free);
  	} else {
  		__kmem_cache_free(b, c->size);
  	}

  	trace_kmem_cache_free(_RET_IP_, b);
  }
  EXPORT_SYMBOL(kmem_cache_free);
  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
  {
  	__kmem_cache_free_bulk(s, size, p);
  }
  EXPORT_SYMBOL(kmem_cache_free_bulk);
  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
  								void **p)
  {
  	return __kmem_cache_alloc_bulk(s, flags, size, p);
  }
  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  int __kmem_cache_shutdown(struct kmem_cache *c)
  {
  	/* No way to check for remaining objects */
  	return 0;
  }
  void __kmem_cache_release(struct kmem_cache *c)
  {
  }
  int __kmem_cache_shrink(struct kmem_cache *d)
  {
  	return 0;
  }

  struct kmem_cache kmem_cache_boot = {
  	.name = "kmem_cache",
  	.size = sizeof(struct kmem_cache),
  	.flags = SLAB_PANIC,
  	.align = ARCH_KMALLOC_MINALIGN,
  };
  void __init kmem_cache_init(void)
  {
  	kmem_cache = &kmem_cache_boot;
  	slab_state = UP;
  }
  
  void __init kmem_cache_init_late(void)
  {
  	slab_state = FULL;
  }