mm/slob.c

  /*
   * SLOB Allocator: Simple List Of Blocks
   *
   * Matt Mackall <mpm@selenic.com> 12/30/03
   *
   * How SLOB works:
   *
   * The core of SLOB is a traditional K&R style heap allocator, with
   * support for returning aligned objects. The granularity of this
   * allocator is 8 bytes on x86, though it's perhaps possible to reduce
   * this to 4 if it's deemed worth the effort. The slob heap is a
   * singly-linked list of pages from __get_free_page, grown on demand,
   * and allocation from the heap is currently first-fit.
   *
   * Above this is an implementation of kmalloc/kfree. Blocks returned
   * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
   * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
   * __get_free_pages directly so that it can return page-aligned blocks,
   * and it keeps a linked list of such pages and their orders. These
   * objects are detected in kfree() by their page alignment.
   *
   * SLAB is emulated on top of SLOB by simply calling constructors for
   * every SLAB allocation (a destructor argument is accepted by
   * kmem_cache_create() but ignored). Objects are returned with
   * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
   * set, in which case the low-level allocator will fragment blocks to
   * create the proper alignment. Again, objects of page-size or greater
   * are allocated by calling __get_free_pages. As SLAB objects know
   * their size, no separate size bookkeeping is necessary and there is
   * essentially no allocation space overhead.
   */
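
  /*
   * Example (hypothetical caller, for illustration only; kmalloc/kfree
   * and GFP_KERNEL are the usual kernel APIs):
   *
   *	void *small = kmalloc(100, GFP_KERNEL);
   *	void *big = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
   *	kfree(small);
   *	kfree(big);
   *
   * small is carved from the slob heap behind an 8-byte header; big is
   * at least PAGE_SIZE, so it comes page-aligned from __get_free_pages()
   * and is tracked on the bigblock list. kfree() tells the two apart
   * purely by page alignment.
   */
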
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/cache.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/timer.h>
  #include <linux/rcupdate.h>
  
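  /*
   * Free-list node: for a free block, @units is the block's size in
   * SLOB_UNIT granules and @next points at the next free block. An
   * allocated kmalloc block keeps only @units in its prepended header.
   */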
  struct slob_block {
  	int units;
  	struct slob_block *next;
  };
  typedef struct slob_block slob_t;
  
  #define SLOB_UNIT sizeof(slob_t)
  #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
  #define SLOB_ALIGN L1_CACHE_BYTES
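  /*
   * SLOB_UNITS() rounds a byte count up to whole slob_t granules; with
   * an 8-byte slob_t, for example, SLOB_UNITS(20) == 3, i.e. 24 bytes.
   */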
  
  struct bigblock {
  	int order;
  	void *pages;
  	struct bigblock *next;
  };
  typedef struct bigblock bigblock_t;
  /*
   * struct slob_rcu is inserted at the tail of allocated slob blocks, which
   * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
   * the block using call_rcu.
   */
  struct slob_rcu {
  	struct rcu_head head;
  	int size;
  };
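
  /*
   * The heap starts as a single one-unit sentinel block that points at
   * itself; slobfree roves around this circular free list so each
   * first-fit search resumes where the last one stopped. bigblocks is
   * the list of page-order allocations made for large kmalloc calls.
   */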
  static slob_t arena = { .next = &arena, .units = 1 };
  static slob_t *slobfree = &arena;
  static bigblock_t *bigblocks;
  static DEFINE_SPINLOCK(slob_lock);
  static DEFINE_SPINLOCK(block_lock);
  
  static void slob_free(void *b, int size);
  static void slob_timer_cbk(void);
  
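  /*
   * First-fit search of the circular free list. An @align of 0 means no
   * alignment constraint; otherwise the head of a candidate block is
   * split off so that the returned pointer is @align-aligned. When the
   * walk comes back around to slobfree without a fit, the heap is grown
   * by one page (unless the request itself is PAGE_SIZE, which is the
   * arena-shrinking probe used by slob_timer_cbk()).
   */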
  static void *slob_alloc(size_t size, gfp_t gfp, int align)
  {
  	slob_t *prev, *cur, *aligned = 0;
  	int delta = 0, units = SLOB_UNITS(size);
  	unsigned long flags;
  
  	spin_lock_irqsave(&slob_lock, flags);
  	prev = slobfree;
  	for (cur = prev->next; ; prev = cur, cur = cur->next) {
  		if (align) {
  			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
  			delta = aligned - cur;
  		}
  		if (cur->units >= units + delta) { /* room enough? */
  			if (delta) { /* need to fragment head to align? */
  				aligned->units = cur->units - delta;
  				aligned->next = cur->next;
  				cur->next = aligned;
  				cur->units = delta;
  				prev = cur;
  				cur = aligned;
  			}
  
  			if (cur->units == units) /* exact fit? */
  				prev->next = cur->next; /* unlink */
  			else { /* fragment */
  				prev->next = cur + units;
  				prev->next->units = cur->units - units;
  				prev->next->next = cur->next;
  				cur->units = units;
  			}
  
  			slobfree = prev;
  			spin_unlock_irqrestore(&slob_lock, flags);
  			return cur;
  		}
  		if (cur == slobfree) {
  			spin_unlock_irqrestore(&slob_lock, flags);
  
  			if (size == PAGE_SIZE) /* trying to shrink arena? */
  				return 0;
  
  			cur = (slob_t *)__get_free_page(gfp);
  			if (!cur)
  				return 0;
  
  			slob_free(cur, PAGE_SIZE);
  			spin_lock_irqsave(&slob_lock, flags);
  			cur = slobfree;
  		}
  	}
  }
  
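  /*
   * Return a block to the address-sorted circular free list, merging
   * with the preceding and/or following block when they are contiguous.
   * A @size of 0 means b->units is already valid, as it is for kmalloc
   * blocks freed with their header still in place.
   */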
  static void slob_free(void *block, int size)
  {
  	slob_t *cur, *b = (slob_t *)block;
  	unsigned long flags;
  
  	if (!block)
  		return;
  
  	if (size)
  		b->units = SLOB_UNITS(size);
  
  	/*
  	 * Find the reinsertion point: the free list is kept sorted by
  	 * address, so walk until b falls between cur and cur->next,
  	 * treating the highest-addressed block as wrapping back around
  	 * to the lowest.
  	 */
  	spin_lock_irqsave(&slob_lock, flags);
  	for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
  		if (cur >= cur->next && (b > cur || b < cur->next))
  			break;
  
  	if (b + b->units == cur->next) {
  		b->units += cur->next->units;
  		b->next = cur->next->next;
  	} else
  		b->next = cur->next;
  
  	if (cur + cur->units == b) {
  		cur->units += b->units;
  		cur->next = b->next;
  	} else
  		cur->next = b;
  
  	slobfree = cur;
  
  	spin_unlock_irqrestore(&slob_lock, flags);
  }
  void *__kmalloc(size_t size, gfp_t gfp)
  {
  	slob_t *m;
  	bigblock_t *bb;
  	unsigned long flags;
  
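  	/*
  	 * Small requests get one extra SLOB_UNIT for a header recording
  	 * the block size; the caller sees the address just past it.
  	 * Anything close to PAGE_SIZE or larger goes to the page
  	 * allocator below and is remembered on the bigblock list.
  	 */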
  	if (size < PAGE_SIZE - SLOB_UNIT) {
  		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
  		return m ? (void *)(m + 1) : 0;
  	}
  
  	bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
  	if (!bb)
  		return 0;
  	bb->order = get_order(size);
  	bb->pages = (void *)__get_free_pages(gfp, bb->order);
  
  	if (bb->pages) {
  		spin_lock_irqsave(&block_lock, flags);
  		bb->next = bigblocks;
  		bigblocks = bb;
  		spin_unlock_irqrestore(&block_lock, flags);
  		return bb->pages;
  	}
  
  	slob_free(bb, sizeof(bigblock_t));
  	return 0;
  }
  EXPORT_SYMBOL(__kmalloc);

  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   *
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!p))
  		return kmalloc_track_caller(new_size, flags);
  
  	if (unlikely(!new_size)) {
  		kfree(p);
  		return NULL;
  	}
  
  	ret = kmalloc_track_caller(new_size, flags);
  	if (ret) {
  		memcpy(ret, p, min(new_size, ksize(p)));
  		kfree(p);
  	}
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
  void kfree(const void *block)
  {
  	bigblock_t *bb, **last = &bigblocks;
  	unsigned long flags;
  
  	if (!block)
  		return;
  
  	if (!((unsigned long)block & (PAGE_SIZE-1))) {
  		/* might be on the big block list */
  		spin_lock_irqsave(&block_lock, flags);
  		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
  			if (bb->pages == block) {
  				*last = bb->next;
  				spin_unlock_irqrestore(&block_lock, flags);
  				free_pages((unsigned long)block, bb->order);
  				slob_free(bb, sizeof(bigblock_t));
  				return;
  			}
  		}
  		spin_unlock_irqrestore(&block_lock, flags);
  	}
  
  	slob_free((slob_t *)block - 1, 0);
  }
  EXPORT_SYMBOL(kfree);
  size_t ksize(const void *block)
  {
  	bigblock_t *bb;
  	unsigned long flags;
  
  	if (!block)
  		return 0;
  
  	if (!((unsigned long)block & (PAGE_SIZE-1))) {
  		spin_lock_irqsave(&block_lock, flags);
  		for (bb = bigblocks; bb; bb = bb->next)
  			if (bb->pages == block) {
  				spin_unlock_irqrestore(&block_lock, flags);
  				return PAGE_SIZE << bb->order;
  			}
  		spin_unlock_irqrestore(&block_lock, flags);
  	}
  
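  	/* A small block reports the size recorded in its slob header. */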
  	return ((slob_t *)block - 1)->units * SLOB_UNIT;
  }
  
  struct kmem_cache {
  	unsigned int size, align;
  	unsigned long flags;
  	const char *name;
  	void (*ctor)(void *, struct kmem_cache *, unsigned long);
  };
  
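  /*
   * Example cache usage (hypothetical; struct foo and foo_cache are
   * made-up names for illustration):
   *
   *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
   *				      SLAB_HWCACHE_ALIGN, NULL, NULL);
   *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
   *	kmem_cache_free(foo_cache, f);
   */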
  struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  	size_t align, unsigned long flags,
  	void (*ctor)(void*, struct kmem_cache *, unsigned long),
  	void (*dtor)(void*, struct kmem_cache *, unsigned long))
  {
  	struct kmem_cache *c;
  
  	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);
  
  	if (c) {
  		c->name = name;
  		c->size = size;
  		if (flags & SLAB_DESTROY_BY_RCU) {
  			/* leave room for rcu footer at the end of object */
  			c->size += sizeof(struct slob_rcu);
  		}
  		c->flags = flags;
  		c->ctor = ctor;
  		/* ignore alignment unless it's forced */
  		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
  		if (c->align < align)
  			c->align = align;
  	} else if (flags & SLAB_PANIC)
  		panic("Cannot create slab cache %s
  ", name);
  
  	return c;
  }
  EXPORT_SYMBOL(kmem_cache_create);
  void kmem_cache_destroy(struct kmem_cache *c)
  {
  	slob_free(c, sizeof(struct kmem_cache));
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  
  void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
  {
  	void *b;
  
  	if (c->size < PAGE_SIZE)
  		b = slob_alloc(c->size, flags, c->align);
  	else
  		b = (void *)__get_free_pages(flags, get_order(c->size));
  
  	if (c->ctor)
  		c->ctor(b, c, 0);
  
  	return b;
  }
  EXPORT_SYMBOL(kmem_cache_alloc);
  void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
  {
  	void *ret = kmem_cache_alloc(c, flags);
  	if (ret)
  		memset(ret, 0, c->size);
  
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_zalloc);
  static void __kmem_cache_free(void *b, int size)
  {
  	if (size < PAGE_SIZE)
  		slob_free(b, size);
  	else
  		free_pages((unsigned long)b, get_order(size));
  }
  
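  /*
   * For SLAB_DESTROY_BY_RCU caches, the struct slob_rcu footer occupies
   * the last bytes of the object (c->size was padded for it in
   * kmem_cache_create()), so the object base is recovered by stepping
   * back size - sizeof(struct slob_rcu) bytes from the rcu head.
   */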
  static void kmem_rcu_free(struct rcu_head *head)
  {
  	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
  	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
  
  	__kmem_cache_free(b, slob_rcu->size);
  }
  
  void kmem_cache_free(struct kmem_cache *c, void *b)
  {
  	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
  		struct slob_rcu *slob_rcu;
  		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
  		INIT_RCU_HEAD(&slob_rcu->head);
  		slob_rcu->size = c->size;
  		call_rcu(&slob_rcu->head, kmem_rcu_free);
  	} else {
  		__kmem_cache_free(b, c->size);
  	}
  }
  EXPORT_SYMBOL(kmem_cache_free);
  
  unsigned int kmem_cache_size(struct kmem_cache *c)
  {
  	return c->size;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  
  const char *kmem_cache_name(struct kmem_cache *c)
  {
  	return c->name;
  }
  EXPORT_SYMBOL(kmem_cache_name);
  
  static struct timer_list slob_timer = TIMER_INITIALIZER(
  	(void (*)(unsigned long))slob_timer_cbk, 0, 0);

  int kmem_cache_shrink(struct kmem_cache *d)
  {
  	return 0;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
  int kmem_ptr_validate(struct kmem_cache *a, const void *b)
  {
  	return 0;
  }
  void __init kmem_cache_init(void)
  {
  	slob_timer_cbk();
  }
  
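  /*
   * Periodic trim: requesting a page-aligned, PAGE_SIZE block can only
   * be satisfied by an entirely free page already in the heap (growth
   * is refused for that size), which is then handed back to the page
   * allocator. The timer rearms itself once a second.
   */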
  static void slob_timer_cbk(void)
  {
  	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
  
  	if (p)
  		free_page((unsigned long)p);
  
  	mod_timer(&slob_timer, jiffies + HZ);
  }