Commit 4ae537892ab9858f71c78701f4651ad1ca531a1b
Committed by
Linus Torvalds
1 parent
2027d1abc2
Exists in
master
and in
20 other branches
idr: rename some of the idr APIs internal routines
This is a trivial patch that renames: . alloc_layer to get_from_free_list since it is idr_pre_get that actually allocates memory. . free_layer to move_to_free_list since memory is not actually freed there. This makes things clearer for the next patches. Signed-off-by: Nadia Derbey <Nadia.Derbey@bull.net> Reviewed-by: "Paul E. McKenney" <paulmck@us.ibm.com> Cc: Manfred Spraul <manfred@colorfullife.com> Cc: Jim Houston <jim.houston@comcast.net> Cc: Pierre Peiffer <peifferp@gmail.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 16 additions and 15 deletions Side-by-side Diff
lib/idr.c
... | ... | @@ -35,7 +35,7 @@ |
35 | 35 | |
36 | 36 | static struct kmem_cache *idr_layer_cache; |
37 | 37 | |
38 | -static struct idr_layer *alloc_layer(struct idr *idp) | |
38 | +static struct idr_layer *get_from_free_list(struct idr *idp) | |
39 | 39 | { |
40 | 40 | struct idr_layer *p; |
41 | 41 | unsigned long flags; |
42 | 42 | |
... | ... | @@ -51,14 +51,14 @@ |
51 | 51 | } |
52 | 52 | |
53 | 53 | /* only called when idp->lock is held */ |
54 | -static void __free_layer(struct idr *idp, struct idr_layer *p) | |
54 | +static void __move_to_free_list(struct idr *idp, struct idr_layer *p) | |
55 | 55 | { |
56 | 56 | p->ary[0] = idp->id_free; |
57 | 57 | idp->id_free = p; |
58 | 58 | idp->id_free_cnt++; |
59 | 59 | } |
60 | 60 | |
61 | -static void free_layer(struct idr *idp, struct idr_layer *p) | |
61 | +static void move_to_free_list(struct idr *idp, struct idr_layer *p) | |
62 | 62 | { |
63 | 63 | unsigned long flags; |
64 | 64 | |
... | ... | @@ -66,7 +66,7 @@ |
66 | 66 | * Depends on the return element being zeroed. |
67 | 67 | */ |
68 | 68 | spin_lock_irqsave(&idp->lock, flags); |
69 | - __free_layer(idp, p); | |
69 | + __move_to_free_list(idp, p); | |
70 | 70 | spin_unlock_irqrestore(&idp->lock, flags); |
71 | 71 | } |
72 | 72 | |
... | ... | @@ -109,7 +109,7 @@ |
109 | 109 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); |
110 | 110 | if (new == NULL) |
111 | 111 | return (0); |
112 | - free_layer(idp, new); | |
112 | + move_to_free_list(idp, new); | |
113 | 113 | } |
114 | 114 | return 1; |
115 | 115 | } |
... | ... | @@ -167,7 +167,8 @@ |
167 | 167 | * Create the layer below if it is missing. |
168 | 168 | */ |
169 | 169 | if (!p->ary[m]) { |
170 | - if (!(new = alloc_layer(idp))) | |
170 | + new = get_from_free_list(idp); | |
171 | + if (!new) | |
171 | 172 | return -1; |
172 | 173 | p->ary[m] = new; |
173 | 174 | p->count++; |
... | ... | @@ -192,7 +193,7 @@ |
192 | 193 | p = idp->top; |
193 | 194 | layers = idp->layers; |
194 | 195 | if (unlikely(!p)) { |
195 | - if (!(p = alloc_layer(idp))) | |
196 | + if (!(p = get_from_free_list(idp))) | |
196 | 197 | return -1; |
197 | 198 | layers = 1; |
198 | 199 | } |
... | ... | @@ -204,7 +205,7 @@ |
204 | 205 | layers++; |
205 | 206 | if (!p->count) |
206 | 207 | continue; |
207 | - if (!(new = alloc_layer(idp))) { | |
208 | + if (!(new = get_from_free_list(idp))) { | |
208 | 209 | /* |
209 | 210 | * The allocation failed. If we built part of |
210 | 211 | * the structure tear it down. |
... | ... | @@ -214,7 +215,7 @@ |
214 | 215 | p = p->ary[0]; |
215 | 216 | new->ary[0] = NULL; |
216 | 217 | new->bitmap = new->count = 0; |
217 | - __free_layer(idp, new); | |
218 | + __move_to_free_list(idp, new); | |
218 | 219 | } |
219 | 220 | spin_unlock_irqrestore(&idp->lock, flags); |
220 | 221 | return -1; |
... | ... | @@ -351,7 +352,7 @@ |
351 | 352 | __clear_bit(n, &p->bitmap); |
352 | 353 | p->ary[n] = NULL; |
353 | 354 | while(*paa && ! --((**paa)->count)){ |
354 | - free_layer(idp, **paa); | |
355 | + move_to_free_list(idp, **paa); | |
355 | 356 | **paa-- = NULL; |
356 | 357 | } |
357 | 358 | if (!*paa) |
358 | 359 | |
... | ... | @@ -378,12 +379,12 @@ |
378 | 379 | |
379 | 380 | p = idp->top->ary[0]; |
380 | 381 | idp->top->bitmap = idp->top->count = 0; |
381 | - free_layer(idp, idp->top); | |
382 | + move_to_free_list(idp, idp->top); | |
382 | 383 | idp->top = p; |
383 | 384 | --idp->layers; |
384 | 385 | } |
385 | 386 | while (idp->id_free_cnt >= IDR_FREE_MAX) { |
386 | - p = alloc_layer(idp); | |
387 | + p = get_from_free_list(idp); | |
387 | 388 | kmem_cache_free(idr_layer_cache, p); |
388 | 389 | } |
389 | 390 | return; |
... | ... | @@ -426,7 +427,7 @@ |
426 | 427 | while (n < fls(id)) { |
427 | 428 | if (p) { |
428 | 429 | memset(p, 0, sizeof *p); |
429 | - free_layer(idp, p); | |
430 | + move_to_free_list(idp, p); | |
430 | 431 | } |
431 | 432 | n += IDR_BITS; |
432 | 433 | p = *--paa; |
... | ... | @@ -444,7 +445,7 @@ |
444 | 445 | void idr_destroy(struct idr *idp) |
445 | 446 | { |
446 | 447 | while (idp->id_free_cnt) { |
447 | - struct idr_layer *p = alloc_layer(idp); | |
448 | + struct idr_layer *p = get_from_free_list(idp); | |
448 | 449 | kmem_cache_free(idr_layer_cache, p); |
449 | 450 | } |
450 | 451 | } |
... | ... | @@ -749,7 +750,7 @@ |
749 | 750 | * allocation. |
750 | 751 | */ |
751 | 752 | if (ida->idr.id_free_cnt || ida->free_bitmap) { |
752 | - struct idr_layer *p = alloc_layer(&ida->idr); | |
753 | + struct idr_layer *p = get_from_free_list(&ida->idr); | |
753 | 754 | if (p) |
754 | 755 | kmem_cache_free(idr_layer_cache, p); |
755 | 756 | } |