Commit 94f6030ca792c57422f04a73e7a872d8325946d3
Committed by Linus Torvalds
Parent: 81cda66261
Slab allocators: Replace explicit zeroing with __GFP_ZERO
kmalloc_node() and kmem_cache_alloc_node() were not available in a zeroing variant in the past. With __GFP_ZERO it is now possible to zero while allocating, so use __GFP_ZERO to remove the explicit clearing of memory via memset wherever we can.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 12 changed files with 30 additions and 38 deletions
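The pattern replaced at every call site below is the same: allocate on a specific node, check for failure, then memset() the object to zero. A minimal before/after sketch (struct foo and the two helper functions are illustrative, not part of the patch; kmalloc_node(), memset() and the GFP flags are the real kernel interfaces):

#include <linux/slab.h>
#include <linux/string.h>

struct foo {                            /* hypothetical per-node object */
        int count;
        void *data;
};

/* Before: allocate on a given node, then clear explicitly. */
static struct foo *foo_alloc_old(int node)
{
        struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);

        if (!f)
                return NULL;
        memset(f, 0, sizeof(*f));       /* separate zeroing pass */
        return f;
}

/* After: pass __GFP_ZERO so the allocator hands back zeroed memory. */
static struct foo *foo_alloc_new(int node)
{
        struct foo *f = kmalloc_node(sizeof(*f),
                                     GFP_KERNEL | __GFP_ZERO, node);

        if (!f)
                return NULL;
        return f;                       /* *f is already zeroed */
}

__GFP_ZERO also composes with the caller's other GFP modifiers, as the cfq-iosched hunks below show (gfp_mask | __GFP_NOFAIL | __GFP_ZERO).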
block/as-iosched.c
@@ -1322,10 +1322,9 @@
 {
         struct as_data *ad;
 
-        ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+        ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
         if (!ad)
                 return NULL;
-        memset(ad, 0, sizeof(*ad));
 
         ad->q = q;      /* Identify what queue the data belongs to */
 
block/cfq-iosched.c
@@ -1251,9 +1251,9 @@
 {
         struct cfq_io_context *cic;
 
-        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+                                                        cfqd->queue->node);
         if (cic) {
-                memset(cic, 0, sizeof(*cic));
                 cic->last_end_request = jiffies;
                 INIT_LIST_HEAD(&cic->queue_list);
                 cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@
                  * free memory.
                  */
                 spin_unlock_irq(cfqd->queue->queue_lock);
-                new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+                new_cfqq = kmem_cache_alloc_node(cfq_pool,
+                                gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+                                cfqd->queue->node);
                 spin_lock_irq(cfqd->queue->queue_lock);
                 goto retry;
         } else {
-                cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+                cfqq = kmem_cache_alloc_node(cfq_pool,
+                                gfp_mask | __GFP_ZERO,
+                                cfqd->queue->node);
                 if (!cfqq)
                         goto out;
         }
 
-        memset(cfqq, 0, sizeof(*cfqq));
-
         RB_CLEAR_NODE(&cfqq->rb_node);
         INIT_LIST_HEAD(&cfqq->fifo);
@@ -2079,11 +2081,9 @@
 {
         struct cfq_data *cfqd;
 
-        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
         if (!cfqd)
                 return NULL;
-
-        memset(cfqd, 0, sizeof(*cfqd));
 
         cfqd->service_tree = CFQ_RB_ROOT;
         INIT_LIST_HEAD(&cfqd->cic_list);
block/deadline-iosched.c
@@ -360,10 +360,9 @@
 {
         struct deadline_data *dd;
 
-        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
         if (!dd)
                 return NULL;
-        memset(dd, 0, sizeof(*dd));
 
         INIT_LIST_HEAD(&dd->fifo_list[READ]);
         INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
block/elevator.c
@@ -177,11 +177,10 @@
         elevator_t *eq;
         int i;
 
-        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
         if (unlikely(!eq))
                 goto err;
 
-        memset(eq, 0, sizeof(*eq));
         eq->ops = &e->ops;
         eq->elevator_type = e;
         kobject_init(&eq->kobj);
block/genhd.c
@@ -726,21 +726,21 @@
 {
         struct gendisk *disk;
 
-        disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+        disk = kmalloc_node(sizeof(struct gendisk),
+                                GFP_KERNEL | __GFP_ZERO, node_id);
         if (disk) {
-                memset(disk, 0, sizeof(struct gendisk));
                 if (!init_disk_stats(disk)) {
                         kfree(disk);
                         return NULL;
                 }
                 if (minors > 1) {
                         int size = (minors - 1) * sizeof(struct hd_struct *);
-                        disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+                        disk->part = kmalloc_node(size,
+                                GFP_KERNEL | __GFP_ZERO, node_id);
                         if (!disk->part) {
                                 kfree(disk);
                                 return NULL;
                         }
-                        memset(disk->part, 0, size);
                 }
                 disk->minors = minors;
                 kobj_set_kset_s(disk,block_subsys);
block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@
 {
         request_queue_t *q;
 
-        q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+        q = kmem_cache_alloc_node(requestq_cachep,
+                                gfp_mask | __GFP_ZERO, node_id);
         if (!q)
                 return NULL;
 
-        memset(q, 0, sizeof(*q));
         init_timer(&q->unplug_timer);
 
         snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@
                 hwgroup->hwif->next = hwif;
                 spin_unlock_irq(&ide_lock);
         } else {
-                hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+                hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+                                       GFP_KERNEL | __GFP_ZERO,
                                        hwif_to_node(hwif->drives[0].hwif));
                 if (!hwgroup)
                         goto out_up;
 
                 hwif->hwgroup = hwgroup;
 
-                memset(hwgroup, 0, sizeof(ide_hwgroup_t));
                 hwgroup->hwif = hwif->next = hwif;
                 hwgroup->rq = NULL;
                 hwgroup->handler = NULL;
kernel/timer.c
@@ -1221,7 +1221,8 @@
                 /*
                  * The APs use this path later in boot
                  */
-                base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+                base = kmalloc_node(sizeof(*base),
+                                    GFP_KERNEL | __GFP_ZERO,
                                     cpu_to_node(cpu));
                 if (!base)
                         return -ENOMEM;
@@ -1232,7 +1233,6 @@
                         kfree(base);
                         return -ENOMEM;
                 }
-                memset(base, 0, sizeof(*base));
                 per_cpu(tvec_bases, cpu) = base;
         } else {
                 /*
lib/genalloc.c
@@ -54,11 +54,10 @@
         int nbytes = sizeof(struct gen_pool_chunk) +
                 (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-        chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
         if (unlikely(chunk == NULL))
                 return -1;
 
-        memset(chunk, 0, nbytes);
         spin_lock_init(&chunk->lock);
         chunk->start_addr = addr;
         chunk->end_addr = addr + size;
mm/allocpercpu.c
@@ -53,12 +53,9 @@
         int node = cpu_to_node(cpu);
 
         BUG_ON(pdata->ptrs[cpu]);
-        if (node_online(node)) {
-                /* FIXME: kzalloc_node(size, gfp, node) */
-                pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-                if (pdata->ptrs[cpu])
-                        memset(pdata->ptrs[cpu], 0, size);
-        } else
+        if (node_online(node))
+                pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+        else
                 pdata->ptrs[cpu] = kzalloc(size, gfp);
         return pdata->ptrs[cpu];
 }
mm/mempool.c
@@ -62,10 +62,9 @@
                 mempool_free_t *free_fn, void *pool_data, int node_id)
 {
         mempool_t *pool;
-        pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+        pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
         if (!pool)
                 return NULL;
-        memset(pool, 0, sizeof(*pool));
         pool->elements = kmalloc_node(min_nr * sizeof(void *),
                         GFP_KERNEL, node_id);
         if (!pool->elements) {
mm/vmalloc.c
@@ -432,11 +432,12 @@
         area->nr_pages = nr_pages;
         /* Please note that the recursion is strictly bounded. */
         if (array_size > PAGE_SIZE) {
-                pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+                                       PAGE_KERNEL, node);
                 area->flags |= VM_VPAGES;
         } else {
                 pages = kmalloc_node(array_size,
-                                (gfp_mask & GFP_LEVEL_MASK),
+                                (gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
                                 node);
         }
         area->pages = pages;
@@ -445,7 +446,6 @@
                 kfree(area);
                 return NULL;
         }
-        memset(area->pages, 0, array_size);
 
         for (i = 0; i < area->nr_pages; i++) {
                 if (node < 0)