block/blk-ioc.c
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h> |

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);
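
/*
 * Illustrative sketch, not part of the original file: the return-value
 * contract of put_io_context() as seen by a hypothetical caller that
 * holds two references.  The function name is made up.
 */
static void __maybe_unused ioc_put_example(struct io_context *ioc)
{
	/* suppose ioc->refcount is 2 on entry */
	put_io_context(ioc);	/* returns 0: another user remains */
	put_io_context(ioc);	/* returns 1: last reference, ioc is freed */
}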

static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);
	put_io_context(ioc);
}
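
/*
 * Added note, for contrast with exit_io_context() above: the fork-time
 * counterpart that takes both a structure reference and a task reference
 * lives in include/linux/iocontext.h.  Roughly (sketch from memory, may
 * differ in detail):
 *
 *	static inline struct io_context *ioc_task_link(struct io_context *ioc)
 *	{
 *		if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
 *			atomic_inc(&ioc->nr_tasks);
 *			return ioc;
 *		}
 *		return NULL;	(ioc is being torn down, refuse to share)
 *	}
 *
 * exit_io_context() undoes exactly this pair of increments.
 */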

struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ret->cgroup_changed = 0;
#endif
	}

	return ret;
}
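
/*
 * Illustrative sketch, not part of the original file: a caller wanting a
 * fresh private context -- e.g. to seed a child's io priority at fork
 * time -- allocates directly.  Function name and arguments are made up.
 */
static int __maybe_unused ioc_seed_ioprio(struct task_struct *tsk,
					  unsigned short ioprio)
{
	tsk->io_context = alloc_io_context(GFP_KERNEL, -1);	/* -1: any node */
	if (unlikely(!tsk->io_context))
		return -ENOMEM;
	tsk->io_context->ioprio = ioprio;
	return 0;
}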

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);
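
/*
 * Illustrative sketch, not part of the original file: the expected call
 * pattern for the two exported entry points.  The function name is made
 * up; GFP_KERNEL and node -1 are the usual arguments from process context.
 */
static void __maybe_unused ioc_get_put_example(void)
{
	struct io_context *ioc;

	ioc = get_io_context(GFP_KERNEL, -1);	/* takes a reference */
	if (!ioc)
		return;				/* allocation failed */

	/* ... inspect ioc->ioprio, hang per-queue state off it, ... */

	put_io_context(ioc);			/* drops the reference */
}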

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);