Commit bae386f7884aa3720cc7880b36a41a1d2b9c327b

Authored by Akinobu Mita
Committed by Jens Axboe
1 parent 199f4c9f76

[PATCH] iosched: use hlist for request hashtable

Use hlist instead of list_head for the request hashtable in deadline-iosched
and as-iosched. This also allows removing the on_hash flag that tracked whether
a request was hashed, since hlist_unhashed() now provides that information.

Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
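
Why the on_hash flag becomes unnecessary: a node removed with hlist_del_init()
(or freshly initialised with INIT_HLIST_NODE()) has a NULL pprev back-pointer,
so hlist_unhashed() reports hash membership directly. Below is a minimal
userspace mock-up of the relevant list.h primitives, illustrative only, not
kernel code:

#include <assert.h>
#include <stddef.h>

/* Minimal userspace stand-ins for the kernel hlist primitives. */
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define INIT_HLIST_HEAD(h)	((h)->first = NULL)
#define INIT_HLIST_NODE(n)	((n)->next = NULL, (n)->pprev = NULL)

/* A node is "unhashed" when its pprev back-pointer is NULL. */
static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	INIT_HLIST_NODE(n);
}

int main(void)
{
	struct hlist_head bucket;
	struct hlist_node arq_hash;

	INIT_HLIST_HEAD(&bucket);
	INIT_HLIST_NODE(&arq_hash);
	assert(hlist_unhashed(&arq_hash));	/* fresh node: not on any chain */

	hlist_add_head(&arq_hash, &bucket);	/* as_add_arq_hash() equivalent */
	assert(!hlist_unhashed(&arq_hash));	/* membership is now implicit */

	hlist_del_init(&arq_hash);		/* __as_del_arq_hash() equivalent */
	assert(hlist_unhashed(&arq_hash));	/* no separate flag to clear */
	return 0;
}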

 block/as-iosched.c       |   45 +++++++++++++++++++--------------------------
 block/deadline-iosched.c |   39 ++++++++++++++++-----------------------
 2 files changed, 35 insertions(+), 49 deletions(-)

block/as-iosched.c

... ... @@ -96,7 +96,7 @@
96 96  
97 97 struct as_rq *next_arq[2]; /* next in sort order */
98 98 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
99   - struct list_head *hash; /* request hash */
  99 + struct hlist_head *hash; /* request hash */
100 100  
101 101 unsigned long exit_prob; /* probability a task will exit while
102 102 being waited on */
... ... @@ -165,8 +165,7 @@
165 165 /*
166 166 * request hash, key is the ending offset (for back merge lookup)
167 167 */
168   - struct list_head hash;
169   - unsigned int on_hash;
  168 + struct hlist_node hash;
170 169  
171 170 /*
172 171 * expire fifo
... ... @@ -282,17 +281,15 @@
282 281 #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
283 282 #define AS_HASH_ENTRIES (1 << as_hash_shift)
284 283 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
285   -#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
286 284  
287 285 static inline void __as_del_arq_hash(struct as_rq *arq)
288 286 {
289   - arq->on_hash = 0;
290   - list_del_init(&arq->hash);
  287 + hlist_del_init(&arq->hash);
291 288 }
292 289  
293 290 static inline void as_del_arq_hash(struct as_rq *arq)
294 291 {
295   - if (arq->on_hash)
  292 + if (!hlist_unhashed(&arq->hash))
296 293 __as_del_arq_hash(arq);
297 294 }
298 295  
... ... @@ -300,10 +297,9 @@
300 297 {
301 298 struct request *rq = arq->request;
302 299  
303   - BUG_ON(arq->on_hash);
  300 + BUG_ON(!hlist_unhashed(&arq->hash));
304 301  
305   - arq->on_hash = 1;
306   - list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
  302 + hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
307 303 }
308 304  
309 305 /*
... ... @@ -312,32 +308,30 @@
312 308 static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
313 309 {
314 310 struct request *rq = arq->request;
315   - struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
  311 + struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
316 312  
317   - if (!arq->on_hash) {
  313 + if (hlist_unhashed(&arq->hash)) {
318 314 WARN_ON(1);
319 315 return;
320 316 }
321 317  
322   - if (arq->hash.prev != head) {
323   - list_del(&arq->hash);
324   - list_add(&arq->hash, head);
  318 + if (&arq->hash != head->first) {
  319 + hlist_del(&arq->hash);
  320 + hlist_add_head(&arq->hash, head);
325 321 }
326 322 }
327 323  
328 324 static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
329 325 {
330   - struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
331   - struct list_head *entry, *next = hash_list->next;
  326 + struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
  327 + struct hlist_node *entry, *next;
  328 + struct as_rq *arq;
332 329  
333   - while ((entry = next) != hash_list) {
334   - struct as_rq *arq = list_entry_hash(entry);
  330 + hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
335 331 struct request *__rq = arq->request;
336 332  
337   - next = entry->next;
  333 + BUG_ON(hlist_unhashed(&arq->hash));
338 334  
339   - BUG_ON(!arq->on_hash);
340   -
341 335 if (!rq_mergeable(__rq)) {
342 336 as_del_arq_hash(arq);
343 337 continue;
... ... @@ -1601,8 +1595,7 @@
1601 1595 arq->request = rq;
1602 1596 arq->state = AS_RQ_PRESCHED;
1603 1597 arq->io_context = NULL;
1604   - INIT_LIST_HEAD(&arq->hash);
1605   - arq->on_hash = 0;
  1598 + INIT_HLIST_NODE(&arq->hash);
1606 1599 INIT_LIST_HEAD(&arq->fifo);
1607 1600 rq->elevator_private = arq;
1608 1601 return 0;
... ... @@ -1662,7 +1655,7 @@
1662 1655  
1663 1656 ad->q = q; /* Identify what queue the data belongs to */
1664 1657  
1665   - ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
  1658 + ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
1666 1659 GFP_KERNEL, q->node);
1667 1660 if (!ad->hash) {
1668 1661 kfree(ad);
... ... @@ -1684,7 +1677,7 @@
1684 1677 INIT_WORK(&ad->antic_work, as_work_handler, q);
1685 1678  
1686 1679 for (i = 0; i < AS_HASH_ENTRIES; i++)
1687   - INIT_LIST_HEAD(&ad->hash[i]);
  1680 + INIT_HLIST_HEAD(&ad->hash[i]);
1688 1681  
1689 1682 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
1690 1683 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
block/deadline-iosched.c
... ... @@ -30,8 +30,7 @@
30 30 #define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
31 31 #define DL_HASH_ENTRIES (1 << deadline_hash_shift)
32 32 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
33   -#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash)
34   -#define ON_HASH(drq) (drq)->on_hash
  33 +#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash))
35 34  
36 35 struct deadline_data {
37 36 /*
... ... @@ -48,7 +47,7 @@
48 47 * next in sort order. read, write or both are NULL
49 48 */
50 49 struct deadline_rq *next_drq[2];
51   - struct list_head *hash; /* request hash */
  50 + struct hlist_head *hash; /* request hash */
52 51 unsigned int batching; /* number of sequential requests made */
53 52 sector_t last_sector; /* head position */
54 53 unsigned int starved; /* times reads have starved writes */
... ... @@ -79,8 +78,7 @@
79 78 /*
80 79 * request hash, key is the ending offset (for back merge lookup)
81 80 */
82   - struct list_head hash;
83   - char on_hash;
  81 + struct hlist_node hash;
84 82  
85 83 /*
86 84 * expire fifo
... ... @@ -100,8 +98,7 @@
100 98 */
101 99 static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
102 100 {
103   - drq->on_hash = 0;
104   - list_del_init(&drq->hash);
  101 + hlist_del_init(&drq->hash);
105 102 }
106 103  
107 104 static inline void deadline_del_drq_hash(struct deadline_rq *drq)
... ... @@ -117,8 +114,7 @@
117 114  
118 115 BUG_ON(ON_HASH(drq));
119 116  
120   - drq->on_hash = 1;
121   - list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
  117 + hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
122 118 }
123 119  
124 120 /*
... ... @@ -128,26 +124,24 @@
128 124 deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
129 125 {
130 126 struct request *rq = drq->request;
131   - struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
  127 + struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
132 128  
133   - if (ON_HASH(drq) && drq->hash.prev != head) {
134   - list_del(&drq->hash);
135   - list_add(&drq->hash, head);
  129 + if (ON_HASH(drq) && &drq->hash != head->first) {
  130 + hlist_del(&drq->hash);
  131 + hlist_add_head(&drq->hash, head);
136 132 }
137 133 }
138 134  
139 135 static struct request *
140 136 deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
141 137 {
142   - struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
143   - struct list_head *entry, *next = hash_list->next;
  138 + struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
  139 + struct hlist_node *entry, *next;
  140 + struct deadline_rq *drq;
144 141  
145   - while ((entry = next) != hash_list) {
146   - struct deadline_rq *drq = list_entry_hash(entry);
  142 + hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
147 143 struct request *__rq = drq->request;
148 144  
149   - next = entry->next;
150   -
151 145 BUG_ON(!ON_HASH(drq));
152 146  
153 147 if (!rq_mergeable(__rq)) {
... ... @@ -625,7 +619,7 @@
625 619 return NULL;
626 620 memset(dd, 0, sizeof(*dd));
627 621  
628   - dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
  622 + dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
629 623 GFP_KERNEL, q->node);
630 624 if (!dd->hash) {
631 625 kfree(dd);
... ... @@ -641,7 +635,7 @@
641 635 }
642 636  
643 637 for (i = 0; i < DL_HASH_ENTRIES; i++)
644   - INIT_LIST_HEAD(&dd->hash[i]);
  638 + INIT_HLIST_HEAD(&dd->hash[i]);
645 639  
646 640 INIT_LIST_HEAD(&dd->fifo_list[READ]);
647 641 INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
... ... @@ -677,8 +671,7 @@
677 671 RB_CLEAR(&drq->rb_node);
678 672 drq->request = rq;
679 673  
680   - INIT_LIST_HEAD(&drq->hash);
681   - drq->on_hash = 0;
  674 + INIT_HLIST_NODE(&drq->hash);
682 675  
683 676 INIT_LIST_HEAD(&drq->fifo);
684 677
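
A note on the converted lookup loops: the old open-coded walk saved entry->next
before inspecting each request so the current entry could be unlinked mid-scan;
hlist_for_each_entry_safe() (the five-argument form used here, with separate
node and next cursors) encodes the same deletion-safe pattern. A simplified
userspace sketch of that pattern, using hypothetical mock types rather than the
real list.h macros:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical userspace stand-ins for the kernel hlist types. */
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Deletion-safe walk: grab ->next before the body may unlink pos. */
#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ((n) = pos->next, 1); pos = n)

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* Stand-in for struct as_rq / struct deadline_rq. */
struct mock_rq {
	struct hlist_node hash;
	int mergeable;
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct hlist_node *pos, *next;
	struct mock_rq rqs[4] = {
		{ .mergeable = 1 }, { .mergeable = 0 },
		{ .mergeable = 1 }, { .mergeable = 0 },
	};
	int left = 0;

	for (int i = 0; i < 4; i++)
		hlist_add_head(&rqs[i].hash, &bucket);

	/*
	 * Mirror of as_find_arq_hash()/deadline_find_drq_hash(): prune
	 * non-mergeable entries while scanning the chain.
	 */
	hlist_for_each_safe(pos, next, &bucket) {
		struct mock_rq *rq = container_of(pos, struct mock_rq, hash);

		if (!rq->mergeable) {
			hlist_del(&rq->hash);	/* safe: next already saved */
			continue;
		}
	}

	for (pos = bucket.first; pos; pos = pos->next)
		left++;
	printf("%d mergeable entries remain on the chain\n", left);
	return 0;
}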