Commit 6f0d7a9eb60d70f22d71f00b2c762e255881ab31

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "Four small fixes that should be merged for the current 3.18-rc series.
  This pull request contains:

   - a minor bugfix for the computation of the best IO priority given
     two merging requests.  From Jan Kara.

   - the (hopefully final) fix for the segment/merge count issue that
     has been plaguing virtio-blk.  From Ming Lei.

   - enable parallel freezing of blk-mq queues in the reinit notifier,
     so the cost of an RCU grace period is shared across lots of
     devices.  From Tejun Heo.

   - an error handling fix for the SCSI_IOCTL_SEND_COMMAND ioctl.  From
     Tony Battersby"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: blk-merge: fix blk_recount_segments()
  scsi: Fix more error handling in SCSI_IOCTL_SEND_COMMAND
  blk-mq: make mq_queue_reinit_notify() freeze queues in parallel
  block: Fix computation of merged request priority

Diff of the 4 changed files:

@@ -97,19 +97,22 @@
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&q->queue_flags);
-	bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+	unsigned short seg_cnt;
 
-	if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-			merge_not_need)
-		bio->bi_phys_segments = bio->bi_vcnt;
+	/* estimate segment number by bi_vcnt for non-cloned bio */
+	if (bio_flagged(bio, BIO_CLONED))
+		seg_cnt = bio_segments(bio);
+	else
+		seg_cnt = bio->bi_vcnt;
+
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			(seg_cnt < queue_max_segments(q)))
+		bio->bi_phys_segments = seg_cnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-				no_sg_merge && merge_not_need);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}
 
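The heart of this fix: bi_vcnt counts the vector entries a bio owns, which is
wrong for a cloned bio that shares (and may have advanced partway through)
another bio's vector, so a clone's segments have to be counted by iterating,
as bio_segments() does. A minimal user-space sketch of that decision, with
made-up stand-in types (struct demo_bio and its fields are illustrations, not
kernel code):

#include <stdio.h>

#define DEMO_BIO_CLONED (1u << 0)	/* stand-in for BIO_CLONED */

struct demo_bio {
	unsigned int flags;
	unsigned short bi_vcnt;		/* entries in the owned vector */
	unsigned short iter_segments;	/* what iterating actually visits */
};

/* Mirrors the fixed logic: only a non-cloned bio may trust bi_vcnt. */
static unsigned short estimate_segments(const struct demo_bio *bio)
{
	if (bio->flags & DEMO_BIO_CLONED)
		return bio->iter_segments;	/* like bio_segments() */
	return bio->bi_vcnt;
}

int main(void)
{
	/* A clone sharing a 16-entry vector but spanning only 4 segments. */
	struct demo_bio clone = { DEMO_BIO_CLONED, 16, 4 };

	printf("clone covers %u segments, bi_vcnt would claim %u\n",
	       (unsigned)estimate_segments(&clone), (unsigned)clone.bi_vcnt);
	return 0;
}

That over-count of a clone's segments is what virtio-blk kept tripping over.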
@@ -107,11 +107,7 @@
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -123,9 +119,23 @@
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_queues(q, false);
 	}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	blk_mq_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
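Splitting blk_mq_freeze_queue() into blk_mq_freeze_queue_start() and
blk_mq_freeze_queue_wait() separates the cheap "begin draining" step from
the expensive "wait for the drain" step. A rough user-space model of the
same split, using C11 atomics and toy names (struct demo_queue and the
demo_freeze_* helpers are illustrative; the kernel sleeps on mq_freeze_wq
rather than spinning):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	atomic_int usage;	/* requests in flight */
	atomic_bool dying;	/* set once freezing has started */
};

/* Start half: forbid new users and return at once (percpu_ref_kill()). */
static void demo_freeze_start(struct demo_queue *q)
{
	atomic_store(&q->dying, true);
}

/* Wait half: block until the last user is gone (wait_event() analogue). */
static void demo_freeze_wait(struct demo_queue *q)
{
	while (atomic_load(&q->usage) != 0)
		;	/* toy spin; the kernel sleeps instead */
}

/* The recomposed blk_mq_freeze_queue(): start, then wait. */
static void demo_freeze(struct demo_queue *q)
{
	demo_freeze_start(q);
	demo_freeze_wait(q);
}

int main(void)
{
	struct demo_queue q = { 0 };	/* idle queue, nothing in flight */

	demo_freeze(&q);
	printf("frozen: dying=%d usage=%d\n",
	       (int)atomic_load(&q.dying), (int)atomic_load(&q.usage));
	return 0;
}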
@@ -1921,7 +1931,7 @@
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	blk_mq_freeze_queue(q);
+	WARN_ON_ONCE(!q->mq_freeze_depth);
 
 	blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@
 	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
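With freezing hoisted out, blk_mq_queue_reinit() no longer freezes and
unfreezes around its own body; it merely asserts, via
WARN_ON_ONCE(!q->mq_freeze_depth), that the caller already froze the queue.
The same ownership-transfer pattern in a stand-alone sketch (the names and
the depth counter here are illustrative, and assert() is only an analogue of
the kernel's non-fatal WARN_ON_ONCE()):

#include <assert.h>
#include <stdio.h>

struct demo_queue {
	int freeze_depth;	/* >0 while frozen */
};

static void demo_freeze(struct demo_queue *q)   { q->freeze_depth++; }
static void demo_unfreeze(struct demo_queue *q) { q->freeze_depth--; }

/* Like the patched blk_mq_queue_reinit(): asserts rather than freezes. */
static void demo_reinit(struct demo_queue *q)
{
	assert(q->freeze_depth > 0);	/* WARN_ON_ONCE() analogue */
	printf("reinit under a freeze held by the caller\n");
}

int main(void)
{
	struct demo_queue q = { 0 };

	demo_freeze(&q);	/* caller owns the freeze... */
	demo_reinit(&q);	/* ...so reinit can assume it */
	demo_unfreeze(&q);
	return 0;
}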
@@ -1956,8 +1964,25 @@
 		return NOTIFY_OK;
 
 	mutex_lock(&all_q_mutex);
+
+	/*
+	 * We need to freeze and reinit all existing queues.  Freezing
+	 * involves synchronous wait for an RCU grace period and doing it
+	 * one by one may take a long time.  Start freezing all queues in
+	 * one swoop and then wait for the completions so that freezing can
+	 * take place in parallel.
+	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_start(q);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_wait(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_unfreeze_queue(q);
+
 	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }
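This is where the split pays off: one pass starts every freeze and a second
pass waits, so the RCU grace periods overlap instead of queuing up behind
each other. A sketch of the cost difference with pthreads standing in for
the asynchronous drain (all names are hypothetical; sleep(1) stands in for
one grace period):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct demo_queue {
	pthread_t drain;
	int id;
};

static void *drain_fn(void *arg)
{
	struct demo_queue *q = arg;

	sleep(1);	/* one synchronous RCU grace period */
	printf("queue %d drained\n", q->id);
	return NULL;
}

/* Start half: kick off the drain and return at once. */
static void demo_freeze_start(struct demo_queue *q)
{
	pthread_create(&q->drain, NULL, drain_fn, q);
}

/* Wait half: block until this queue's drain completes. */
static void demo_freeze_wait(struct demo_queue *q)
{
	pthread_join(q->drain, NULL);
}

int main(void)
{
	struct demo_queue qs[4];
	int i;

	for (i = 0; i < 4; i++)
		qs[i].id = i;

	/* Start them all, then wait: ~1 second total instead of ~4. */
	for (i = 0; i < 4; i++)
		demo_freeze_start(&qs[i]);
	for (i = 0; i < 4; i++)
		demo_freeze_wait(&qs[i]);
	return 0;
}

Freezing N queues this way costs roughly one grace period rather than N.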
@@ -157,14 +157,16 @@
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+	unsigned short aclass;
+	unsigned short bclass;
 
-	if (aclass == IOPRIO_CLASS_NONE)
-		aclass = IOPRIO_CLASS_BE;
-	if (bclass == IOPRIO_CLASS_NONE)
-		bclass = IOPRIO_CLASS_BE;
+	if (!ioprio_valid(aprio))
+		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+	if (!ioprio_valid(bprio))
+		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+	aclass = IOPRIO_PRIO_CLASS(aprio);
+	bclass = IOPRIO_PRIO_CLASS(bprio);
 	if (aclass == bclass)
 		return min(aprio, bprio);
 	if (aclass > bclass)
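The subtlety in ioprio_best() is that the old code normalized only the local
class variables and then compared the raw values: an unset priority (class
IOPRIO_CLASS_NONE, numeric value 0) always won min() and propagated into the
merged request. A self-contained demonstration, with the relevant macros
copied from include/linux/ioprio.h (3.18 era) and a main() harness that is
purely illustrative:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_PRIO_CLASS(mask)		((mask) >> IOPRIO_CLASS_SHIFT)
#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)
#define IOPRIO_NORM	4

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

#define min(a, b)	((a) < (b) ? (a) : (b))

/* ioprio_best() as patched above. */
static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass;
	unsigned short bclass;

	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	aclass = IOPRIO_PRIO_CLASS(aprio);
	bclass = IOPRIO_PRIO_CLASS(bprio);
	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

int main(void)
{
	unsigned short unset = 0;	/* class IOPRIO_CLASS_NONE, never set */
	unsigned short be7 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 7);

	/* Old code returned min(0, be7) == 0, the invalid unset value. */
	printf("merged ioprio = 0x%x\n", (unsigned)ioprio_best(unset, be7));
	return 0;
}

With the fix this prints 0x4004 (best-effort, level 4) instead of the old
result 0, so a merged request no longer inherits an invalid priority.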
@@ -458,7 +458,7 @@
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
-		goto error;
+		goto error_free_buffer;
 	}
 	blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@
 	}
 
 error:
+	blk_put_request(rq);
+
+error_free_buffer:
 	kfree(buffer);
-	if (rq)
-		blk_put_request(rq);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
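The bug here is a classic one in staged goto cleanup: blk_get_request()
reports failure with an ERR_PTR(), which is non-NULL, so the old "if (rq)"
guard passed and the error path put a request that was never obtained.
Ordering the labels so each failure point jumps past the cleanups for
resources it never acquired is the standard fix; a stand-alone sketch of the
pattern (acquire_a()/acquire_b() are placeholders, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Placeholder acquisitions; either may fail and return NULL. */
static void *acquire_a(void) { return malloc(16); }
static void *acquire_b(void) { return NULL; /* simulate failure */ }

static int do_op(void)
{
	void *a, *b;
	int err = 0;

	a = acquire_a();
	if (!a)
		return -1;	/* nothing acquired yet, nothing to undo */

	b = acquire_b();
	if (!b) {
		err = -1;
		goto error_free_a;	/* b never acquired: skip its cleanup */
	}

	/* ... work with a and b; err stays 0 on success ... */

	free(b);		/* cleanup shared by success and late failures */
error_free_a:
	free(a);
	return err;
}

int main(void)
{
	printf("do_op() = %d\n", do_op());
	return 0;
}

Each label undoes exactly the acquisitions made before the jump, which is
what the new error/error_free_buffer ordering restores in sg_scsi_ioctl().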