Commit 88ee5ef157202624de2b43b3512fdcb54fda1ab5

Authored by Jens Axboe
Committed by Jens Axboe
1 parent ef9be1d336

[BLOCK] ll_rw_blk: fastpath get_request()

Originally from: Nick Piggin <nickpiggin@yahoo.com.au>

Move current_io_context out of the get_request fastpath.  Also try to
streamline a few other things in this area.

Signed-off-by: Jens Axboe <axboe@suse.de>
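
For context, a minimal user-space sketch of the control-flow change follows. This is not the kernel code itself: the point is that the (possibly allocating) io-context lookup is deferred from the common case into the near-full slow path. All types and helpers here (lookup_io_context(), allocate_request()) are simplified stand-ins for the real block-layer API.

#include <stdio.h>

struct io_context { int nr_batch_requests; };

/*
 * Stand-in for current_io_context(GFP_ATOMIC): potentially expensive
 * (it may allocate), so we only want to call it when strictly needed.
 */
static struct io_context *lookup_io_context(void)
{
	static struct io_context ioc = { .nr_batch_requests = 8 };
	printf("  io context looked up (slow path only)\n");
	return &ioc;
}

/* Returns 0 if a request may be allocated, -1 if the caller must wait. */
static int allocate_request(int queued, int nr_requests, int congest_thresh)
{
	struct io_context *ioc = NULL;	/* untouched on the fastpath */

	if (queued + 1 >= congest_thresh) {
		if (queued + 1 >= nr_requests) {
			/* Only the near-full path pays for the lookup. */
			ioc = lookup_io_context();
			if (ioc->nr_batch_requests == 0)
				return -1;	/* not a batcher, back off */
		}
		/* ...mark the queue congested here... */
	}
	/* ...bump counters, allocate and return the request... */
	return 0;
}

int main(void)
{
	printf("fastpath:\n");
	allocate_request(3, 128, 113);
	printf("slow path:\n");
	allocate_request(127, 128, 113);
	return 0;
}

In the queue's steady state (well below the congestion threshold), the outer comparison is the only cost added to allocation; the lookup and the batching bookkeeping are skipped entirely.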

Showing 1 changed file with 37 additions and 33 deletions

@@ -1908,40 +1908,40 @@
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = current_io_context(GFP_ATOMIC);
-	int priv;
+	struct io_context *ioc = NULL;
+	int may_queue, priv;
 
-	if (rl->count[rw]+1 >= q->nr_requests) {
-		/*
-		 * The queue will fill after this allocation, so set it as
-		 * full, and mark this process as "batching". This process
-		 * will be allowed to complete a batch of requests, others
-		 * will be blocked.
-		 */
-		if (!blk_queue_full(q, rw)) {
-			ioc_set_batching(q, ioc);
-			blk_set_queue_full(q, rw);
+	may_queue = elv_may_queue(q, rw, bio);
+	if (may_queue == ELV_MQUEUE_NO)
+		goto rq_starved;
+
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
+			ioc = current_io_context(GFP_ATOMIC);
+			/*
+			 * The queue will fill after this allocation, so set
+			 * it as full, and mark this process as "batching".
+			 * This process will be allowed to complete a batch of
+			 * requests, others will be blocked.
+			 */
+			if (!blk_queue_full(q, rw)) {
+				ioc_set_batching(q, ioc);
+				blk_set_queue_full(q, rw);
+			} else {
+				if (may_queue != ELV_MQUEUE_MUST
+						&& !ioc_batching(q, ioc)) {
+					/*
+					 * The queue is full and the allocating
+					 * process is not a "batcher", and not
+					 * exempted by the IO scheduler
+					 */
+					goto out;
+				}
+			}
 		}
+		set_queue_congested(q, rw);
 	}
 
-	switch (elv_may_queue(q, rw, bio)) {
-		case ELV_MQUEUE_NO:
-			goto rq_starved;
-		case ELV_MQUEUE_MAY:
-			break;
-		case ELV_MQUEUE_MUST:
-			goto get_rq;
-	}
-
-	if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-		/*
-		 * The queue is full and the allocating process is not a
-		 * "batcher", and not exempted by the IO scheduler
-		 */
-		goto out;
-	}
-
-get_rq:
 	/*
 	 * Only allow batching queuers to allocate up to 50% over the defined
 	 * limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +1952,6 @@
 
 	rl->count[rw]++;
 	rl->starved[rw] = 0;
-	if (rl->count[rw] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, rw);
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	if (priv)
@@ -1962,7 +1960,7 @@
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-	if (!rq) {
+	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
 		 * we might have messed up.
@@ -1987,6 +1985,12 @@
 		goto out;
 	}
 
+	/*
+	 * ioc may be NULL here, and ioc_batching will be false. That's
+	 * OK, if the queue is under the request limit then requests need
+	 * not count toward the nr_batch_requests limit. There will always
+	 * be some limit enforced by BLK_BATCH_TIME.
+	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
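The new comment in the last hunk leans on ioc_batching() tolerating a NULL context. For reference, the helper in ll_rw_blk.c of this era looks roughly like the sketch below (abridged and reconstructed; consult the tree at this commit for the authoritative version):

static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
	/* A NULL context is never a batcher; this is what makes the
	 * deferred lookup in get_request() safe. */
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batch_requests ||
	       (ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

With a NULL ioc the check simply returns 0, so a request allocated while the queue is under the limit never counts against nr_batch_requests, and the BLK_BATCH_TIME window still bounds how long any process keeps its batching privilege.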