Commit 9d4df77fab7347a74a9938521ffad8d8fab2671d

Authored by Sebastian Ott
Committed by Martin Schwidefsky
Parent: e2578b82c4

s390/scm_block: use mempool to manage aidaw requests

We currently use one preallocated page per HW request to store
aidaws. With this patch we use a mempool to allocate an aidaw page
whenever one is needed.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
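
For readers unfamiliar with the API, the page-pool pattern adopted by this
patch is sketched below. This is a minimal, hypothetical module fragment:
the pool_* helpers, page_pool, and nr_reqs are illustrative names, while the
mempool_* calls are the kernel API the patch actually uses.

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/mempool.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static mempool_t *page_pool;	/* illustrative stand-in for aidaw_pool */

    static int pool_setup(unsigned int nr_reqs)
    {
    	/* Keep a reserve of single pages (order 0), scaled to the
    	 * expected number of in-flight requests but at least one. */
    	page_pool = mempool_create_page_pool(max(nr_reqs / 8, 1U), 0);
    	return page_pool ? 0 : -ENOMEM;
    }

    static void *pool_get_page(void)
    {
    	/* With GFP_ATOMIC, mempool_alloc() may still return NULL once
    	 * the reserve is exhausted, so callers must handle failure. */
    	struct page *page = mempool_alloc(page_pool, GFP_ATOMIC);

    	return page ? page_address(page) : NULL;
    }

    static void pool_put_page(void *addr)
    {
    	/* A page pool frees struct page pointers, not virtual addresses. */
    	mempool_free(virt_to_page(addr), page_pool);
    }

    static void pool_teardown(void)
    {
    	mempool_destroy(page_pool);
    }

The NULL check matters: a mempool only guarantees forward progress for
allocations that may sleep, and GFP_ATOMIC callers can still come up empty.
That is why the patch below requeues the request and restarts the queue when
scm_request_prepare() fails to get an aidaw page.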

Showing 3 changed files with 55 additions and 12 deletions

drivers/s390/block/scm_blk.c
... ... @@ -10,6 +10,7 @@
10 10  
11 11 #include <linux/interrupt.h>
12 12 #include <linux/spinlock.h>
  13 +#include <linux/mempool.h>
13 14 #include <linux/module.h>
14 15 #include <linux/blkdev.h>
15 16 #include <linux/genhd.h>
... ... @@ -20,6 +21,7 @@
20 21  
21 22 debug_info_t *scm_debug;
22 23 static int scm_major;
  24 +static mempool_t *aidaw_pool;
23 25 static DEFINE_SPINLOCK(list_lock);
24 26 static LIST_HEAD(inactive_requests);
25 27 static unsigned int nr_requests = 64;
... ... @@ -36,7 +38,6 @@
36 38 struct aob_rq_header *aobrq = to_aobrq(scmrq);
37 39  
38 40 free_page((unsigned long) scmrq->aob);
39   - free_page((unsigned long) scmrq->aidaw);
40 41 __scm_free_rq_cluster(scmrq);
41 42 kfree(aobrq);
42 43 }
... ... @@ -53,6 +54,8 @@
53 54 __scm_free_rq(scmrq);
54 55 }
55 56 spin_unlock_irq(&list_lock);
  57 +
  58 + mempool_destroy(aidaw_pool);
56 59 }
57 60  
58 61 static int __scm_alloc_rq(void)
59 62  
... ... @@ -65,9 +68,8 @@
65 68 return -ENOMEM;
66 69  
67 70 scmrq = (void *) aobrq->data;
68   - scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
69 71 scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
70   - if (!scmrq->aob || !scmrq->aidaw) {
  72 + if (!scmrq->aob) {
71 73 __scm_free_rq(scmrq);
72 74 return -ENOMEM;
73 75 }
... ... @@ -89,6 +91,10 @@
89 91 {
90 92 int ret = 0;
91 93  
  94 + aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
  95 + if (!aidaw_pool)
  96 + return -ENOMEM;
  97 +
92 98 while (nrqs-- && !ret)
93 99 ret = __scm_alloc_rq();
94 100  
95 101  
... ... @@ -111,8 +117,13 @@
111 117  
112 118 static void scm_request_done(struct scm_request *scmrq)
113 119 {
  120 + struct msb *msb = &scmrq->aob->msb[0];
  121 + u64 aidaw = msb->data_addr;
114 122 unsigned long flags;
115 123  
  124 + if ((msb->flags & MSB_FLAG_IDA) && aidaw)
  125 + mempool_free(virt_to_page(aidaw), aidaw_pool);
  126 +
116 127 spin_lock_irqsave(&list_lock, flags);
117 128 list_add(&scmrq->list, &inactive_requests);
118 129 spin_unlock_irqrestore(&list_lock, flags);
119 130  
120 131  
121 132  
... ... @@ -123,15 +134,26 @@
123 134 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
124 135 }
125 136  
126   -static void scm_request_prepare(struct scm_request *scmrq)
  137 +struct aidaw *scm_aidaw_alloc(void)
127 138 {
  139 + struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
  140 +
  141 + return page ? page_address(page) : NULL;
  142 +}
  143 +
  144 +static int scm_request_prepare(struct scm_request *scmrq)
  145 +{
128 146 struct scm_blk_dev *bdev = scmrq->bdev;
129 147 struct scm_device *scmdev = bdev->gendisk->private_data;
130   - struct aidaw *aidaw = scmrq->aidaw;
  148 + struct aidaw *aidaw = scm_aidaw_alloc();
131 149 struct msb *msb = &scmrq->aob->msb[0];
132 150 struct req_iterator iter;
133 151 struct bio_vec bv;
134 152  
  153 + if (!aidaw)
  154 + return -ENOMEM;
  155 +
  156 + memset(aidaw, 0, PAGE_SIZE);
135 157 msb->bs = MSB_BS_4K;
136 158 scmrq->aob->request.msb_count = 1;
137 159 msb->scm_addr = scmdev->address +
... ... @@ -147,6 +169,8 @@
147 169 aidaw->data_addr = (u64) page_address(bv.bv_page);
148 170 aidaw++;
149 171 }
  172 +
  173 + return 0;
150 174 }
151 175  
152 176 static inline void scm_request_init(struct scm_blk_dev *bdev,
... ... @@ -157,7 +181,6 @@
157 181 struct aob *aob = scmrq->aob;
158 182  
159 183 memset(aob, 0, sizeof(*aob));
160   - memset(scmrq->aidaw, 0, PAGE_SIZE);
161 184 aobrq->scmdev = bdev->scmdev;
162 185 aob->request.cmd_code = ARQB_CMD_MOVE;
163 186 aob->request.data = (u64) aobrq;
... ... @@ -236,7 +259,15 @@
236 259 scm_initiate_cluster_request(scmrq);
237 260 return;
238 261 }
239   - scm_request_prepare(scmrq);
  262 +
  263 + if (scm_request_prepare(scmrq)) {
  264 + SCM_LOG(5, "no aidaw");
  265 + scm_release_cluster(scmrq);
  266 + scm_request_done(scmrq);
  267 + scm_ensure_queue_restart(bdev);
  268 + return;
  269 + }
  270 +
240 271 atomic_inc(&bdev->queued_reqs);
241 272 blk_start_request(req);
242 273  
drivers/s390/block/scm_blk.h
... ... @@ -31,7 +31,6 @@
31 31 struct scm_request {
32 32 struct scm_blk_dev *bdev;
33 33 struct request *request;
34   - struct aidaw *aidaw;
35 34 struct aob *aob;
36 35 struct list_head list;
37 36 u8 retries;
... ... @@ -54,6 +53,8 @@
54 53  
55 54 void scm_request_finish(struct scm_request *);
56 55 void scm_request_requeue(struct scm_request *);
  56 +
  57 +struct aidaw *scm_aidaw_alloc(void);
57 58  
58 59 int scm_drv_init(void);
59 60 void scm_drv_cleanup(void);
drivers/s390/block/scm_blk_cluster.c
... ... @@ -114,14 +114,14 @@
114 114 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
115 115 }
116 116  
117   -static void scm_prepare_cluster_request(struct scm_request *scmrq)
  117 +static int scm_prepare_cluster_request(struct scm_request *scmrq)
118 118 {
119 119 struct scm_blk_dev *bdev = scmrq->bdev;
120 120 struct scm_device *scmdev = bdev->gendisk->private_data;
121 121 struct request *req = scmrq->request;
122   - struct aidaw *aidaw = scmrq->aidaw;
123 122 struct msb *msb = &scmrq->aob->msb[0];
124 123 struct req_iterator iter;
  124 + struct aidaw *aidaw;
125 125 struct bio_vec bv;
126 126 int i = 0;
127 127 u64 addr;
... ... @@ -131,6 +131,11 @@
131 131 scmrq->cluster.state = CLUSTER_READ;
132 132 /* fall through */
133 133 case CLUSTER_READ:
  134 + aidaw = scm_aidaw_alloc();
  135 + if (!aidaw)
  136 + return -ENOMEM;
  137 +
  138 + memset(aidaw, 0, PAGE_SIZE);
134 139 scmrq->aob->request.msb_count = 1;
135 140 msb->bs = MSB_BS_4K;
136 141 msb->oc = MSB_OC_READ;
... ... @@ -153,6 +158,7 @@
153 158  
154 159 break;
155 160 case CLUSTER_WRITE:
  161 + aidaw = (void *) msb->data_addr;
156 162 msb->oc = MSB_OC_WRITE;
157 163  
158 164 for (addr = msb->scm_addr;
... ... @@ -173,6 +179,7 @@
173 179 }
174 180 break;
175 181 }
  182 + return 0;
176 183 }
177 184  
178 185 bool scm_need_cluster_request(struct scm_request *scmrq)
179 186  
... ... @@ -186,9 +193,13 @@
186 193 /* Called with queue lock held. */
187 194 void scm_initiate_cluster_request(struct scm_request *scmrq)
188 195 {
189   - scm_prepare_cluster_request(scmrq);
  196 + if (scm_prepare_cluster_request(scmrq))
  197 + goto requeue;
190 198 if (eadm_start_aob(scmrq->aob))
191   - scm_request_requeue(scmrq);
  199 + goto requeue;
  200 + return;
  201 +requeue:
  202 + scm_request_requeue(scmrq);
192 203 }
193 204  
194 205 bool scm_test_cluster_request(struct scm_request *scmrq)