Commit 7fbaee72ff62843198980c258d09590536681b15
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
Merge branch 'stable/for-jens-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into for-linus
Konrad writes: [the branch] has a bunch of fixes. They vary from being able to deal with unknown requests, an overflow in statistics, compile warnings, a bug in the error path, and the removal of unnecessary logic. There is also one performance fix, which is to allocate pages for requests when the driver loads instead of doing it per request.
Showing 5 changed files (side-by-side diff)
drivers/block/xen-blkback/blkback.c
... | ... | @@ -164,7 +164,7 @@ |
164 | 164 | |
165 | 165 | #define foreach_grant_safe(pos, n, rbtree, node) \ |
166 | 166 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ |
167 | - (n) = rb_next(&(pos)->node); \ | |
167 | + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ | |
168 | 168 | &(pos)->node != NULL; \ |
169 | 169 | (pos) = container_of(n, typeof(*(pos)), node), \ |
170 | 170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) |
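
The guard in the initializer matters when the red-black tree is empty: rb_first() then returns NULL, &(pos)->node is NULL, and the old unconditional rb_next(&(pos)->node) would dereference a NULL pointer before the loop condition was ever evaluated. A minimal userspace analogue of the same delete-safe iteration pattern (plain C with a hypothetical node type, not the kernel macro):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    /*
     * Cache the successor before running the body so the body may
     * free (pos); guard both assignments so an empty list never
     * dereferences NULL, the same shape as foreach_grant_safe.
     */
    #define foreach_node_safe(pos, n, head)                        \
            for ((pos) = (head), (n) = (pos) ? (pos)->next : NULL; \
                 (pos) != NULL;                                    \
                 (pos) = (n), (n) = (pos) ? (pos)->next : NULL)

    int main(void)
    {
            struct node *head = NULL, *pos, *n;
            int i;

            for (i = 0; i < 3; i++) {
                    struct node *e = malloc(sizeof(*e));

                    if (!e)
                            return 1;
                    e->val = i;
                    e->next = head;
                    head = e;
            }
            foreach_node_safe(pos, n, head) {
                    printf("freeing %d\n", pos->val);
                    free(pos); /* safe: successor already cached in n */
            }
            return 0;
    }
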
... | ... | @@ -381,8 +381,8 @@ |
381 | 381 | |
382 | 382 | static void print_stats(struct xen_blkif *blkif) |
383 | 383 | { |
384 | - pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" | |
385 | - " | ds %4d\n", | |
384 | + pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" | |
385 | + " | ds %4llu\n", | |
386 | 386 | current->comm, blkif->st_oo_req, |
387 | 387 | blkif->st_rd_req, blkif->st_wr_req, |
388 | 388 | blkif->st_f_req, blkif->st_ds_req); |
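
Background for the widening above: the per-device counters were plain int, so a busy backend could wrap them past INT_MAX, and the statistics would then show negative request counts. Widening to unsigned long long also means every printf-style format has to move from %d to %llu (here and in the VBD_SHOW() readers in xenbus.c below), or the format/argument mismatch raises compile warnings. A tiny standalone illustration, assuming nothing beyond libc:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long wide = (unsigned long long)INT_MAX + 1;

            /*
             * An int counter cannot represent this value at all:
             * incrementing past INT_MAX is undefined behaviour, and
             * in practice the counters went negative.
             */
            printf("requests served: %llu (INT_MAX is %d)\n",
                   wide, INT_MAX);
            return 0;
    }
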
... | ... | @@ -442,7 +442,7 @@ |
442 | 442 | } |
443 | 443 | |
444 | 444 | struct seg_buf { |
445 | - unsigned long buf; | |
445 | + unsigned int offset; | |
446 | 446 | unsigned int nsec; |
447 | 447 | }; |
448 | 448 | /*
449 | 449 |  * Unmap the grant references, and also remove the M2P over-rides
450 | 450 |  * used in the 'pending_req'.
451 | 451 |  */
452 | 452 | static void xen_blkbk_unmap(struct pending_req *req)
453 | 453 | {
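
In this and the following hunks, seg[i] stops carrying a device bus address: with persistent grants the stored dev_bus_addr was redundant (it is derivable from the page itself), and the only thing the bio construction further down still needs is the offset of the segment inside its page. first_sect counts 512-byte sectors, so the byte offset is first_sect << 9. A self-contained sketch of that conversion, with a hypothetical helper name:

    #include <assert.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9 /* 512-byte sectors, as in the blkif protocol */

    /* Byte offset into the segment's 4 KiB page. */
    static unsigned int seg_offset(uint8_t first_sect)
    {
            return (unsigned int)first_sect << SECTOR_SHIFT;
    }

    int main(void)
    {
            assert(seg_offset(0) == 0);
            assert(seg_offset(1) == 512);
            assert(seg_offset(7) == 3584); /* last sector in a 4 KiB page */
            return 0;
    }
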
... | ... | @@ -621,30 +621,21 @@ |
621 | 621 | * If this is a new persistent grant |
622 | 622 | * save the handler |
623 | 623 | */ |
624 | - persistent_gnts[i]->handle = map[j].handle; | |
625 | - persistent_gnts[i]->dev_bus_addr = | |
626 | - map[j++].dev_bus_addr; | |
624 | + persistent_gnts[i]->handle = map[j++].handle; | |
627 | 625 | } |
628 | 626 | pending_handle(pending_req, i) = |
629 | 627 | persistent_gnts[i]->handle; |
630 | 628 | |
631 | 629 | if (ret) |
632 | 630 | continue; |
633 | - | |
634 | - seg[i].buf = persistent_gnts[i]->dev_bus_addr | | |
635 | - (req->u.rw.seg[i].first_sect << 9); | |
636 | 631 | } else { |
637 | - pending_handle(pending_req, i) = map[j].handle; | |
632 | + pending_handle(pending_req, i) = map[j++].handle; | |
638 | 633 | bitmap_set(pending_req->unmap_seg, i, 1); |
639 | 634 | |
640 | - if (ret) { | |
641 | - j++; | |
635 | + if (ret) | |
642 | 636 | continue; |
643 | - } | |
644 | - | |
645 | - seg[i].buf = map[j++].dev_bus_addr | | |
646 | - (req->u.rw.seg[i].first_sect << 9); | |
647 | 637 | } |
638 | + seg[i].offset = (req->u.rw.seg[i].first_sect << 9); | |
648 | 639 | } |
649 | 640 | return ret; |
650 | 641 | } |
... | ... | @@ -679,6 +670,16 @@ |
679 | 670 | return err; |
680 | 671 | } |
681 | 672 | |
673 | +static int dispatch_other_io(struct xen_blkif *blkif, | |
674 | + struct blkif_request *req, | |
675 | + struct pending_req *pending_req) | |
676 | +{ | |
677 | + free_req(pending_req); | |
678 | + make_response(blkif, req->u.other.id, req->operation, | |
679 | + BLKIF_RSP_EOPNOTSUPP); | |
680 | + return -EIO; | |
681 | +} | |
682 | + | |
682 | 683 | static void xen_blk_drain_io(struct xen_blkif *blkif) |
683 | 684 | { |
684 | 685 | atomic_set(&blkif->drain, 1); |
685 | 686 | |
686 | 687 | |
687 | 688 | |
... | ... | @@ -800,17 +801,30 @@ |
800 | 801 | |
801 | 802 | /* Apply all sanity checks to /private copy/ of request. */ |
802 | 803 | barrier(); |
803 | - if (unlikely(req.operation == BLKIF_OP_DISCARD)) { | |
804 | + | |
805 | + switch (req.operation) { | |
806 | + case BLKIF_OP_READ: | |
807 | + case BLKIF_OP_WRITE: | |
808 | + case BLKIF_OP_WRITE_BARRIER: | |
809 | + case BLKIF_OP_FLUSH_DISKCACHE: | |
810 | + if (dispatch_rw_block_io(blkif, &req, pending_req)) | |
811 | + goto done; | |
812 | + break; | |
813 | + case BLKIF_OP_DISCARD: | |
804 | 814 | free_req(pending_req); |
805 | 815 | if (dispatch_discard_io(blkif, &req)) |
806 | - break; | |
807 | - } else if (dispatch_rw_block_io(blkif, &req, pending_req)) | |
816 | + goto done; | |
808 | 817 | break; |
818 | + default: | |
819 | + if (dispatch_other_io(blkif, &req, pending_req)) | |
820 | + goto done; | |
821 | + break; | |
822 | + } | |
809 | 823 | |
810 | 824 | /* Yield point for this unbounded loop. */ |
811 | 825 | cond_resched(); |
812 | 826 | } |
813 | - | |
827 | +done: | |
814 | 828 | return more_to_do; |
815 | 829 | } |
816 | 830 | |
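
The rewritten loop validates req.operation up front and routes each opcode explicitly; anything unrecognized now gets a BLKIF_RSP_EOPNOTSUPP response via dispatch_other_io() instead of being fed to the read/write path, and the ring is abandoned (goto done) when a dispatcher fails. A userspace sketch of that dispatch shape, with hypothetical names and opcodes:

    #include <stdio.h>

    enum op { OP_READ, OP_WRITE, OP_FLUSH, OP_DISCARD };

    /* Returns 0 to keep consuming the ring, -1 to stop. */
    static int dispatch(int op, unsigned long id)
    {
            switch (op) {
            case OP_READ:
            case OP_WRITE:
            case OP_FLUSH:
                    printf("rw path: op %d id %lu\n", op, id);
                    return 0;
            case OP_DISCARD:
                    printf("discard: id %lu\n", id);
                    return 0;
            default:
                    /*
                     * Unknown opcode: fail this one request politely
                     * (EOPNOTSUPP) rather than misparse its payload.
                     */
                    printf("EOPNOTSUPP: op %d id %lu\n", op, id);
                    return -1;
            }
    }

    int main(void)
    {
            dispatch(OP_READ, 1);
            dispatch(42, 2); /* bogus opcode from the frontend */
            return 0;
    }
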
... | ... | @@ -904,7 +918,8 @@ |
904 | 918 | pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", |
905 | 919 | operation == READ ? "read" : "write", |
906 | 920 | preq.sector_number, |
907 | - preq.sector_number + preq.nr_sects, preq.dev); | |
921 | + preq.sector_number + preq.nr_sects, | |
922 | + blkif->vbd.pdevice); | |
908 | 923 | goto fail_response; |
909 | 924 | } |
910 | 925 | |
... | ... | @@ -947,7 +962,7 @@ |
947 | 962 | (bio_add_page(bio, |
948 | 963 | pages[i], |
949 | 964 | seg[i].nsec << 9, |
950 | - seg[i].buf & ~PAGE_MASK) == 0)) { | |
965 | + seg[i].offset) == 0)) { | |
951 | 966 | |
952 | 967 | bio = bio_alloc(GFP_KERNEL, nseg-i); |
953 | 968 | if (unlikely(bio == NULL)) |
954 | 969 | goto fail_put_bio;
... | ... | @@ -977,13 +992,7 @@ |
977 | 992 | bio->bi_end_io = end_block_io_op; |
978 | 993 | } |
979 | 994 | |
980 | - /* | |
981 | - * We set it one so that the last submit_bio does not have to call | |
982 | - * atomic_inc. | |
983 | - */ | |
984 | 995 | atomic_set(&pending_req->pendcnt, nbio); |
985 | - | |
986 | - /* Get a reference count for the disk queue and start sending I/O */ | |
987 | 996 | blk_start_plug(&plug); |
988 | 997 | |
989 | 998 | for (i = 0; i < nbio; i++) |
... | ... | @@ -1011,6 +1020,7 @@ |
1011 | 1020 | fail_put_bio: |
1012 | 1021 | for (i = 0; i < nbio; i++) |
1013 | 1022 | bio_put(biolist[i]); |
1023 | + atomic_set(&pending_req->pendcnt, 1); | |
1014 | 1024 | __end_block_io_op(pending_req, -EINVAL); |
1015 | 1025 | msleep(1); /* back off a bit */ |
1016 | 1026 | return -EIO; |
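
The atomic_set() added in the fail path is the error-path bug from the merge message: __end_block_io_op() completes the request when an atomic decrement of pendcnt reaches zero, so when no bios were actually submitted, pendcnt must be primed to 1 before the single failing call, or the counter is decremented from a stale value. A C11 sketch of the same completion-counting pattern, with hypothetical types:

    #include <stdatomic.h>
    #include <stdio.h>

    struct pending { atomic_int pendcnt; };

    /* Called once per finished sub-I/O; the last one completes it. */
    static void end_one(struct pending *p, int error)
    {
            if (atomic_fetch_sub(&p->pendcnt, 1) == 1)
                    printf("request complete, status %d\n", error);
    }

    int main(void)
    {
            struct pending req;

            /* Error path: nothing in flight, fail the request once. */
            atomic_store(&req.pendcnt, 1);
            end_one(&req, -22 /* -EINVAL */);
            return 0;
    }
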
drivers/block/xen-blkback/common.h
... | ... | @@ -77,11 +77,18 @@ |
77 | 77 | uint64_t nr_sectors; |
78 | 78 | } __attribute__((__packed__)); |
79 | 79 | |
80 | +struct blkif_x86_32_request_other { | |
81 | + uint8_t _pad1; | |
82 | + blkif_vdev_t _pad2; | |
83 | + uint64_t id; /* private guest value, echoed in resp */ | |
84 | +} __attribute__((__packed__)); | |
85 | + | |
80 | 86 | struct blkif_x86_32_request { |
81 | 87 | uint8_t operation; /* BLKIF_OP_??? */ |
82 | 88 | union { |
83 | 89 | struct blkif_x86_32_request_rw rw; |
84 | 90 | struct blkif_x86_32_request_discard discard; |
91 | + struct blkif_x86_32_request_other other; | |
85 | 92 | } u; |
86 | 93 | } __attribute__((__packed__)); |
87 | 94 | |
88 | 95 | |
... | ... | @@ -113,11 +120,19 @@ |
113 | 120 | uint64_t nr_sectors; |
114 | 121 | } __attribute__((__packed__)); |
115 | 122 | |
123 | +struct blkif_x86_64_request_other { | |
124 | + uint8_t _pad1; | |
125 | + blkif_vdev_t _pad2; | |
126 | + uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ | |
127 | + uint64_t id; /* private guest value, echoed in resp */ | |
128 | +} __attribute__((__packed__)); | |
129 | + | |
116 | 130 | struct blkif_x86_64_request { |
117 | 131 | uint8_t operation; /* BLKIF_OP_??? */ |
118 | 132 | union { |
119 | 133 | struct blkif_x86_64_request_rw rw; |
120 | 134 | struct blkif_x86_64_request_discard discard; |
135 | + struct blkif_x86_64_request_other other; | |
121 | 136 | } u; |
122 | 137 | } __attribute__((__packed__)); |
123 | 138 | |
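
The _pad3 in the 64-bit variant is pure ABI bookkeeping: behind the 1-byte operation field, a 64-bit guest's id sits at byte offset 8 of the request (1 pad byte, 2 bytes of vdev, 4 bytes of explicit pad), the same place the rw and discard variants keep theirs, which is what lets the backend fish the id out of a request whose opcode it does not understand. A compile-time check of that layout, as a standalone sketch with stand-in type names:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint16_t vdev_t;

    struct req_other_64 {
            uint8_t  _pad1;
            vdev_t   _pad2;
            uint32_t _pad3; /* keeps id at request offset 8 */
            uint64_t id;
    } __attribute__((__packed__));

    struct req_64 {
            uint8_t operation;
            union {
                    struct req_other_64 other;
            } u;
    } __attribute__((__packed__));

    _Static_assert(offsetof(struct req_64, u.other.id) == 8,
                   "id must line up with the rw/discard variants");

    int main(void) { return 0; }
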
... | ... | @@ -172,7 +187,6 @@ |
172 | 187 | struct page *page; |
173 | 188 | grant_ref_t gnt; |
174 | 189 | grant_handle_t handle; |
175 | - uint64_t dev_bus_addr; | |
176 | 190 | struct rb_node node; |
177 | 191 | }; |
178 | 192 | |
... | ... | @@ -208,13 +222,13 @@ |
208 | 222 | |
209 | 223 | /* statistics */ |
210 | 224 | unsigned long st_print; |
211 | - int st_rd_req; | |
212 | - int st_wr_req; | |
213 | - int st_oo_req; | |
214 | - int st_f_req; | |
215 | - int st_ds_req; | |
216 | - int st_rd_sect; | |
217 | - int st_wr_sect; | |
225 | + unsigned long long st_rd_req; | |
226 | + unsigned long long st_wr_req; | |
227 | + unsigned long long st_oo_req; | |
228 | + unsigned long long st_f_req; | |
229 | + unsigned long long st_ds_req; | |
230 | + unsigned long long st_rd_sect; | |
231 | + unsigned long long st_wr_sect; | |
218 | 232 | |
219 | 233 | wait_queue_head_t waiting_to_free; |
220 | 234 | }; |
... | ... | @@ -278,6 +292,11 @@ |
278 | 292 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
279 | 293 | break; |
280 | 294 | default: |
295 | + /* | |
296 | + * Don't know how to translate this op. Only get the | |
297 | + * ID so failure can be reported to the frontend. | |
298 | + */ | |
299 | + dst->u.other.id = src->u.other.id; | |
281 | 300 | break; |
282 | 301 | } |
283 | 302 | } |
... | ... | @@ -309,6 +328,11 @@ |
309 | 328 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
310 | 329 | break; |
311 | 330 | default: |
331 | + /* | |
332 | + * Don't know how to translate this op. Only get the | |
333 | + * ID so failure can be reported to the frontend. | |
334 | + */ | |
335 | + dst->u.other.id = src->u.other.id; | |
312 | 336 | break; |
313 | 337 | } |
314 | 338 | } |
drivers/block/xen-blkback/xenbus.c
... | ... | @@ -230,13 +230,13 @@ |
230 | 230 | } \ |
231 | 231 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
232 | 232 | |
233 | -VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); | |
234 | -VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); | |
235 | -VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); | |
236 | -VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); | |
237 | -VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); | |
238 | -VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); | |
239 | -VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); | |
233 | +VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req); | |
234 | +VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req); | |
235 | +VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req); | |
236 | +VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req); | |
237 | +VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req); | |
238 | +VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect); | |
239 | +VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect); | |
240 | 240 | |
241 | 241 | static struct attribute *xen_vbdstat_attrs[] = { |
242 | 242 | &dev_attr_oo_req.attr, |
drivers/block/xen-blkfront.c
... | ... | @@ -44,7 +44,7 @@ |
44 | 44 | #include <linux/mutex.h> |
45 | 45 | #include <linux/scatterlist.h> |
46 | 46 | #include <linux/bitmap.h> |
47 | -#include <linux/llist.h> | |
47 | +#include <linux/list.h> | |
48 | 48 | |
49 | 49 | #include <xen/xen.h> |
50 | 50 | #include <xen/xenbus.h> |
51 | 51 | |
... | ... | @@ -68,13 +68,12 @@ |
68 | 68 | struct grant { |
69 | 69 | grant_ref_t gref; |
70 | 70 | unsigned long pfn; |
71 | - struct llist_node node; | |
71 | + struct list_head node; | |
72 | 72 | }; |
73 | 73 | |
74 | 74 | struct blk_shadow { |
75 | 75 | struct blkif_request req; |
76 | 76 | struct request *request; |
77 | - unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | |
78 | 77 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
79 | 78 | }; |
80 | 79 | |
... | ... | @@ -105,7 +104,7 @@ |
105 | 104 | struct work_struct work; |
106 | 105 | struct gnttab_free_callback callback; |
107 | 106 | struct blk_shadow shadow[BLK_RING_SIZE]; |
108 | - struct llist_head persistent_gnts; | |
107 | + struct list_head persistent_gnts; | |
109 | 108 | unsigned int persistent_gnts_c; |
110 | 109 | unsigned long shadow_free; |
111 | 110 | unsigned int feature_flush; |
... | ... | @@ -165,6 +164,69 @@ |
165 | 164 | return 0; |
166 | 165 | } |
167 | 166 | |
167 | +static int fill_grant_buffer(struct blkfront_info *info, int num) | |
168 | +{ | |
169 | + struct page *granted_page; | |
170 | + struct grant *gnt_list_entry, *n; | |
171 | + int i = 0; | |
172 | + | |
173 | + while(i < num) { | |
174 | + gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); | |
175 | + if (!gnt_list_entry) | |
176 | + goto out_of_memory; | |
177 | + | |
178 | + granted_page = alloc_page(GFP_NOIO); | |
179 | + if (!granted_page) { | |
180 | + kfree(gnt_list_entry); | |
181 | + goto out_of_memory; | |
182 | + } | |
183 | + | |
184 | + gnt_list_entry->pfn = page_to_pfn(granted_page); | |
185 | + gnt_list_entry->gref = GRANT_INVALID_REF; | |
186 | + list_add(&gnt_list_entry->node, &info->persistent_gnts); | |
187 | + i++; | |
188 | + } | |
189 | + | |
190 | + return 0; | |
191 | + | |
192 | +out_of_memory: | |
193 | + list_for_each_entry_safe(gnt_list_entry, n, | |
194 | + &info->persistent_gnts, node) { | |
195 | + list_del(&gnt_list_entry->node); | |
196 | + __free_page(pfn_to_page(gnt_list_entry->pfn)); | |
197 | + kfree(gnt_list_entry); | |
198 | + i--; | |
199 | + } | |
200 | + BUG_ON(i != 0); | |
201 | + return -ENOMEM; | |
202 | +} | |
203 | + | |
204 | +static struct grant *get_grant(grant_ref_t *gref_head, | |
205 | + struct blkfront_info *info) | |
206 | +{ | |
207 | + struct grant *gnt_list_entry; | |
208 | + unsigned long buffer_mfn; | |
209 | + | |
210 | + BUG_ON(list_empty(&info->persistent_gnts)); | |
211 | + gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, | |
212 | + node); | |
213 | + list_del(&gnt_list_entry->node); | |
214 | + | |
215 | + if (gnt_list_entry->gref != GRANT_INVALID_REF) { | |
216 | + info->persistent_gnts_c--; | |
217 | + return gnt_list_entry; | |
218 | + } | |
219 | + | |
220 | + /* Assign a gref to this page */ | |
221 | + gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); | |
222 | + BUG_ON(gnt_list_entry->gref == -ENOSPC); | |
223 | + buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | |
224 | + gnttab_grant_foreign_access_ref(gnt_list_entry->gref, | |
225 | + info->xbdev->otherend_id, | |
226 | + buffer_mfn, 0); | |
227 | + return gnt_list_entry; | |
228 | +} | |
229 | + | |
168 | 230 | static const char *op_name(int op) |
169 | 231 | { |
170 | 232 | static const char *const names[] = { |
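
fill_grant_buffer() and get_grant() are the performance fix from the merge message: the frontend allocates one page plus one tracking struct per possible in-flight segment at setup time, and the request path just pops an entry off the list; entries that come back with a still-valid gref are reused without re-granting. A minimal userspace sketch of that preallocated-pool pattern (hypothetical names, no grant logic):

    #include <stdlib.h>

    struct buf { void *page; struct buf *next; };

    static struct buf *pool;

    /*
     * Allocate everything up front so the hot path never calls the
     * allocator (the kernel version thereby avoids GFP_ATOMIC on
     * the I/O path).
     */
    static int fill_pool(int num)
    {
            while (num--) {
                    struct buf *b = malloc(sizeof(*b));

                    if (!b || !(b->page = malloc(4096))) {
                            free(b);
                            return -1; /* caller tears the pool down */
                    }
                    b->next = pool;
                    pool = b;
            }
            return 0;
    }

    static struct buf *get_buf(void)
    {
            /* Sized for the worst case, so the pool never runs dry. */
            struct buf *b = pool;

            pool = b->next;
            return b;
    }

    int main(void)
    {
            if (fill_pool(8))
                    return 1;
            return get_buf()->page ? 0 : 1;
    }
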
... | ... | @@ -293,7 +355,6 @@ |
293 | 355 | static int blkif_queue_request(struct request *req) |
294 | 356 | { |
295 | 357 | struct blkfront_info *info = req->rq_disk->private_data; |
296 | - unsigned long buffer_mfn; | |
297 | 358 | struct blkif_request *ring_req; |
298 | 359 | unsigned long id; |
299 | 360 | unsigned int fsect, lsect; |
... | ... | @@ -306,7 +367,6 @@ |
306 | 367 | */ |
307 | 368 | bool new_persistent_gnts; |
308 | 369 | grant_ref_t gref_head; |
309 | - struct page *granted_page; | |
310 | 370 | struct grant *gnt_list_entry = NULL; |
311 | 371 | struct scatterlist *sg; |
312 | 372 | |
313 | 373 | |
... | ... | @@ -370,42 +430,9 @@ |
370 | 430 | fsect = sg->offset >> 9; |
371 | 431 | lsect = fsect + (sg->length >> 9) - 1; |
372 | 432 | |
373 | - if (info->persistent_gnts_c) { | |
374 | - BUG_ON(llist_empty(&info->persistent_gnts)); | |
375 | - gnt_list_entry = llist_entry( | |
376 | - llist_del_first(&info->persistent_gnts), | |
377 | - struct grant, node); | |
433 | + gnt_list_entry = get_grant(&gref_head, info); | |
434 | + ref = gnt_list_entry->gref; | |
378 | 435 | |
379 | - ref = gnt_list_entry->gref; | |
380 | - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | |
381 | - info->persistent_gnts_c--; | |
382 | - } else { | |
383 | - ref = gnttab_claim_grant_reference(&gref_head); | |
384 | - BUG_ON(ref == -ENOSPC); | |
385 | - | |
386 | - gnt_list_entry = | |
387 | - kmalloc(sizeof(struct grant), | |
388 | - GFP_ATOMIC); | |
389 | - if (!gnt_list_entry) | |
390 | - return -ENOMEM; | |
391 | - | |
392 | - granted_page = alloc_page(GFP_ATOMIC); | |
393 | - if (!granted_page) { | |
394 | - kfree(gnt_list_entry); | |
395 | - return -ENOMEM; | |
396 | - } | |
397 | - | |
398 | - gnt_list_entry->pfn = | |
399 | - page_to_pfn(granted_page); | |
400 | - gnt_list_entry->gref = ref; | |
401 | - | |
402 | - buffer_mfn = pfn_to_mfn(page_to_pfn( | |
403 | - granted_page)); | |
404 | - gnttab_grant_foreign_access_ref(ref, | |
405 | - info->xbdev->otherend_id, | |
406 | - buffer_mfn, 0); | |
407 | - } | |
408 | - | |
409 | 436 | info->shadow[id].grants_used[i] = gnt_list_entry; |
410 | 437 | |
411 | 438 | if (rq_data_dir(req)) { |
... | ... | @@ -435,7 +462,6 @@ |
435 | 462 | kunmap_atomic(shared_data); |
436 | 463 | } |
437 | 464 | |
438 | - info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); | |
439 | 465 | ring_req->u.rw.seg[i] = |
440 | 466 | (struct blkif_request_segment) { |
441 | 467 | .gref = ref, |
... | ... | @@ -790,9 +816,8 @@ |
790 | 816 | |
791 | 817 | static void blkif_free(struct blkfront_info *info, int suspend) |
792 | 818 | { |
793 | - struct llist_node *all_gnts; | |
794 | - struct grant *persistent_gnt, *tmp; | |
795 | - struct llist_node *n; | |
819 | + struct grant *persistent_gnt; | |
820 | + struct grant *n; | |
796 | 821 | |
797 | 822 | /* Prevent new requests being issued until we fix things up. */ |
798 | 823 | spin_lock_irq(&info->io_lock); |
799 | 824 | |
800 | 825 | |
801 | 826 | |
... | ... | @@ -803,22 +828,20 @@ |
803 | 828 | blk_stop_queue(info->rq); |
804 | 829 | |
805 | 830 | /* Remove all persistent grants */ |
806 | - if (info->persistent_gnts_c) { | |
807 | - all_gnts = llist_del_all(&info->persistent_gnts); | |
808 | - persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); | |
809 | - while (persistent_gnt) { | |
810 | - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | |
831 | + if (!list_empty(&info->persistent_gnts)) { | |
832 | + list_for_each_entry_safe(persistent_gnt, n, | |
833 | + &info->persistent_gnts, node) { | |
834 | + list_del(&persistent_gnt->node); | |
835 | + if (persistent_gnt->gref != GRANT_INVALID_REF) { | |
836 | + gnttab_end_foreign_access(persistent_gnt->gref, | |
837 | + 0, 0UL); | |
838 | + info->persistent_gnts_c--; | |
839 | + } | |
811 | 840 | __free_page(pfn_to_page(persistent_gnt->pfn)); |
812 | - tmp = persistent_gnt; | |
813 | - n = persistent_gnt->node.next; | |
814 | - if (n) | |
815 | - persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node); | |
816 | - else | |
817 | - persistent_gnt = NULL; | |
818 | - kfree(tmp); | |
841 | + kfree(persistent_gnt); | |
819 | 842 | } |
820 | - info->persistent_gnts_c = 0; | |
821 | 843 | } |
844 | + BUG_ON(info->persistent_gnts_c != 0); | |
822 | 845 | |
823 | 846 | /* No more gnttab callback work. */ |
824 | 847 | gnttab_cancel_free_callback(&info->callback); |
... | ... | @@ -875,7 +898,7 @@ |
875 | 898 | } |
876 | 899 | /* Add the persistent grant into the list of free grants */ |
877 | 900 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { |
878 | - llist_add(&s->grants_used[i]->node, &info->persistent_gnts); | |
901 | + list_add(&s->grants_used[i]->node, &info->persistent_gnts); | |
879 | 902 | info->persistent_gnts_c++; |
880 | 903 | } |
881 | 904 | } |
... | ... | @@ -1013,6 +1036,12 @@ |
1013 | 1036 | |
1014 | 1037 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
1015 | 1038 | |
1039 | + /* Allocate memory for grants */ | |
1040 | + err = fill_grant_buffer(info, BLK_RING_SIZE * | |
1041 | + BLKIF_MAX_SEGMENTS_PER_REQUEST); | |
1042 | + if (err) | |
1043 | + goto fail; | |
1044 | + | |
1016 | 1045 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); |
1017 | 1046 | if (err < 0) { |
1018 | 1047 | free_page((unsigned long)sring); |
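
Sizing note, assuming the standard single-page ring: BLK_RING_SIZE is typically 32 requests and BLKIF_MAX_SEGMENTS_PER_REQUEST is 11, so the buffer holds 32 * 11 = 352 pages, roughly 1.4 MB per device with 4 KiB pages, paid once at connect time rather than on every request.
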
... | ... | @@ -1171,7 +1200,7 @@ |
1171 | 1200 | spin_lock_init(&info->io_lock); |
1172 | 1201 | info->xbdev = dev; |
1173 | 1202 | info->vdevice = vdevice; |
1174 | - init_llist_head(&info->persistent_gnts); | |
1203 | + INIT_LIST_HEAD(&info->persistent_gnts); | |
1175 | 1204 | info->persistent_gnts_c = 0; |
1176 | 1205 | info->connected = BLKIF_STATE_DISCONNECTED; |
1177 | 1206 | INIT_WORK(&info->work, blkif_restart_queue); |
1178 | 1207 | |
... | ... | @@ -1203,11 +1232,10 @@ |
1203 | 1232 | int j; |
1204 | 1233 | |
1205 | 1234 | /* Stage 1: Make a safe copy of the shadow state. */ |
1206 | - copy = kmalloc(sizeof(info->shadow), | |
1235 | + copy = kmemdup(info->shadow, sizeof(info->shadow), | |
1207 | 1236 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); |
1208 | 1237 | if (!copy) |
1209 | 1238 | return -ENOMEM; |
1210 | - memcpy(copy, info->shadow, sizeof(info->shadow)); | |
1211 | 1239 | |
1212 | 1240 | /* Stage 2: Set up free list. */ |
1213 | 1241 | memset(&info->shadow, 0, sizeof(info->shadow)); |
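
The kmalloc()-plus-memcpy() pair collapses into kmemdup(), one of the merge message's removals of unnecessary logic: same flags, same size, one call. A userspace equivalent for illustration, with a hypothetical helper name:

    #include <stdlib.h>
    #include <string.h>

    /* Allocate-and-copy in one step, like kmemdup(). */
    static void *memdup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }

    int main(void)
    {
            int shadow[4] = { 1, 2, 3, 4 };
            int *copy = memdup(shadow, sizeof(shadow));
            int ok = copy != NULL;

            free(copy);
            return ok ? 0 : 1;
    }
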
... | ... | @@ -1236,7 +1264,7 @@ |
1236 | 1264 | gnttab_grant_foreign_access_ref( |
1237 | 1265 | req->u.rw.seg[j].gref, |
1238 | 1266 | info->xbdev->otherend_id, |
1239 | - pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), | |
1267 | + pfn_to_mfn(copy[i].grants_used[j]->pfn), | |
1240 | 1268 | 0); |
1241 | 1269 | } |
1242 | 1270 | info->shadow[req->u.rw.id].req = *req; |
include/xen/interface/io/blkif.h
... | ... | @@ -138,11 +138,21 @@ |
138 | 138 | uint8_t _pad3; |
139 | 139 | } __attribute__((__packed__)); |
140 | 140 | |
141 | +struct blkif_request_other { | |
142 | + uint8_t _pad1; | |
143 | + blkif_vdev_t _pad2; /* only for read/write requests */ | |
144 | +#ifdef CONFIG_X86_64 | |
145 | + uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ | |
146 | +#endif | |
147 | + uint64_t id; /* private guest value, echoed in resp */ | |
148 | +} __attribute__((__packed__)); | |
149 | + | |
141 | 150 | struct blkif_request { |
142 | 151 | uint8_t operation; /* BLKIF_OP_??? */ |
143 | 152 | union { |
144 | 153 | struct blkif_request_rw rw; |
145 | 154 | struct blkif_request_discard discard; |
155 | + struct blkif_request_other other; | |
146 | 156 | } u; |
147 | 157 | } __attribute__((__packed__)); |
148 | 158 |