Commit f6ff53d3611b564661896be23369b54d84941a0e
Committed by Jens Axboe
1 parent 1a9b4993b7
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
block: reorganize rounding of max_discard_sectors
Mostly a preparation for the next patch. In principle this fixes an infinite loop if max_discard_sectors < granularity, but that really shouldn't happen.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Tested-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
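To make the reordering concrete, here is a minimal user-space C sketch of the new flow, under clearly labeled assumptions: the struct and function names are hypothetical, the queue limits are faked, and a plain modulo stands in for the kernel's round_down() helper. It treats a zero (unknown) discard granularity as one sector, rounds max_discard_sectors down to a whole number of granules before the zero check, and returns -EOPNOTSUPP when nothing usable remains.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant queue_limits fields. */
struct fake_limits {
	unsigned int discard_granularity;	/* bytes, 0 = unknown */
	unsigned int max_discard_sectors;	/* 512-byte sectors */
};

/* Mirrors the reorganized logic: round first, then reject a zero result. */
static int clamp_discard_limit(const struct fake_limits *l, unsigned int *max_out)
{
	/* Zero-sector (unknown) and one-sector granularities are the same. */
	unsigned int granularity = l->discard_granularity >> 9;
	unsigned int max_discard_sectors = l->max_discard_sectors;

	if (granularity == 0)
		granularity = 1;

	/* Round down to a whole number of granules (modulo stands in for round_down()). */
	max_discard_sectors -= max_discard_sectors % granularity;

	if (max_discard_sectors == 0)
		return -EOPNOTSUPP;	/* nothing to submit; avoids the endless loop */

	*max_out = max_discard_sectors;
	return 0;
}

int main(void)
{
	/* max_discard_sectors < granularity: the case the commit message mentions. */
	struct fake_limits l = {
		.discard_granularity = 1024 * 1024,	/* 1 MiB = 2048 sectors */
		.max_discard_sectors = 1024,		/* only 512 KiB */
	};
	unsigned int max = 0;
	int ret = clamp_discard_limit(&l, &max);

	printf("ret=%d max=%u\n", ret, max);	/* ret is -EOPNOTSUPP, max stays 0 */
	return 0;
}

For a sane device (say, granularity of 8 sectors and max_discard_sectors of 65535) the same routine returns 0 and rounds the limit to 65528, which matches what the new round_down() line computes for a power-of-two granularity.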
Showing 1 changed file with 5 additions and 4 deletions

Inline Diff
block/blk-lib.c
 /*
  * Functions related to generic helpers functions
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>

 #include "blk.h"

 struct bio_batch {
 	atomic_t done;
 	unsigned long flags;
 	struct completion *wait;
 };

 static void bio_batch_end_io(struct bio *bio, int err)
 {
 	struct bio_batch *bb = bio->bi_private;

 	if (err && (err != -EOPNOTSUPP))
 		clear_bit(BIO_UPTODATE, &bb->flags);
 	if (atomic_dec_and_test(&bb->done))
 		complete(bb->wait);
 	bio_put(bio);
 }

 /**
  * blkdev_issue_discard - queue a discard
  * @bdev: blockdev to issue discard for
  * @sector: start sector
  * @nr_sects: number of sectors to discard
  * @gfp_mask: memory allocation flags (for bio_alloc)
  * @flags: BLKDEV_IFL_* flags to control behaviour
  *
  * Description:
  *    Issue a discard request for the sectors in question.
  */
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
+	unsigned int granularity;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;

 	if (!q)
 		return -ENXIO;

 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;

+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = round_down(max_discard_sectors, granularity);
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
-	} else if (q->limits.discard_granularity) {
-		unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
-		max_discard_sectors &= ~(disc_sects - 1);
 	}

 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
 		type |= REQ_SECURE;
 	}

 	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;

 	while (nr_sects) {
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
 			break;
 		}

 		bio->bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;

 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
 			nr_sects -= max_discard_sectors;
 			sector += max_discard_sectors;
 		} else {
 			bio->bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}

 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
 	}

 	/* Wait for bios in-flight */
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion(&wait);

 	if (!test_bit(BIO_UPTODATE, &bb.flags))
 		ret = -EIO;

 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);

 /**
  * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev: blockdev to issue
  * @sector: start sector
  * @nr_sects: number of sectors to write
  * @gfp_mask: memory allocation flags (for bio_alloc)
  *
  * Description:
  *  Generate and issue number of bios with zerofiled pages.
  */

 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
 	unsigned int sz;
 	DECLARE_COMPLETION_ONSTACK(wait);

 	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;

 	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
 				min(nr_sects, (sector_t)BIO_MAX_PAGES));
 		if (!bio) {
 			ret = -ENOMEM;
 			break;
 		}

 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_private = &bb;

 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
 			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
 			nr_sects -= ret >> 9;
 			sector += ret >> 9;
 			if (ret < (sz << 9))
 				break;
 		}
 		ret = 0;
 		atomic_inc(&bb.done);
 		submit_bio(WRITE, bio);
 	}

 	/* Wait for bios in-flight */
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion(&wait);

 	if (!test_bit(BIO_UPTODATE, &bb.flags))
 		/* One of bios in the batch was completed with error.*/
 		ret = -EIO;

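As background on the infinite loop the commit message and the in-code comment guard against: in the submit loop of blkdev_issue_discard() above, a max_discard_sectors of 0 would make every iteration take the nr_sects > max_discard_sectors branch, queue a zero-length bio, and leave nr_sects unchanged. A trimmed-down user-space sketch of just that loop arithmetic (plain integers instead of bios, and an iteration cap added so the demo terminates) shows the stall:

#include <stdio.h>

int main(void)
{
	unsigned long long nr_sects = 8;	/* sectors still to discard */
	unsigned int max_discard_sectors = 0;	/* the degenerate value being guarded against */
	int i;

	/* Same chunking as the while (nr_sects) loop in blkdev_issue_discard(). */
	for (i = 0; nr_sects && i < 4; i++) {
		unsigned long long chunk;

		if (nr_sects > max_discard_sectors) {
			chunk = max_discard_sectors;	/* 0 sectors: no progress */
			nr_sects -= chunk;
		} else {
			chunk = nr_sects;
			nr_sects = 0;
		}
		printf("iteration %d: chunk=%llu nr_sects=%llu\n", i, chunk, nr_sects);
	}
	/* Without the cap this would spin forever, since nr_sects never reaches 0. */
	return 0;
}

Rounding max_discard_sectors before the zero check, as the patch does, turns that degenerate case into an early -EOPNOTSUPP instead.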