// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

/* Signature shared by the two guard-tag checksum helpers below. */
typedef __be16 (csum_fn) (void *, unsigned int);

/* Guard tag computed with the T10-DIF CRC16 (used by the -CRC profiles). */
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

/* Guard tag computed with the IP checksum (used by the -IP profiles). */
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, |
5eaed68dd
|
30 |
csum_fn *fn, enum t10_dif_type type) |
2341c2f8c
|
31 32 33 34 35 36 37 38 |
{ unsigned int i; for (i = 0 ; i < iter->data_size ; i += iter->interval) { struct t10_pi_tuple *pi = iter->prot_buf; pi->guard_tag = fn(iter->data_buf, iter->interval); pi->app_tag = 0; |
5eaed68dd
|
39 |
if (type == T10_PI_TYPE1_PROTECTION) |
2341c2f8c
|
40 41 42 43 44 45 46 47 |
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); else pi->ref_tag = 0; iter->data_buf += iter->interval; iter->prot_buf += sizeof(struct t10_pi_tuple); iter->seed++; } |
4e4cbee93
|
48 |
return BLK_STS_OK; |
2341c2f8c
|
49 |
} |
4e4cbee93
|
50 |
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, |
5eaed68dd
|
51 |
csum_fn *fn, enum t10_dif_type type) |
2341c2f8c
|
52 53 |
{ unsigned int i; |
be21683e4
|
54 |
BUG_ON(type == T10_PI_TYPE0_PROTECTION); |
2341c2f8c
|
55 56 57 |
for (i = 0 ; i < iter->data_size ; i += iter->interval) { struct t10_pi_tuple *pi = iter->prot_buf; __be16 csum; |
be21683e4
|
58 59 |
if (type == T10_PI_TYPE1_PROTECTION || type == T10_PI_TYPE2_PROTECTION) { |
128b6f9fd
|
60 |
if (pi->app_tag == T10_PI_APP_ESCAPE) |
2341c2f8c
|
61 62 63 64 65 66 67 68 69 |
goto next; if (be32_to_cpu(pi->ref_tag) != lower_32_bits(iter->seed)) { pr_err("%s: ref tag error at location %llu " \ "(rcvd %u) ", iter->disk_name, (unsigned long long) iter->seed, be32_to_cpu(pi->ref_tag)); |
a462b9508
|
70 |
return BLK_STS_PROTECTION; |
2341c2f8c
|
71 |
} |
be21683e4
|
72 |
} else if (type == T10_PI_TYPE3_PROTECTION) { |
128b6f9fd
|
73 74 |
if (pi->app_tag == T10_PI_APP_ESCAPE && pi->ref_tag == T10_PI_REF_ESCAPE) |
2341c2f8c
|
75 |
goto next; |
2341c2f8c
|
76 77 78 79 80 81 82 83 84 85 |
} csum = fn(iter->data_buf, iter->interval); if (pi->guard_tag != csum) { pr_err("%s: guard tag error at sector %llu " \ "(rcvd %04x, want %04x) ", iter->disk_name, (unsigned long long)iter->seed, be16_to_cpu(pi->guard_tag), be16_to_cpu(csum)); |
4e4cbee93
|
86 |
return BLK_STS_PROTECTION; |
2341c2f8c
|
87 88 89 90 91 92 93 |
} next: iter->data_buf += iter->interval; iter->prot_buf += sizeof(struct t10_pi_tuple); iter->seed++; } |
4e4cbee93
|
94 |
return BLK_STS_OK; |
2341c2f8c
|
95 |
} |
4e4cbee93
|
96 |
/* Generate Type 1 PI using the T10-DIF CRC16 for the guard tag. */
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
/* Generate Type 1 PI using the IP checksum for the guard tag. */
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
/* Verify Type 1 PI using the T10-DIF CRC16 for the guard tag. */
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
/* Verify Type 1 PI using the IP checksum for the guard tag. */
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
/** |
54d4e6ab9
|
113 |
* t10_pi_type1_prepare - prepare PI prior submitting request to device |
10c41ddd6
|
114 |
* @rq: request with PI that should be prepared |
10c41ddd6
|
115 116 117 118 119 120 |
* * For Type 1/Type 2, the virtual start sector is the one that was * originally submitted by the block layer for the ref_tag usage. Due to * partitioning, MD/DM cloning, etc. the actual physical start sector is * likely to be different. Remap protection information to match the * physical LBA. |
10c41ddd6
|
121 |
*/ |
54d4e6ab9
|
122 |
static void t10_pi_type1_prepare(struct request *rq) |
10c41ddd6
|
123 124 125 126 |
{ const int tuple_sz = rq->q->integrity.tuple_size; u32 ref_tag = t10_pi_ref_tag(rq); struct bio *bio; |
10c41ddd6
|
127 128 129 130 131 132 133 134 135 136 137 |
__rq_for_each_bio(bio, rq) { struct bio_integrity_payload *bip = bio_integrity(bio); u32 virt = bip_get_seed(bip) & 0xffffffff; struct bio_vec iv; struct bvec_iter iter; /* Already remapped? */ if (bip->bip_flags & BIP_MAPPED_INTEGRITY) break; bip_for_each_vec(iv, bip, iter) { |
10c41ddd6
|
138 |
unsigned int j; |
8aec120a9
|
139 |
void *p; |
10c41ddd6
|
140 |
|
8aec120a9
|
141 |
p = bvec_kmap_local(&iv); |
10c41ddd6
|
142 143 144 145 146 147 148 149 150 |
for (j = 0; j < iv.bv_len; j += tuple_sz) { struct t10_pi_tuple *pi = p; if (be32_to_cpu(pi->ref_tag) == virt) pi->ref_tag = cpu_to_be32(ref_tag); virt++; ref_tag++; p += tuple_sz; } |
8aec120a9
|
151 |
kunmap_local(p); |
10c41ddd6
|
152 153 154 155 156 |
} bip->bip_flags |= BIP_MAPPED_INTEGRITY; } } |
10c41ddd6
|
157 158 |
/**
 * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	/* Only remap as many intervals as @nr_bytes actually covers. */
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			/* Stop once the completed-interval budget runs out. */
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				/* Undo the physical remap done at prepare. */
				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
/* Generate Type 3 PI using the T10-DIF CRC16 for the guard tag. */
static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

/* Generate Type 3 PI using the IP checksum for the guard tag. */
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Verify Type 3 PI using the T10-DIF CRC16 for the guard tag. */
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

/* Verify Type 3 PI using the IP checksum for the guard tag. */
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

/* Type 1 protection with CRC16 guard tags. */
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

/* Type 1 protection with IP-checksum guard tags. */
const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

/* Type 3 protection with CRC16 guard tags (no ref-tag remapping). */
const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

/* Type 3 protection with IP-checksum guard tags (no ref-tag remapping). */
const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

MODULE_LICENSE("GPL");