block/t10-pi.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * t10_pi.c - Functions for generating and verifying T10 Protection
   *	      Information.
   */
  
  #include <linux/t10-pi.h>
  #include <linux/blkdev.h>
  #include <linux/crc-t10dif.h>
  #include <linux/module.h>
  #include <net/checksum.h>
  
  typedef __be16 (csum_fn) (void *, unsigned int);
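
/*
 * The guard tag is computed by one of two checksum helpers: a T10-DIF
 * CRC16 (t10_pi_crc_fn) for the *-CRC profiles, or an IP checksum
 * (t10_pi_ip_fn) for the *-IP profiles, which DIX-capable controllers
 * typically convert to the on-disk CRC in hardware.
 */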
  static __be16 t10_pi_crc_fn(void *data, unsigned int len)
  {
  	return cpu_to_be16(crc_t10dif(data, len));
  }
  
  static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  {
  	return (__force __be16)ip_compute_csum(data, len);
  }
  
  /*
   * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
   * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
   * tag.
   */
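/*
 * For reference, each 8-byte PI tuple below follows the struct
 * t10_pi_tuple layout from <linux/t10-pi.h>:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	(checksum over one data interval)
 *		__be16 app_tag;		(opaque, application defined)
 *		__be32 ref_tag;		(target LBA, Type 1/2 only)
 *	};
 */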
  static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
  		csum_fn *fn, enum t10_dif_type type)
  {
  	unsigned int i;
  
  	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
  		struct t10_pi_tuple *pi = iter->prot_buf;
  
  		pi->guard_tag = fn(iter->data_buf, iter->interval);
  		pi->app_tag = 0;
  		if (type == T10_PI_TYPE1_PROTECTION)
  			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
  		else
  			pi->ref_tag = 0;
  
  		iter->data_buf += iter->interval;
  		iter->prot_buf += sizeof(struct t10_pi_tuple);
  		iter->seed++;
  	}
  	return BLK_STS_OK;
  }
  static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
  		csum_fn *fn, enum t10_dif_type type)
  {
  	unsigned int i;
  	BUG_ON(type == T10_PI_TYPE0_PROTECTION);
  	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
  		struct t10_pi_tuple *pi = iter->prot_buf;
  		__be16 csum;
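		/*
		 * An app tag of all ones (T10_PI_APP_ESCAPE) marks an
		 * interval whose protection information should not be
		 * checked; Type 3 additionally requires the ref tag to be
		 * all ones (T10_PI_REF_ESCAPE).
		 */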
  		if (type == T10_PI_TYPE1_PROTECTION ||
  		    type == T10_PI_TYPE2_PROTECTION) {
  			if (pi->app_tag == T10_PI_APP_ESCAPE)
  				goto next;
  
  			if (be32_to_cpu(pi->ref_tag) !=
  			    lower_32_bits(iter->seed)) {
  				pr_err("%s: ref tag error at location %llu " \
  				       "(rcvd %u)
  ", iter->disk_name,
  				       (unsigned long long)
  				       iter->seed, be32_to_cpu(pi->ref_tag));
  				return BLK_STS_PROTECTION;
  			}
  		} else if (type == T10_PI_TYPE3_PROTECTION) {
  			if (pi->app_tag == T10_PI_APP_ESCAPE &&
  			    pi->ref_tag == T10_PI_REF_ESCAPE)
  				goto next;
  		}
  
  		csum = fn(iter->data_buf, iter->interval);
  
  		if (pi->guard_tag != csum) {
  			pr_err("%s: guard tag error at sector %llu " \
  			       "(rcvd %04x, want %04x)
  ", iter->disk_name,
  			       (unsigned long long)iter->seed,
  			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
  			return BLK_STS_PROTECTION;
  		}
  
  next:
  		iter->data_buf += iter->interval;
  		iter->prot_buf += sizeof(struct t10_pi_tuple);
  		iter->seed++;
  	}
  	return BLK_STS_OK;
  }
  static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
  {
  	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
  }
  static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
  {
  	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
  }
  static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
  {
  	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
  }
  static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
  {
  	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
  }
  /**
 * t10_pi_type1_prepare - prepare PI prior to submitting a request to the device
   * @rq:              request with PI that should be prepared
   *
   * For Type 1/Type 2, the virtual start sector is the one that was
   * originally submitted by the block layer for the ref_tag usage. Due to
   * partitioning, MD/DM cloning, etc. the actual physical start sector is
   * likely to be different. Remap protection information to match the
   * physical LBA.
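 *
 * For example (illustrative numbers, assuming 512-byte protection
 * intervals): a bio submitted at virtual sector 0 of a partition that
 * starts at LBA 2048 carries tuples with ref_tag 0, 1, 2, ...; this hook
 * rewrites them to 2048, 2049, 2050, ... before the request reaches the
 * device.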
   */
  static void t10_pi_type1_prepare(struct request *rq)
  {
  	const int tuple_sz = rq->q->integrity.tuple_size;
  	u32 ref_tag = t10_pi_ref_tag(rq);
  	struct bio *bio;
  	__rq_for_each_bio(bio, rq) {
  		struct bio_integrity_payload *bip = bio_integrity(bio);
  		u32 virt = bip_get_seed(bip) & 0xffffffff;
  		struct bio_vec iv;
  		struct bvec_iter iter;
  
  		/* Already remapped? */
  		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
  			break;
  
  		bip_for_each_vec(iv, bip, iter) {
  			void *p, *pmap;
  			unsigned int j;
  
  			pmap = kmap_atomic(iv.bv_page);
  			p = pmap + iv.bv_offset;
  			for (j = 0; j < iv.bv_len; j += tuple_sz) {
  				struct t10_pi_tuple *pi = p;
  
  				if (be32_to_cpu(pi->ref_tag) == virt)
  					pi->ref_tag = cpu_to_be32(ref_tag);
  				virt++;
  				ref_tag++;
  				p += tuple_sz;
  			}
  
  			kunmap_atomic(pmap);
  		}
  
  		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
  	}
  }
  
  /**
 * t10_pi_type1_complete - prepare PI prior to returning the request to the block layer
   * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes whose protection information should be remapped
   *
   * For Type 1/Type 2, the virtual start sector is the one that was
   * originally submitted by the block layer for the ref_tag usage. Due to
   * partitioning, MD/DM cloning, etc. the actual physical start sector is
   * likely to be different. Since the physical start sector was submitted
   * to the device, we should remap it back to virtual values expected by the
   * block layer.
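 *
 * Continuing the example from t10_pi_type1_prepare(): tuples returned by
 * the device with ref_tag 2048, 2049, ... are rewritten back to the
 * virtual values 0, 1, ... that the upper layers originally generated.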
   */
  static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
  {
  	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
  	const int tuple_sz = rq->q->integrity.tuple_size;
  	u32 ref_tag = t10_pi_ref_tag(rq);
  	struct bio *bio;
  	__rq_for_each_bio(bio, rq) {
  		struct bio_integrity_payload *bip = bio_integrity(bio);
  		u32 virt = bip_get_seed(bip) & 0xffffffff;
  		struct bio_vec iv;
  		struct bvec_iter iter;
  
  		bip_for_each_vec(iv, bip, iter) {
  			void *p, *pmap;
  			unsigned int j;
  
  			pmap = kmap_atomic(iv.bv_page);
  			p = pmap + iv.bv_offset;
  			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
  				struct t10_pi_tuple *pi = p;
  
  				if (be32_to_cpu(pi->ref_tag) == ref_tag)
  					pi->ref_tag = cpu_to_be32(virt);
  				virt++;
  				ref_tag++;
  				intervals--;
  				p += tuple_sz;
  			}
  
  			kunmap_atomic(pmap);
  		}
  	}
  }
  
  static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
  {
  	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
  }
  
  static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
  {
  	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
  }
  
  static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
  {
  	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
  }
  
  static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
  {
  	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
  }
  /* Type 3 does not have a reference tag so no remapping is required. */
  static void t10_pi_type3_prepare(struct request *rq)
  {
  }
  /* Type 3 does not have a reference tag so no remapping is required. */
  static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
  {
  }
  
  const struct blk_integrity_profile t10_pi_type1_crc = {
  	.name			= "T10-DIF-TYPE1-CRC",
  	.generate_fn		= t10_pi_type1_generate_crc,
  	.verify_fn		= t10_pi_type1_verify_crc,
  	.prepare_fn		= t10_pi_type1_prepare,
  	.complete_fn		= t10_pi_type1_complete,
  };
  EXPORT_SYMBOL(t10_pi_type1_crc);
  
  const struct blk_integrity_profile t10_pi_type1_ip = {
  	.name			= "T10-DIF-TYPE1-IP",
  	.generate_fn		= t10_pi_type1_generate_ip,
  	.verify_fn		= t10_pi_type1_verify_ip,
  	.prepare_fn		= t10_pi_type1_prepare,
  	.complete_fn		= t10_pi_type1_complete,
  };
  EXPORT_SYMBOL(t10_pi_type1_ip);
  
  const struct blk_integrity_profile t10_pi_type3_crc = {
  	.name			= "T10-DIF-TYPE3-CRC",
  	.generate_fn		= t10_pi_type3_generate_crc,
  	.verify_fn		= t10_pi_type3_verify_crc,
  	.prepare_fn		= t10_pi_type3_prepare,
  	.complete_fn		= t10_pi_type3_complete,
  };
  EXPORT_SYMBOL(t10_pi_type3_crc);
  
  const struct blk_integrity_profile t10_pi_type3_ip = {
  	.name			= "T10-DIF-TYPE3-IP",
  	.generate_fn		= t10_pi_type3_generate_ip,
  	.verify_fn		= t10_pi_type3_verify_ip,
  	.prepare_fn		= t10_pi_type3_prepare,
  	.complete_fn		= t10_pi_type3_complete,
  };
  EXPORT_SYMBOL(t10_pi_type3_ip);
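
/*
 * Rough usage sketch (modeled on the SCSI disk driver; the gendisk
 * pointer below is illustrative only): a driver advertises one of these
 * profiles by registering a blk_integrity template against its gendisk:
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 *
 * The block layer then calls the generate_fn/verify_fn hooks when it
 * creates and checks integrity payloads, and prepare_fn/complete_fn
 * around request submission and completion.
 */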
  
  MODULE_LICENSE("GPL");