Commit 304a204ec9d4b9b31f3491e736bfbba44feaa1b0

Authored by Shane Wang
Committed by Herbert Xu
1 parent 32cbd7dfce

crypto: vmac - Fix big-endian support

This patch fixes the vmac algorithm, adds more test cases for vmac,
and fixes the test failures on big-endian systems such as s390.

Signed-off-by: Shane Wang <shane.wang@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Showing 2 changed files with 96 additions and 43 deletions Side-by-side Diff

... ... @@ -1669,17 +1669,73 @@
1669 1669 }
1670 1670 };
1671 1671  
1672   -#define VMAC_AES_TEST_VECTORS 1
1673   -static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
  1672 +#define VMAC_AES_TEST_VECTORS 8
  1673 +static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
1674 1674 '\x02', '\x03', '\x02', '\x02',
1675 1675 '\x02', '\x04', '\x01', '\x07',
1676 1676 '\x04', '\x01', '\x04', '\x03',};
  1677 +static char vmac_string2[128] = {'a', 'b', 'c',};
  1678 +static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
  1679 + 'a', 'b', 'c', 'a', 'b', 'c',
  1680 + 'a', 'b', 'c', 'a', 'b', 'c',
  1681 + 'a', 'b', 'c', 'a', 'b', 'c',
  1682 + 'a', 'b', 'c', 'a', 'b', 'c',
  1683 + 'a', 'b', 'c', 'a', 'b', 'c',
  1684 + 'a', 'b', 'c', 'a', 'b', 'c',
  1685 + 'a', 'b', 'c', 'a', 'b', 'c',
  1686 + };
  1687 +
1677 1688 static struct hash_testvec aes_vmac128_tv_template[] = {
1678 1689 {
  1690 + .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
  1691 + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
  1692 + .plaintext = NULL,
  1693 + .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
  1694 + .psize = 0,
  1695 + .ksize = 16,
  1696 + }, {
1679 1697 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1680 1698 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1681   - .plaintext = vmac_string,
1682   - .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
  1699 + .plaintext = vmac_string1,
  1700 + .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
  1701 + .psize = 128,
  1702 + .ksize = 16,
  1703 + }, {
  1704 + .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
  1705 + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
  1706 + .plaintext = vmac_string2,
  1707 + .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
  1708 + .psize = 128,
  1709 + .ksize = 16,
  1710 + }, {
  1711 + .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
  1712 + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
  1713 + .plaintext = vmac_string3,
  1714 + .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
  1715 + .psize = 128,
  1716 + .ksize = 16,
  1717 + }, {
  1718 + .key = "abcdefghijklmnop",
  1719 + .plaintext = NULL,
  1720 + .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
  1721 + .psize = 0,
  1722 + .ksize = 16,
  1723 + }, {
  1724 + .key = "abcdefghijklmnop",
  1725 + .plaintext = vmac_string1,
  1726 + .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
  1727 + .psize = 128,
  1728 + .ksize = 16,
  1729 + }, {
  1730 + .key = "abcdefghijklmnop",
  1731 + .plaintext = vmac_string2,
  1732 + .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
  1733 + .psize = 128,
  1734 + .ksize = 16,
  1735 + }, {
  1736 + .key = "abcdefghijklmnop",
  1737 + .plaintext = vmac_string3,
  1738 + .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
1683 1739 .psize = 128,
1684 1740 .ksize = 16,
1685 1741 },
... ... @@ -43,6 +43,8 @@
43 43 const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
44 44 const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
45 45  
  46 +#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
  47 +
46 48 #ifdef __LITTLE_ENDIAN
47 49 #define INDEX_HIGH 1
48 50 #define INDEX_LOW 0
... ... @@ -110,8 +112,8 @@
110 112 int i; u64 th, tl; \
111 113 rh = rl = 0; \
112 114 for (i = 0; i < nw; i += 2) { \
113   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
114   - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
  115 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  116 + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
115 117 ADD128(rh, rl, th, tl); \
116 118 } \
117 119 } while (0)
118 120  
... ... @@ -121,11 +123,11 @@
121 123 int i; u64 th, tl; \
122 124 rh1 = rl1 = rh = rl = 0; \
123 125 for (i = 0; i < nw; i += 2) { \
124   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
125   - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
  126 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  127 + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
126 128 ADD128(rh, rl, th, tl); \
127   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
128   - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
  129 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
  130 + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
129 131 ADD128(rh1, rl1, th, tl); \
130 132 } \
131 133 } while (0)
132 134  
133 135  
134 136  
... ... @@ -136,17 +138,17 @@
136 138 int i; u64 th, tl; \
137 139 rh = rl = 0; \
138 140 for (i = 0; i < nw; i += 8) { \
139   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
140   - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
  141 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  142 + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
141 143 ADD128(rh, rl, th, tl); \
142   - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
143   - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
  144 + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
  145 + pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
144 146 ADD128(rh, rl, th, tl); \
145   - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
146   - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
  147 + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
  148 + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
147 149 ADD128(rh, rl, th, tl); \
148   - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
149   - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
  150 + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
  151 + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
150 152 ADD128(rh, rl, th, tl); \
151 153 } \
152 154 } while (0)
153 155  
154 156  
155 157  
156 158  
157 159  
158 160  
159 161  
... ... @@ -156,29 +158,29 @@
156 158 int i; u64 th, tl; \
157 159 rh1 = rl1 = rh = rl = 0; \
158 160 for (i = 0; i < nw; i += 8) { \
159   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
160   - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
  161 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
  162 + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
161 163 ADD128(rh, rl, th, tl); \
162   - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
163   - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
  164 + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
  165 + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
164 166 ADD128(rh1, rl1, th, tl); \
165   - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
166   - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
  167 + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
  168 + pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
167 169 ADD128(rh, rl, th, tl); \
168   - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
169   - le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
  170 + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
  171 + pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
170 172 ADD128(rh1, rl1, th, tl); \
171   - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
172   - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
  173 + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
  174 + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
173 175 ADD128(rh, rl, th, tl); \
174   - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
175   - le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
  176 + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
  177 + pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
176 178 ADD128(rh1, rl1, th, tl); \
177   - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
178   - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
  179 + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
  180 + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
179 181 ADD128(rh, rl, th, tl); \
180   - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
181   - le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
  182 + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
  183 + pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
182 184 ADD128(rh1, rl1, th, tl); \
183 185 } \
184 186 } while (0)
... ... @@ -216,8 +218,8 @@
216 218 int i; \
217 219 rh = rl = t = 0; \
218 220 for (i = 0; i < nw; i += 2) { \
219   - t1 = le64_to_cpup(mp+i) + kp[i]; \
220   - t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
  221 + t1 = pe64_to_cpup(mp+i) + kp[i]; \
  222 + t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
221 223 m2 = MUL32(t1 >> 32, t2); \
222 224 m1 = MUL32(t1, t2 >> 32); \
223 225 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
... ... @@ -322,8 +324,7 @@
322 324 ctx->first_block_processed = 0;
323 325 }
324 326  
325   -static u64 l3hash(u64 p1, u64 p2,
326   - u64 k1, u64 k2, u64 len)
  327 +static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
327 328 {
328 329 u64 rh, rl, t, z = 0;
329 330  
... ... @@ -474,7 +475,7 @@
474 475 }
475 476 p = be64_to_cpup(out_p + i);
476 477 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
477   - return p + h;
  478 + return le64_to_cpu(p + h);
478 479 }
479 480  
480 481 static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
... ... @@ -549,10 +550,6 @@
549 550  
550 551 static int vmac_init(struct shash_desc *pdesc)
551 552 {
552   - struct crypto_shash *parent = pdesc->tfm;
553   - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
554   -
555   - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
556 553 return 0;
557 554 }
558 555