Commit 9751bfd1c9177a8ab0a910fe279a8815e498561e
1 parent: d554a3f9d3
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
n2_crypto: remove IRQF_SAMPLE_RANDOM which is now a no-op
With the changes in the random tree, IRQF_SAMPLE_RANDOM is now a no-op;
interrupt randomness is now collected unconditionally in a very
low-overhead fashion; see commit 775f4b297b. The IRQF_SAMPLE_RANDOM flag
was scheduled to be removed in 2009 on the feature-removal-schedule, so
this patch is preparation for the final removal of this flag.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
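The removal hunk itself lies beyond the excerpt shown below; given the
diffstat of one addition and two deletions, the change amounts to
dropping the flag from the driver's request_irq() call. A sketch of its
likely shape, with the argument names (p->irq, handler, p->irq_name, p)
assumed from the driver's IRQ setup rather than quoted from the hunk:

	-	err = request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
	-			  p->irq_name, p);
	+	err = request_irq(p->irq, handler, 0, p->irq_name, p);

Passing 0 as the flags argument is the standard request_irq() idiom when
no special IRQ behavior is requested.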
Showing 1 changed file with 1 addition and 2 deletions
drivers/crypto/n2_core.c
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		300

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset, test to see if that offset is in
 * the range old_head, new_head, inclusive.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
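/* Worked example for job_finished() above (hypothetical offsets): if
 * the queue has wrapped so that old_head = 0xe0 and new_head = 0x20,
 * the second branch applies, and offsets 0xf0 and 0x10 count as
 * finished while 0x80 does not.
 */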

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
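/* Example for spu_queue_num_free() above (hypothetical values): with
 * CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES = 0x100, head = 0x40 and
 * tail = 0x80 give diff = (0x100 - 0x80) + 0x40 = 0xc0.  The final
 * "- 1" keeps one entry permanently unused so that a full queue
 * (tail just behind head) stays distinguishable from an empty one
 * (head == tail).
 */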

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
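/* Example of use: n2_do_async_digest() below builds its first CWQ
 * entry with sob = true, eob = false and opcode OPCODE_INPLACE_BIT |
 * OPCODE_AUTH_MAC, then ORs CONTROL_END_OF_BLOCK into the last
 * descriptor's control word once the scatterlist walk is complete.
 */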

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const char		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash	*fallback_tfm;
};

#define N2_HASH_KEY_MAX		32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx	base;

	struct crypto_shash	*child_shash;

	int			hash_key_len;
	unsigned char		hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request	fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warning("Child shash '%s' could not be loaded!\n",
			   n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(child_shash)];
	} desc;
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	desc.shash.tfm = child_shash;
	desc.shash.flags = crypto_ahash_get_flags(tfm) &
		CRYPTO_TFM_REQ_MAY_SLEEP;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_digest(&desc.shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
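/* Example for the setkey above: for hmac(sha256) the child block size
 * bs is 64 and the digest size ds is 32, so a 100-byte key is first
 * digested down to 32 bytes while a 20-byte key is copied into
 * ctx->hash_key verbatim; either way the result fits N2_HASH_KEY_MAX.
 */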

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}

struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = n2alg->enc_type;

	if (keylen != (3 * DES_KEY_SIZE)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}
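	/* The two loops above are the standard RC4 key-scheduling
	 * algorithm (KSA); the expanded S-box and the X/Y state bytes
	 * travel to the hardware in ctx->key.arc4 via enc_key_addr.
	 */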

	return 0;
}

static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
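	/* Example: with an 8-byte block size, nbytes = 0x1e rounds down
	 * to 0x18 here; the return below additionally caps a descriptor
	 * at the hardware's 2^16 byte limit.
	 */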
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}

static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
946 | if (err) | 946 | if (err) |
947 | break; | 947 | break; |
948 | } | 948 | } |
949 | if (!err && chunk->arr_len != 0) { | 949 | if (!err && chunk->arr_len != 0) { |
950 | chunk->dest_final = dest_prev; | 950 | chunk->dest_final = dest_prev; |
951 | list_add_tail(&chunk->entry, &rctx->chunk_list); | 951 | list_add_tail(&chunk->entry, &rctx->chunk_list); |
952 | } | 952 | } |
953 | 953 | ||
954 | return err; | 954 | return err; |
955 | } | 955 | } |
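For readability, the chunk-break test buried in the middle of n2_compute_chunks() can be read as a single predicate. A purely illustrative helper (not part of the driver) that restates the four limits enforced above:

	/* Illustrative only: restates the chunk-break condition from
	 * n2_compute_chunks(). A new chunk is started when the in-place
	 * property flips, a non-in-place destination becomes discontiguous,
	 * the descriptor array fills up, or the running total would exceed
	 * the 64 KiB (1 << 16) per-chunk limit.
	 */
	static bool n2_chunk_must_break(bool in_place, bool prev_in_place,
					unsigned long dest_paddr,
					unsigned long dest_prev,
					unsigned int arr_len,
					unsigned int tot_len, int this_len)
	{
		return in_place != prev_in_place ||
		       (!prev_in_place && dest_paddr != dest_prev) ||
		       arr_len == N2_CHUNK_ARR_LEN ||
		       tot_len + this_len > (1 << 16);
	}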
956 | 956 | ||
957 | static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) | 957 | static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) |
958 | { | 958 | { |
959 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | 959 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); |
960 | struct n2_crypto_chunk *c, *tmp; | 960 | struct n2_crypto_chunk *c, *tmp; |
961 | 961 | ||
962 | if (final_iv) | 962 | if (final_iv) |
963 | memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); | 963 | memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); |
964 | 964 | ||
965 | ablkcipher_walk_complete(&rctx->walk); | 965 | ablkcipher_walk_complete(&rctx->walk); |
966 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | 966 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { |
967 | list_del(&c->entry); | 967 | list_del(&c->entry); |
968 | if (unlikely(c != &rctx->chunk)) | 968 | if (unlikely(c != &rctx->chunk)) |
969 | kfree(c); | 969 | kfree(c); |
970 | } | 970 | } |
971 | 971 | ||
972 | } | 972 | } |
973 | 973 | ||
974 | static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) | 974 | static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) |
975 | { | 975 | { |
976 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | 976 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); |
977 | struct crypto_tfm *tfm = req->base.tfm; | 977 | struct crypto_tfm *tfm = req->base.tfm; |
978 | int err = n2_compute_chunks(req); | 978 | int err = n2_compute_chunks(req); |
979 | struct n2_crypto_chunk *c, *tmp; | 979 | struct n2_crypto_chunk *c, *tmp; |
980 | unsigned long flags, hv_ret; | 980 | unsigned long flags, hv_ret; |
981 | struct spu_queue *qp; | 981 | struct spu_queue *qp; |
982 | 982 | ||
983 | if (err) | 983 | if (err) |
984 | return err; | 984 | return err; |
985 | 985 | ||
986 | qp = cpu_to_cwq[get_cpu()]; | 986 | qp = cpu_to_cwq[get_cpu()]; |
987 | err = -ENODEV; | 987 | err = -ENODEV; |
988 | if (!qp) | 988 | if (!qp) |
989 | goto out; | 989 | goto out; |
990 | 990 | ||
991 | spin_lock_irqsave(&qp->lock, flags); | 991 | spin_lock_irqsave(&qp->lock, flags); |
992 | 992 | ||
993 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | 993 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { |
994 | err = __n2_crypt_chunk(tfm, c, qp, encrypt); | 994 | err = __n2_crypt_chunk(tfm, c, qp, encrypt); |
995 | if (err) | 995 | if (err) |
996 | break; | 996 | break; |
997 | list_del(&c->entry); | 997 | list_del(&c->entry); |
998 | if (unlikely(c != &rctx->chunk)) | 998 | if (unlikely(c != &rctx->chunk)) |
999 | kfree(c); | 999 | kfree(c); |
1000 | } | 1000 | } |
1001 | if (!err) { | 1001 | if (!err) { |
1002 | hv_ret = wait_for_tail(qp); | 1002 | hv_ret = wait_for_tail(qp); |
1003 | if (hv_ret != HV_EOK) | 1003 | if (hv_ret != HV_EOK) |
1004 | err = -EINVAL; | 1004 | err = -EINVAL; |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | spin_unlock_irqrestore(&qp->lock, flags); | 1007 | spin_unlock_irqrestore(&qp->lock, flags); |
1008 | 1008 | ||
1009 | out: | 1009 | out: |
1010 | put_cpu(); | 1010 | put_cpu(); |
1011 | 1011 | ||
1012 | n2_chunk_complete(req, NULL); | 1012 | n2_chunk_complete(req, NULL); |
1013 | return err; | 1013 | return err; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | static int n2_encrypt_ecb(struct ablkcipher_request *req) | 1016 | static int n2_encrypt_ecb(struct ablkcipher_request *req) |
1017 | { | 1017 | { |
1018 | return n2_do_ecb(req, true); | 1018 | return n2_do_ecb(req, true); |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | static int n2_decrypt_ecb(struct ablkcipher_request *req) | 1021 | static int n2_decrypt_ecb(struct ablkcipher_request *req) |
1022 | { | 1022 | { |
1023 | return n2_do_ecb(req, false); | 1023 | return n2_do_ecb(req, false); |
1024 | } | 1024 | } |
1025 | 1025 | ||
1026 | static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) | 1026 | static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) |
1027 | { | 1027 | { |
1028 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | 1028 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); |
1029 | struct crypto_tfm *tfm = req->base.tfm; | 1029 | struct crypto_tfm *tfm = req->base.tfm; |
1030 | unsigned long flags, hv_ret, iv_paddr; | 1030 | unsigned long flags, hv_ret, iv_paddr; |
1031 | int err = n2_compute_chunks(req); | 1031 | int err = n2_compute_chunks(req); |
1032 | struct n2_crypto_chunk *c, *tmp; | 1032 | struct n2_crypto_chunk *c, *tmp; |
1033 | struct spu_queue *qp; | 1033 | struct spu_queue *qp; |
1034 | void *final_iv_addr; | 1034 | void *final_iv_addr; |
1035 | 1035 | ||
1036 | final_iv_addr = NULL; | 1036 | final_iv_addr = NULL; |
1037 | 1037 | ||
1038 | if (err) | 1038 | if (err) |
1039 | return err; | 1039 | return err; |
1040 | 1040 | ||
1041 | qp = cpu_to_cwq[get_cpu()]; | 1041 | qp = cpu_to_cwq[get_cpu()]; |
1042 | err = -ENODEV; | 1042 | err = -ENODEV; |
1043 | if (!qp) | 1043 | if (!qp) |
1044 | goto out; | 1044 | goto out; |
1045 | 1045 | ||
1046 | spin_lock_irqsave(&qp->lock, flags); | 1046 | spin_lock_irqsave(&qp->lock, flags); |
1047 | 1047 | ||
1048 | if (encrypt) { | 1048 | if (encrypt) { |
1049 | iv_paddr = __pa(rctx->walk.iv); | 1049 | iv_paddr = __pa(rctx->walk.iv); |
1050 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, | 1050 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, |
1051 | entry) { | 1051 | entry) { |
1052 | c->iv_paddr = iv_paddr; | 1052 | c->iv_paddr = iv_paddr; |
1053 | err = __n2_crypt_chunk(tfm, c, qp, true); | 1053 | err = __n2_crypt_chunk(tfm, c, qp, true); |
1054 | if (err) | 1054 | if (err) |
1055 | break; | 1055 | break; |
1056 | iv_paddr = c->dest_final - rctx->walk.blocksize; | 1056 | iv_paddr = c->dest_final - rctx->walk.blocksize; |
1057 | list_del(&c->entry); | 1057 | list_del(&c->entry); |
1058 | if (unlikely(c != &rctx->chunk)) | 1058 | if (unlikely(c != &rctx->chunk)) |
1059 | kfree(c); | 1059 | kfree(c); |
1060 | } | 1060 | } |
1061 | final_iv_addr = __va(iv_paddr); | 1061 | final_iv_addr = __va(iv_paddr); |
1062 | } else { | 1062 | } else { |
1063 | list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, | 1063 | list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, |
1064 | entry) { | 1064 | entry) { |
1065 | if (c == &rctx->chunk) { | 1065 | if (c == &rctx->chunk) { |
1066 | iv_paddr = __pa(rctx->walk.iv); | 1066 | iv_paddr = __pa(rctx->walk.iv); |
1067 | } else { | 1067 | } else { |
1068 | iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + | 1068 | iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + |
1069 | tmp->arr[tmp->arr_len-1].src_len - | 1069 | tmp->arr[tmp->arr_len-1].src_len - |
1070 | rctx->walk.blocksize); | 1070 | rctx->walk.blocksize); |
1071 | } | 1071 | } |
1072 | if (!final_iv_addr) { | 1072 | if (!final_iv_addr) { |
1073 | unsigned long pa; | 1073 | unsigned long pa; |
1074 | 1074 | ||
1075 | pa = (c->arr[c->arr_len-1].src_paddr + | 1075 | pa = (c->arr[c->arr_len-1].src_paddr + |
1076 | c->arr[c->arr_len-1].src_len - | 1076 | c->arr[c->arr_len-1].src_len - |
1077 | rctx->walk.blocksize); | 1077 | rctx->walk.blocksize); |
1078 | final_iv_addr = rctx->temp_iv; | 1078 | final_iv_addr = rctx->temp_iv; |
1079 | memcpy(rctx->temp_iv, __va(pa), | 1079 | memcpy(rctx->temp_iv, __va(pa), |
1080 | rctx->walk.blocksize); | 1080 | rctx->walk.blocksize); |
1081 | } | 1081 | } |
1082 | c->iv_paddr = iv_paddr; | 1082 | c->iv_paddr = iv_paddr; |
1083 | err = __n2_crypt_chunk(tfm, c, qp, false); | 1083 | err = __n2_crypt_chunk(tfm, c, qp, false); |
1084 | if (err) | 1084 | if (err) |
1085 | break; | 1085 | break; |
1086 | list_del(&c->entry); | 1086 | list_del(&c->entry); |
1087 | if (unlikely(c != &rctx->chunk)) | 1087 | if (unlikely(c != &rctx->chunk)) |
1088 | kfree(c); | 1088 | kfree(c); |
1089 | } | 1089 | } |
1090 | } | 1090 | } |
1091 | if (!err) { | 1091 | if (!err) { |
1092 | hv_ret = wait_for_tail(qp); | 1092 | hv_ret = wait_for_tail(qp); |
1093 | if (hv_ret != HV_EOK) | 1093 | if (hv_ret != HV_EOK) |
1094 | err = -EINVAL; | 1094 | err = -EINVAL; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | spin_unlock_irqrestore(&qp->lock, flags); | 1097 | spin_unlock_irqrestore(&qp->lock, flags); |
1098 | 1098 | ||
1099 | out: | 1099 | out: |
1100 | put_cpu(); | 1100 | put_cpu(); |
1101 | 1101 | ||
1102 | n2_chunk_complete(req, err ? NULL : final_iv_addr); | 1102 | n2_chunk_complete(req, err ? NULL : final_iv_addr); |
1103 | return err; | 1103 | return err; |
1104 | } | 1104 | } |
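The reverse walk in the decrypt path above is the subtle part: in CBC, the IV consumed by a chunk is the last ciphertext block that precedes it, and an in-place operation destroys that ciphertext as it runs. A comment-style sketch of the ordering (illustrative, mirroring the code above):

	/* CBC-decrypt IV chaining, as implemented in n2_do_chaining():
	 *
	 *   IV(first chunk) = rctx->walk.iv (the request's IV)
	 *   IV(chunk i)     = last ciphertext block of chunk i-1
	 *   IV for next req = last ciphertext block overall, copied into
	 *                     rctx->temp_iv *before* any in-place decrypt
	 *                     can overwrite it
	 *
	 * Walking the chunk list in reverse lets each chunk read its IV
	 * from still-intact ciphertext.
	 */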
1105 | 1105 | ||
1106 | static int n2_encrypt_chaining(struct ablkcipher_request *req) | 1106 | static int n2_encrypt_chaining(struct ablkcipher_request *req) |
1107 | { | 1107 | { |
1108 | return n2_do_chaining(req, true); | 1108 | return n2_do_chaining(req, true); |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | static int n2_decrypt_chaining(struct ablkcipher_request *req) | 1111 | static int n2_decrypt_chaining(struct ablkcipher_request *req) |
1112 | { | 1112 | { |
1113 | return n2_do_chaining(req, false); | 1113 | return n2_do_chaining(req, false); |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | struct n2_cipher_tmpl { | 1116 | struct n2_cipher_tmpl { |
1117 | const char *name; | 1117 | const char *name; |
1118 | const char *drv_name; | 1118 | const char *drv_name; |
1119 | u8 block_size; | 1119 | u8 block_size; |
1120 | u8 enc_type; | 1120 | u8 enc_type; |
1121 | struct ablkcipher_alg ablkcipher; | 1121 | struct ablkcipher_alg ablkcipher; |
1122 | }; | 1122 | }; |
1123 | 1123 | ||
1124 | static const struct n2_cipher_tmpl cipher_tmpls[] = { | 1124 | static const struct n2_cipher_tmpl cipher_tmpls[] = { |
1125 | /* ARC4: only ECB is supported (chaining bits ignored) */ | 1125 | /* ARC4: only ECB is supported (chaining bits ignored) */ |
1126 | { .name = "ecb(arc4)", | 1126 | { .name = "ecb(arc4)", |
1127 | .drv_name = "ecb-arc4", | 1127 | .drv_name = "ecb-arc4", |
1128 | .block_size = 1, | 1128 | .block_size = 1, |
1129 | .enc_type = (ENC_TYPE_ALG_RC4_STREAM | | 1129 | .enc_type = (ENC_TYPE_ALG_RC4_STREAM | |
1130 | ENC_TYPE_CHAINING_ECB), | 1130 | ENC_TYPE_CHAINING_ECB), |
1131 | .ablkcipher = { | 1131 | .ablkcipher = { |
1132 | .min_keysize = 1, | 1132 | .min_keysize = 1, |
1133 | .max_keysize = 256, | 1133 | .max_keysize = 256, |
1134 | .setkey = n2_arc4_setkey, | 1134 | .setkey = n2_arc4_setkey, |
1135 | .encrypt = n2_encrypt_ecb, | 1135 | .encrypt = n2_encrypt_ecb, |
1136 | .decrypt = n2_decrypt_ecb, | 1136 | .decrypt = n2_decrypt_ecb, |
1137 | }, | 1137 | }, |
1138 | }, | 1138 | }, |
1139 | 1139 | ||
1140 | /* DES: ECB, CBC and CFB are supported */ | 1140 | /* DES: ECB, CBC and CFB are supported */ |

1141 | { .name = "ecb(des)", | 1141 | { .name = "ecb(des)", |
1142 | .drv_name = "ecb-des", | 1142 | .drv_name = "ecb-des", |
1143 | .block_size = DES_BLOCK_SIZE, | 1143 | .block_size = DES_BLOCK_SIZE, |
1144 | .enc_type = (ENC_TYPE_ALG_DES | | 1144 | .enc_type = (ENC_TYPE_ALG_DES | |
1145 | ENC_TYPE_CHAINING_ECB), | 1145 | ENC_TYPE_CHAINING_ECB), |
1146 | .ablkcipher = { | 1146 | .ablkcipher = { |
1147 | .min_keysize = DES_KEY_SIZE, | 1147 | .min_keysize = DES_KEY_SIZE, |
1148 | .max_keysize = DES_KEY_SIZE, | 1148 | .max_keysize = DES_KEY_SIZE, |
1149 | .setkey = n2_des_setkey, | 1149 | .setkey = n2_des_setkey, |
1150 | .encrypt = n2_encrypt_ecb, | 1150 | .encrypt = n2_encrypt_ecb, |
1151 | .decrypt = n2_decrypt_ecb, | 1151 | .decrypt = n2_decrypt_ecb, |
1152 | }, | 1152 | }, |
1153 | }, | 1153 | }, |
1154 | { .name = "cbc(des)", | 1154 | { .name = "cbc(des)", |
1155 | .drv_name = "cbc-des", | 1155 | .drv_name = "cbc-des", |
1156 | .block_size = DES_BLOCK_SIZE, | 1156 | .block_size = DES_BLOCK_SIZE, |
1157 | .enc_type = (ENC_TYPE_ALG_DES | | 1157 | .enc_type = (ENC_TYPE_ALG_DES | |
1158 | ENC_TYPE_CHAINING_CBC), | 1158 | ENC_TYPE_CHAINING_CBC), |
1159 | .ablkcipher = { | 1159 | .ablkcipher = { |
1160 | .ivsize = DES_BLOCK_SIZE, | 1160 | .ivsize = DES_BLOCK_SIZE, |
1161 | .min_keysize = DES_KEY_SIZE, | 1161 | .min_keysize = DES_KEY_SIZE, |
1162 | .max_keysize = DES_KEY_SIZE, | 1162 | .max_keysize = DES_KEY_SIZE, |
1163 | .setkey = n2_des_setkey, | 1163 | .setkey = n2_des_setkey, |
1164 | .encrypt = n2_encrypt_chaining, | 1164 | .encrypt = n2_encrypt_chaining, |
1165 | .decrypt = n2_decrypt_chaining, | 1165 | .decrypt = n2_decrypt_chaining, |
1166 | }, | 1166 | }, |
1167 | }, | 1167 | }, |
1168 | { .name = "cfb(des)", | 1168 | { .name = "cfb(des)", |
1169 | .drv_name = "cfb-des", | 1169 | .drv_name = "cfb-des", |
1170 | .block_size = DES_BLOCK_SIZE, | 1170 | .block_size = DES_BLOCK_SIZE, |
1171 | .enc_type = (ENC_TYPE_ALG_DES | | 1171 | .enc_type = (ENC_TYPE_ALG_DES | |
1172 | ENC_TYPE_CHAINING_CFB), | 1172 | ENC_TYPE_CHAINING_CFB), |
1173 | .ablkcipher = { | 1173 | .ablkcipher = { |
1174 | .min_keysize = DES_KEY_SIZE, | 1174 | .min_keysize = DES_KEY_SIZE, |
1175 | .max_keysize = DES_KEY_SIZE, | 1175 | .max_keysize = DES_KEY_SIZE, |
1176 | .setkey = n2_des_setkey, | 1176 | .setkey = n2_des_setkey, |
1177 | .encrypt = n2_encrypt_chaining, | 1177 | .encrypt = n2_encrypt_chaining, |
1178 | .decrypt = n2_decrypt_chaining, | 1178 | .decrypt = n2_decrypt_chaining, |
1179 | }, | 1179 | }, |
1180 | }, | 1180 | }, |
1181 | 1181 | ||
1182 | /* 3DES: ECB, CBC and CFB are supported */ | 1182 | /* 3DES: ECB, CBC and CFB are supported */ |
1183 | { .name = "ecb(des3_ede)", | 1183 | { .name = "ecb(des3_ede)", |
1184 | .drv_name = "ecb-3des", | 1184 | .drv_name = "ecb-3des", |
1185 | .block_size = DES_BLOCK_SIZE, | 1185 | .block_size = DES_BLOCK_SIZE, |
1186 | .enc_type = (ENC_TYPE_ALG_3DES | | 1186 | .enc_type = (ENC_TYPE_ALG_3DES | |
1187 | ENC_TYPE_CHAINING_ECB), | 1187 | ENC_TYPE_CHAINING_ECB), |
1188 | .ablkcipher = { | 1188 | .ablkcipher = { |
1189 | .min_keysize = 3 * DES_KEY_SIZE, | 1189 | .min_keysize = 3 * DES_KEY_SIZE, |
1190 | .max_keysize = 3 * DES_KEY_SIZE, | 1190 | .max_keysize = 3 * DES_KEY_SIZE, |
1191 | .setkey = n2_3des_setkey, | 1191 | .setkey = n2_3des_setkey, |
1192 | .encrypt = n2_encrypt_ecb, | 1192 | .encrypt = n2_encrypt_ecb, |
1193 | .decrypt = n2_decrypt_ecb, | 1193 | .decrypt = n2_decrypt_ecb, |
1194 | }, | 1194 | }, |
1195 | }, | 1195 | }, |
1196 | { .name = "cbc(des3_ede)", | 1196 | { .name = "cbc(des3_ede)", |
1197 | .drv_name = "cbc-3des", | 1197 | .drv_name = "cbc-3des", |
1198 | .block_size = DES_BLOCK_SIZE, | 1198 | .block_size = DES_BLOCK_SIZE, |
1199 | .enc_type = (ENC_TYPE_ALG_3DES | | 1199 | .enc_type = (ENC_TYPE_ALG_3DES | |
1200 | ENC_TYPE_CHAINING_CBC), | 1200 | ENC_TYPE_CHAINING_CBC), |
1201 | .ablkcipher = { | 1201 | .ablkcipher = { |
1202 | .ivsize = DES_BLOCK_SIZE, | 1202 | .ivsize = DES_BLOCK_SIZE, |
1203 | .min_keysize = 3 * DES_KEY_SIZE, | 1203 | .min_keysize = 3 * DES_KEY_SIZE, |
1204 | .max_keysize = 3 * DES_KEY_SIZE, | 1204 | .max_keysize = 3 * DES_KEY_SIZE, |
1205 | .setkey = n2_3des_setkey, | 1205 | .setkey = n2_3des_setkey, |
1206 | .encrypt = n2_encrypt_chaining, | 1206 | .encrypt = n2_encrypt_chaining, |
1207 | .decrypt = n2_decrypt_chaining, | 1207 | .decrypt = n2_decrypt_chaining, |
1208 | }, | 1208 | }, |
1209 | }, | 1209 | }, |
1210 | { .name = "cfb(des3_ede)", | 1210 | { .name = "cfb(des3_ede)", |
1211 | .drv_name = "cfb-3des", | 1211 | .drv_name = "cfb-3des", |
1212 | .block_size = DES_BLOCK_SIZE, | 1212 | .block_size = DES_BLOCK_SIZE, |
1213 | .enc_type = (ENC_TYPE_ALG_3DES | | 1213 | .enc_type = (ENC_TYPE_ALG_3DES | |
1214 | ENC_TYPE_CHAINING_CFB), | 1214 | ENC_TYPE_CHAINING_CFB), |
1215 | .ablkcipher = { | 1215 | .ablkcipher = { |
1216 | .min_keysize = 3 * DES_KEY_SIZE, | 1216 | .min_keysize = 3 * DES_KEY_SIZE, |
1217 | .max_keysize = 3 * DES_KEY_SIZE, | 1217 | .max_keysize = 3 * DES_KEY_SIZE, |
1218 | .setkey = n2_3des_setkey, | 1218 | .setkey = n2_3des_setkey, |
1219 | .encrypt = n2_encrypt_chaining, | 1219 | .encrypt = n2_encrypt_chaining, |
1220 | .decrypt = n2_decrypt_chaining, | 1220 | .decrypt = n2_decrypt_chaining, |
1221 | }, | 1221 | }, |
1222 | }, | 1222 | }, |
1223 | /* AES: ECB, CBC and CTR are supported */ | 1223 | /* AES: ECB, CBC and CTR are supported */ |
1224 | { .name = "ecb(aes)", | 1224 | { .name = "ecb(aes)", |
1225 | .drv_name = "ecb-aes", | 1225 | .drv_name = "ecb-aes", |
1226 | .block_size = AES_BLOCK_SIZE, | 1226 | .block_size = AES_BLOCK_SIZE, |
1227 | .enc_type = (ENC_TYPE_ALG_AES128 | | 1227 | .enc_type = (ENC_TYPE_ALG_AES128 | |
1228 | ENC_TYPE_CHAINING_ECB), | 1228 | ENC_TYPE_CHAINING_ECB), |
1229 | .ablkcipher = { | 1229 | .ablkcipher = { |
1230 | .min_keysize = AES_MIN_KEY_SIZE, | 1230 | .min_keysize = AES_MIN_KEY_SIZE, |
1231 | .max_keysize = AES_MAX_KEY_SIZE, | 1231 | .max_keysize = AES_MAX_KEY_SIZE, |
1232 | .setkey = n2_aes_setkey, | 1232 | .setkey = n2_aes_setkey, |
1233 | .encrypt = n2_encrypt_ecb, | 1233 | .encrypt = n2_encrypt_ecb, |
1234 | .decrypt = n2_decrypt_ecb, | 1234 | .decrypt = n2_decrypt_ecb, |
1235 | }, | 1235 | }, |
1236 | }, | 1236 | }, |
1237 | { .name = "cbc(aes)", | 1237 | { .name = "cbc(aes)", |
1238 | .drv_name = "cbc-aes", | 1238 | .drv_name = "cbc-aes", |
1239 | .block_size = AES_BLOCK_SIZE, | 1239 | .block_size = AES_BLOCK_SIZE, |
1240 | .enc_type = (ENC_TYPE_ALG_AES128 | | 1240 | .enc_type = (ENC_TYPE_ALG_AES128 | |
1241 | ENC_TYPE_CHAINING_CBC), | 1241 | ENC_TYPE_CHAINING_CBC), |
1242 | .ablkcipher = { | 1242 | .ablkcipher = { |
1243 | .ivsize = AES_BLOCK_SIZE, | 1243 | .ivsize = AES_BLOCK_SIZE, |
1244 | .min_keysize = AES_MIN_KEY_SIZE, | 1244 | .min_keysize = AES_MIN_KEY_SIZE, |
1245 | .max_keysize = AES_MAX_KEY_SIZE, | 1245 | .max_keysize = AES_MAX_KEY_SIZE, |
1246 | .setkey = n2_aes_setkey, | 1246 | .setkey = n2_aes_setkey, |
1247 | .encrypt = n2_encrypt_chaining, | 1247 | .encrypt = n2_encrypt_chaining, |
1248 | .decrypt = n2_decrypt_chaining, | 1248 | .decrypt = n2_decrypt_chaining, |
1249 | }, | 1249 | }, |
1250 | }, | 1250 | }, |
1251 | { .name = "ctr(aes)", | 1251 | { .name = "ctr(aes)", |
1252 | .drv_name = "ctr-aes", | 1252 | .drv_name = "ctr-aes", |
1253 | .block_size = AES_BLOCK_SIZE, | 1253 | .block_size = AES_BLOCK_SIZE, |
1254 | .enc_type = (ENC_TYPE_ALG_AES128 | | 1254 | .enc_type = (ENC_TYPE_ALG_AES128 | |
1255 | ENC_TYPE_CHAINING_COUNTER), | 1255 | ENC_TYPE_CHAINING_COUNTER), |
1256 | .ablkcipher = { | 1256 | .ablkcipher = { |
1257 | .ivsize = AES_BLOCK_SIZE, | 1257 | .ivsize = AES_BLOCK_SIZE, |
1258 | .min_keysize = AES_MIN_KEY_SIZE, | 1258 | .min_keysize = AES_MIN_KEY_SIZE, |
1259 | .max_keysize = AES_MAX_KEY_SIZE, | 1259 | .max_keysize = AES_MAX_KEY_SIZE, |
1260 | .setkey = n2_aes_setkey, | 1260 | .setkey = n2_aes_setkey, |
1261 | .encrypt = n2_encrypt_chaining, | 1261 | .encrypt = n2_encrypt_chaining, |
1262 | .decrypt = n2_encrypt_chaining, | 1262 | .decrypt = n2_encrypt_chaining, |
1263 | }, | 1263 | }, |
1264 | }, | 1264 | }, |
1265 | 1265 | ||
1266 | }; | 1266 | }; |
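One detail in the template table worth calling out: the ctr(aes) entry points .decrypt at n2_encrypt_chaining. That is correct because CTR mode is its own inverse, as this illustrative note summarizes:

	/* Illustrative: why ctr(aes) reuses the encrypt entry point.
	 * CTR produces C = P ^ E_K(counter) and recovers P = C ^ E_K(counter),
	 * so the same keystream-XOR operation serves both directions.
	 */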
1267 | #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) | 1267 | #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) |
1268 | 1268 | ||
1269 | static LIST_HEAD(cipher_algs); | 1269 | static LIST_HEAD(cipher_algs); |
1270 | 1270 | ||
1271 | struct n2_hash_tmpl { | 1271 | struct n2_hash_tmpl { |
1272 | const char *name; | 1272 | const char *name; |
1273 | const char *hash_zero; | 1273 | const char *hash_zero; |
1274 | const u32 *hash_init; | 1274 | const u32 *hash_init; |
1275 | u8 hw_op_hashsz; | 1275 | u8 hw_op_hashsz; |
1276 | u8 digest_size; | 1276 | u8 digest_size; |
1277 | u8 block_size; | 1277 | u8 block_size; |
1278 | u8 auth_type; | 1278 | u8 auth_type; |
1279 | u8 hmac_type; | 1279 | u8 hmac_type; |
1280 | }; | 1280 | }; |
1281 | 1281 | ||
1282 | static const char md5_zero[MD5_DIGEST_SIZE] = { | 1282 | static const char md5_zero[MD5_DIGEST_SIZE] = { |
1283 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | 1283 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, |
1284 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | 1284 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, |
1285 | }; | 1285 | }; |
1286 | static const u32 md5_init[MD5_HASH_WORDS] = { | 1286 | static const u32 md5_init[MD5_HASH_WORDS] = { |
1287 | cpu_to_le32(0x67452301), | 1287 | cpu_to_le32(0x67452301), |
1288 | cpu_to_le32(0xefcdab89), | 1288 | cpu_to_le32(0xefcdab89), |
1289 | cpu_to_le32(0x98badcfe), | 1289 | cpu_to_le32(0x98badcfe), |
1290 | cpu_to_le32(0x10325476), | 1290 | cpu_to_le32(0x10325476), |
1291 | }; | 1291 | }; |
1292 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | 1292 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { |
1293 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | 1293 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, |
1294 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | 1294 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, |
1295 | 0x07, 0x09 | 1295 | 0x07, 0x09 |
1296 | }; | 1296 | }; |
1297 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | 1297 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { |
1298 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | 1298 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, |
1299 | }; | 1299 | }; |
1300 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | 1300 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { |
1301 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | 1301 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, |
1302 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | 1302 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, |
1303 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | 1303 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, |
1304 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | 1304 | 0x1b, 0x78, 0x52, 0xb8, 0x55 |
1305 | }; | 1305 | }; |
1306 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | 1306 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { |
1307 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | 1307 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, |
1308 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | 1308 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, |
1309 | }; | 1309 | }; |
1310 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | 1310 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { |
1311 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | 1311 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, |
1312 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | 1312 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, |
1313 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | 1313 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, |
1314 | 0x2f | 1314 | 0x2f |
1315 | }; | 1315 | }; |
1316 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | 1316 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { |
1317 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | 1317 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, |
1318 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | 1318 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, |
1319 | }; | 1319 | }; |
1320 | 1320 | ||
1321 | static const struct n2_hash_tmpl hash_tmpls[] = { | 1321 | static const struct n2_hash_tmpl hash_tmpls[] = { |
1322 | { .name = "md5", | 1322 | { .name = "md5", |
1323 | .hash_zero = md5_zero, | 1323 | .hash_zero = md5_zero, |
1324 | .hash_init = md5_init, | 1324 | .hash_init = md5_init, |
1325 | .auth_type = AUTH_TYPE_MD5, | 1325 | .auth_type = AUTH_TYPE_MD5, |
1326 | .hmac_type = AUTH_TYPE_HMAC_MD5, | 1326 | .hmac_type = AUTH_TYPE_HMAC_MD5, |
1327 | .hw_op_hashsz = MD5_DIGEST_SIZE, | 1327 | .hw_op_hashsz = MD5_DIGEST_SIZE, |
1328 | .digest_size = MD5_DIGEST_SIZE, | 1328 | .digest_size = MD5_DIGEST_SIZE, |
1329 | .block_size = MD5_HMAC_BLOCK_SIZE }, | 1329 | .block_size = MD5_HMAC_BLOCK_SIZE }, |
1330 | { .name = "sha1", | 1330 | { .name = "sha1", |
1331 | .hash_zero = sha1_zero, | 1331 | .hash_zero = sha1_zero, |
1332 | .hash_init = sha1_init, | 1332 | .hash_init = sha1_init, |
1333 | .auth_type = AUTH_TYPE_SHA1, | 1333 | .auth_type = AUTH_TYPE_SHA1, |
1334 | .hmac_type = AUTH_TYPE_HMAC_SHA1, | 1334 | .hmac_type = AUTH_TYPE_HMAC_SHA1, |
1335 | .hw_op_hashsz = SHA1_DIGEST_SIZE, | 1335 | .hw_op_hashsz = SHA1_DIGEST_SIZE, |
1336 | .digest_size = SHA1_DIGEST_SIZE, | 1336 | .digest_size = SHA1_DIGEST_SIZE, |
1337 | .block_size = SHA1_BLOCK_SIZE }, | 1337 | .block_size = SHA1_BLOCK_SIZE }, |
1338 | { .name = "sha256", | 1338 | { .name = "sha256", |
1339 | .hash_zero = sha256_zero, | 1339 | .hash_zero = sha256_zero, |
1340 | .hash_init = sha256_init, | 1340 | .hash_init = sha256_init, |
1341 | .auth_type = AUTH_TYPE_SHA256, | 1341 | .auth_type = AUTH_TYPE_SHA256, |
1342 | .hmac_type = AUTH_TYPE_HMAC_SHA256, | 1342 | .hmac_type = AUTH_TYPE_HMAC_SHA256, |
1343 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | 1343 | .hw_op_hashsz = SHA256_DIGEST_SIZE, |
1344 | .digest_size = SHA256_DIGEST_SIZE, | 1344 | .digest_size = SHA256_DIGEST_SIZE, |
1345 | .block_size = SHA256_BLOCK_SIZE }, | 1345 | .block_size = SHA256_BLOCK_SIZE }, |
1346 | { .name = "sha224", | 1346 | { .name = "sha224", |
1347 | .hash_zero = sha224_zero, | 1347 | .hash_zero = sha224_zero, |
1348 | .hash_init = sha224_init, | 1348 | .hash_init = sha224_init, |
1349 | .auth_type = AUTH_TYPE_SHA256, | 1349 | .auth_type = AUTH_TYPE_SHA256, |
1350 | .hmac_type = AUTH_TYPE_RESERVED, | 1350 | .hmac_type = AUTH_TYPE_RESERVED, |
1351 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | 1351 | .hw_op_hashsz = SHA256_DIGEST_SIZE, |
1352 | .digest_size = SHA224_DIGEST_SIZE, | 1352 | .digest_size = SHA224_DIGEST_SIZE, |
1353 | .block_size = SHA224_BLOCK_SIZE }, | 1353 | .block_size = SHA224_BLOCK_SIZE }, |
1354 | }; | 1354 | }; |
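The sha224 entry above is the odd one out, and its fields only make sense together; an illustrative summary (the "hardware emits 32 bytes" reading is inferred from hw_op_hashsz):

	/* Illustrative summary of the sha224 template above:
	 *   auth_type    = AUTH_TYPE_SHA256   (SHA-224 runs on the SHA-256 op)
	 *   hash_init    = sha224_init        (SHA-224 initial state H0..H7)
	 *   hw_op_hashsz = SHA256_DIGEST_SIZE (hardware emits 32 bytes)
	 *   digest_size  = SHA224_DIGEST_SIZE (result truncated to 28 bytes)
	 *   hmac_type    = AUTH_TYPE_RESERVED (no hmac(sha224) is registered;
	 *                  see the check in __n2_register_one_ahash())
	 */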
1355 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) | 1355 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) |
1356 | 1356 | ||
1357 | static LIST_HEAD(ahash_algs); | 1357 | static LIST_HEAD(ahash_algs); |
1358 | static LIST_HEAD(hmac_algs); | 1358 | static LIST_HEAD(hmac_algs); |
1359 | 1359 | ||
1360 | static int algs_registered; | 1360 | static int algs_registered; |
1361 | 1361 | ||
1362 | static void __n2_unregister_algs(void) | 1362 | static void __n2_unregister_algs(void) |
1363 | { | 1363 | { |
1364 | struct n2_cipher_alg *cipher, *cipher_tmp; | 1364 | struct n2_cipher_alg *cipher, *cipher_tmp; |
1365 | struct n2_ahash_alg *alg, *alg_tmp; | 1365 | struct n2_ahash_alg *alg, *alg_tmp; |
1366 | struct n2_hmac_alg *hmac, *hmac_tmp; | 1366 | struct n2_hmac_alg *hmac, *hmac_tmp; |
1367 | 1367 | ||
1368 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { | 1368 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { |
1369 | crypto_unregister_alg(&cipher->alg); | 1369 | crypto_unregister_alg(&cipher->alg); |
1370 | list_del(&cipher->entry); | 1370 | list_del(&cipher->entry); |
1371 | kfree(cipher); | 1371 | kfree(cipher); |
1372 | } | 1372 | } |
1373 | list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { | 1373 | list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { |
1374 | crypto_unregister_ahash(&hmac->derived.alg); | 1374 | crypto_unregister_ahash(&hmac->derived.alg); |
1375 | list_del(&hmac->derived.entry); | 1375 | list_del(&hmac->derived.entry); |
1376 | kfree(hmac); | 1376 | kfree(hmac); |
1377 | } | 1377 | } |
1378 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { | 1378 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { |
1379 | crypto_unregister_ahash(&alg->alg); | 1379 | crypto_unregister_ahash(&alg->alg); |
1380 | list_del(&alg->entry); | 1380 | list_del(&alg->entry); |
1381 | kfree(alg); | 1381 | kfree(alg); |
1382 | } | 1382 | } |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | static int n2_cipher_cra_init(struct crypto_tfm *tfm) | 1385 | static int n2_cipher_cra_init(struct crypto_tfm *tfm) |
1386 | { | 1386 | { |
1387 | tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); | 1387 | tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); |
1388 | return 0; | 1388 | return 0; |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) | 1391 | static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) |
1392 | { | 1392 | { |
1393 | struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | 1393 | struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); |
1394 | struct crypto_alg *alg; | 1394 | struct crypto_alg *alg; |
1395 | int err; | 1395 | int err; |
1396 | 1396 | ||
1397 | if (!p) | 1397 | if (!p) |
1398 | return -ENOMEM; | 1398 | return -ENOMEM; |
1399 | 1399 | ||
1400 | alg = &p->alg; | 1400 | alg = &p->alg; |
1401 | 1401 | ||
1402 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | 1402 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); |
1403 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); | 1403 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); |
1404 | alg->cra_priority = N2_CRA_PRIORITY; | 1404 | alg->cra_priority = N2_CRA_PRIORITY; |
1405 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 1405 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
1406 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; | 1406 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; |
1407 | alg->cra_blocksize = tmpl->block_size; | 1407 | alg->cra_blocksize = tmpl->block_size; |
1408 | p->enc_type = tmpl->enc_type; | 1408 | p->enc_type = tmpl->enc_type; |
1409 | alg->cra_ctxsize = sizeof(struct n2_cipher_context); | 1409 | alg->cra_ctxsize = sizeof(struct n2_cipher_context); |
1410 | alg->cra_type = &crypto_ablkcipher_type; | 1410 | alg->cra_type = &crypto_ablkcipher_type; |
1411 | alg->cra_u.ablkcipher = tmpl->ablkcipher; | 1411 | alg->cra_u.ablkcipher = tmpl->ablkcipher; |
1412 | alg->cra_init = n2_cipher_cra_init; | 1412 | alg->cra_init = n2_cipher_cra_init; |
1413 | alg->cra_module = THIS_MODULE; | 1413 | alg->cra_module = THIS_MODULE; |
1414 | 1414 | ||
1415 | list_add(&p->entry, &cipher_algs); | 1415 | list_add(&p->entry, &cipher_algs); |
1416 | err = crypto_register_alg(alg); | 1416 | err = crypto_register_alg(alg); |
1417 | if (err) { | 1417 | if (err) { |
1418 | pr_err("%s alg registration failed\n", alg->cra_name); | 1418 | pr_err("%s alg registration failed\n", alg->cra_name); |
1419 | list_del(&p->entry); | 1419 | list_del(&p->entry); |
1420 | kfree(p); | 1420 | kfree(p); |
1421 | } else { | 1421 | } else { |
1422 | pr_info("%s alg registered\n", alg->cra_name); | 1422 | pr_info("%s alg registered\n", alg->cra_name); |
1423 | } | 1423 | } |
1424 | return err; | 1424 | return err; |
1425 | } | 1425 | } |
1426 | 1426 | ||
1427 | static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) | 1427 | static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) |
1428 | { | 1428 | { |
1429 | struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | 1429 | struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); |
1430 | struct ahash_alg *ahash; | 1430 | struct ahash_alg *ahash; |
1431 | struct crypto_alg *base; | 1431 | struct crypto_alg *base; |
1432 | int err; | 1432 | int err; |
1433 | 1433 | ||
1434 | if (!p) | 1434 | if (!p) |
1435 | return -ENOMEM; | 1435 | return -ENOMEM; |
1436 | 1436 | ||
1437 | p->child_alg = n2ahash->alg.halg.base.cra_name; | 1437 | p->child_alg = n2ahash->alg.halg.base.cra_name; |
1438 | memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); | 1438 | memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); |
1439 | INIT_LIST_HEAD(&p->derived.entry); | 1439 | INIT_LIST_HEAD(&p->derived.entry); |
1440 | 1440 | ||
1441 | ahash = &p->derived.alg; | 1441 | ahash = &p->derived.alg; |
1442 | ahash->digest = n2_hmac_async_digest; | 1442 | ahash->digest = n2_hmac_async_digest; |
1443 | ahash->setkey = n2_hmac_async_setkey; | 1443 | ahash->setkey = n2_hmac_async_setkey; |
1444 | 1444 | ||
1445 | base = &ahash->halg.base; | 1445 | base = &ahash->halg.base; |
1446 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); | 1446 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); |
1447 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); | 1447 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); |
1448 | 1448 | ||
1449 | base->cra_ctxsize = sizeof(struct n2_hmac_ctx); | 1449 | base->cra_ctxsize = sizeof(struct n2_hmac_ctx); |
1450 | base->cra_init = n2_hmac_cra_init; | 1450 | base->cra_init = n2_hmac_cra_init; |
1451 | base->cra_exit = n2_hmac_cra_exit; | 1451 | base->cra_exit = n2_hmac_cra_exit; |
1452 | 1452 | ||
1453 | list_add(&p->derived.entry, &hmac_algs); | 1453 | list_add(&p->derived.entry, &hmac_algs); |
1454 | err = crypto_register_ahash(ahash); | 1454 | err = crypto_register_ahash(ahash); |
1455 | if (err) { | 1455 | if (err) { |
1456 | pr_err("%s alg registration failed\n", base->cra_name); | 1456 | pr_err("%s alg registration failed\n", base->cra_name); |
1457 | list_del(&p->derived.entry); | 1457 | list_del(&p->derived.entry); |
1458 | kfree(p); | 1458 | kfree(p); |
1459 | } else { | 1459 | } else { |
1460 | pr_info("%s alg registered\n", base->cra_name); | 1460 | pr_info("%s alg registered\n", base->cra_name); |
1461 | } | 1461 | } |
1462 | return err; | 1462 | return err; |
1463 | } | 1463 | } |
1464 | 1464 | ||
1465 | static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) | 1465 | static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) |
1466 | { | 1466 | { |
1467 | struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | 1467 | struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); |
1468 | struct hash_alg_common *halg; | 1468 | struct hash_alg_common *halg; |
1469 | struct crypto_alg *base; | 1469 | struct crypto_alg *base; |
1470 | struct ahash_alg *ahash; | 1470 | struct ahash_alg *ahash; |
1471 | int err; | 1471 | int err; |
1472 | 1472 | ||
1473 | if (!p) | 1473 | if (!p) |
1474 | return -ENOMEM; | 1474 | return -ENOMEM; |
1475 | 1475 | ||
1476 | p->hash_zero = tmpl->hash_zero; | 1476 | p->hash_zero = tmpl->hash_zero; |
1477 | p->hash_init = tmpl->hash_init; | 1477 | p->hash_init = tmpl->hash_init; |
1478 | p->auth_type = tmpl->auth_type; | 1478 | p->auth_type = tmpl->auth_type; |
1479 | p->hmac_type = tmpl->hmac_type; | 1479 | p->hmac_type = tmpl->hmac_type; |
1480 | p->hw_op_hashsz = tmpl->hw_op_hashsz; | 1480 | p->hw_op_hashsz = tmpl->hw_op_hashsz; |
1481 | p->digest_size = tmpl->digest_size; | 1481 | p->digest_size = tmpl->digest_size; |
1482 | 1482 | ||
1483 | ahash = &p->alg; | 1483 | ahash = &p->alg; |
1484 | ahash->init = n2_hash_async_init; | 1484 | ahash->init = n2_hash_async_init; |
1485 | ahash->update = n2_hash_async_update; | 1485 | ahash->update = n2_hash_async_update; |
1486 | ahash->final = n2_hash_async_final; | 1486 | ahash->final = n2_hash_async_final; |
1487 | ahash->finup = n2_hash_async_finup; | 1487 | ahash->finup = n2_hash_async_finup; |
1488 | ahash->digest = n2_hash_async_digest; | 1488 | ahash->digest = n2_hash_async_digest; |
1489 | 1489 | ||
1490 | halg = &ahash->halg; | 1490 | halg = &ahash->halg; |
1491 | halg->digestsize = tmpl->digest_size; | 1491 | halg->digestsize = tmpl->digest_size; |
1492 | 1492 | ||
1493 | base = &halg->base; | 1493 | base = &halg->base; |
1494 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | 1494 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); |
1495 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); | 1495 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); |
1496 | base->cra_priority = N2_CRA_PRIORITY; | 1496 | base->cra_priority = N2_CRA_PRIORITY; |
1497 | base->cra_flags = CRYPTO_ALG_TYPE_AHASH | | 1497 | base->cra_flags = CRYPTO_ALG_TYPE_AHASH | |
1498 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 1498 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
1499 | CRYPTO_ALG_NEED_FALLBACK; | 1499 | CRYPTO_ALG_NEED_FALLBACK; |
1500 | base->cra_blocksize = tmpl->block_size; | 1500 | base->cra_blocksize = tmpl->block_size; |
1501 | base->cra_ctxsize = sizeof(struct n2_hash_ctx); | 1501 | base->cra_ctxsize = sizeof(struct n2_hash_ctx); |
1502 | base->cra_module = THIS_MODULE; | 1502 | base->cra_module = THIS_MODULE; |
1503 | base->cra_init = n2_hash_cra_init; | 1503 | base->cra_init = n2_hash_cra_init; |
1504 | base->cra_exit = n2_hash_cra_exit; | 1504 | base->cra_exit = n2_hash_cra_exit; |
1505 | 1505 | ||
1506 | list_add(&p->entry, &ahash_algs); | 1506 | list_add(&p->entry, &ahash_algs); |
1507 | err = crypto_register_ahash(ahash); | 1507 | err = crypto_register_ahash(ahash); |
1508 | if (err) { | 1508 | if (err) { |
1509 | pr_err("%s alg registration failed\n", base->cra_name); | 1509 | pr_err("%s alg registration failed\n", base->cra_name); |
1510 | list_del(&p->entry); | 1510 | list_del(&p->entry); |
1511 | kfree(p); | 1511 | kfree(p); |
1512 | } else { | 1512 | } else { |
1513 | pr_info("%s alg registered\n", base->cra_name); | 1513 | pr_info("%s alg registered\n", base->cra_name); |
1514 | } | 1514 | } |
1515 | if (!err && p->hmac_type != AUTH_TYPE_RESERVED) | 1515 | if (!err && p->hmac_type != AUTH_TYPE_RESERVED) |
1516 | err = __n2_register_one_hmac(p); | 1516 | err = __n2_register_one_hmac(p); |
1517 | return err; | 1517 | return err; |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | static int __devinit n2_register_algs(void) | 1520 | static int __devinit n2_register_algs(void) |
1521 | { | 1521 | { |
1522 | int i, err = 0; | 1522 | int i, err = 0; |
1523 | 1523 | ||
1524 | mutex_lock(&spu_lock); | 1524 | mutex_lock(&spu_lock); |
1525 | if (algs_registered++) | 1525 | if (algs_registered++) |
1526 | goto out; | 1526 | goto out; |
1527 | 1527 | ||
1528 | for (i = 0; i < NUM_HASH_TMPLS; i++) { | 1528 | for (i = 0; i < NUM_HASH_TMPLS; i++) { |
1529 | err = __n2_register_one_ahash(&hash_tmpls[i]); | 1529 | err = __n2_register_one_ahash(&hash_tmpls[i]); |
1530 | if (err) { | 1530 | if (err) { |
1531 | __n2_unregister_algs(); | 1531 | __n2_unregister_algs(); |
1532 | goto out; | 1532 | goto out; |
1533 | } | 1533 | } |
1534 | } | 1534 | } |
1535 | for (i = 0; i < NUM_CIPHER_TMPLS; i++) { | 1535 | for (i = 0; i < NUM_CIPHER_TMPLS; i++) { |
1536 | err = __n2_register_one_cipher(&cipher_tmpls[i]); | 1536 | err = __n2_register_one_cipher(&cipher_tmpls[i]); |
1537 | if (err) { | 1537 | if (err) { |
1538 | __n2_unregister_algs(); | 1538 | __n2_unregister_algs(); |
1539 | goto out; | 1539 | goto out; |
1540 | } | 1540 | } |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | out: | 1543 | out: |
1544 | mutex_unlock(&spu_lock); | 1544 | mutex_unlock(&spu_lock); |
1545 | return err; | 1545 | return err; |
1546 | } | 1546 | } |
1547 | 1547 | ||
1548 | static void __devexit n2_unregister_algs(void) | 1548 | static void __devexit n2_unregister_algs(void) |
1549 | { | 1549 | { |
1550 | mutex_lock(&spu_lock); | 1550 | mutex_lock(&spu_lock); |
1551 | if (!--algs_registered) | 1551 | if (!--algs_registered) |
1552 | __n2_unregister_algs(); | 1552 | __n2_unregister_algs(); |
1553 | mutex_unlock(&spu_lock); | 1553 | mutex_unlock(&spu_lock); |
1554 | } | 1554 | } |
1555 | 1555 | ||
1556 | /* To map CWQ queues to interrupt sources, the hypervisor API provides | 1556 | /* To map CWQ queues to interrupt sources, the hypervisor API provides |
1557 | * a devino. This isn't very useful to us because all of the | 1557 | * a devino. This isn't very useful to us because all of the |
1558 | * interrupts listed in the device_node have been translated to | 1558 | * interrupts listed in the device_node have been translated to |
1559 | * Linux virtual IRQ cookie numbers. | 1559 | * Linux virtual IRQ cookie numbers. |
1560 | * | 1560 | * |
1561 | * So we have to back-translate, going through the 'intr' and 'ino' | 1561 | * So we have to back-translate, going through the 'intr' and 'ino' |
1562 | * property tables of the n2cp MDESC node, matching it with the OF | 1562 | * property tables of the n2cp MDESC node, matching it with the OF |
1563 | * 'interrupts' property entries, in order to figure out which | 1563 | * 'interrupts' property entries, in order to figure out which |
1564 | * devino goes to which already-translated IRQ. | 1564 | * devino goes to which already-translated IRQ. |
1565 | */ | 1565 | */ |
1566 | static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, | 1566 | static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, |
1567 | unsigned long dev_ino) | 1567 | unsigned long dev_ino) |
1568 | { | 1568 | { |
1569 | const unsigned int *dev_intrs; | 1569 | const unsigned int *dev_intrs; |
1570 | unsigned int intr; | 1570 | unsigned int intr; |
1571 | int i; | 1571 | int i; |
1572 | 1572 | ||
1573 | for (i = 0; i < ip->num_intrs; i++) { | 1573 | for (i = 0; i < ip->num_intrs; i++) { |
1574 | if (ip->ino_table[i].ino == dev_ino) | 1574 | if (ip->ino_table[i].ino == dev_ino) |
1575 | break; | 1575 | break; |
1576 | } | 1576 | } |
1577 | if (i == ip->num_intrs) | 1577 | if (i == ip->num_intrs) |
1578 | return -ENODEV; | 1578 | return -ENODEV; |
1579 | 1579 | ||
1580 | intr = ip->ino_table[i].intr; | 1580 | intr = ip->ino_table[i].intr; |
1581 | 1581 | ||
1582 | dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); | 1582 | dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); |
1583 | if (!dev_intrs) | 1583 | if (!dev_intrs) |
1584 | return -ENODEV; | 1584 | return -ENODEV; |
1585 | 1585 | ||
1586 | for (i = 0; i < dev->archdata.num_irqs; i++) { | 1586 | for (i = 0; i < dev->archdata.num_irqs; i++) { |
1587 | if (dev_intrs[i] == intr) | 1587 | if (dev_intrs[i] == intr) |
1588 | return i; | 1588 | return i; |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | return -ENODEV; | 1591 | return -ENODEV; |
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, | 1594 | static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, |
1595 | const char *irq_name, struct spu_queue *p, | 1595 | const char *irq_name, struct spu_queue *p, |
1596 | irq_handler_t handler) | 1596 | irq_handler_t handler) |
1597 | { | 1597 | { |
1598 | unsigned long herr; | 1598 | unsigned long herr; |
1599 | int index; | 1599 | int index; |
1600 | 1600 | ||
1601 | herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); | 1601 | herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); |
1602 | if (herr) | 1602 | if (herr) |
1603 | return -EINVAL; | 1603 | return -EINVAL; |
1604 | 1604 | ||
1605 | index = find_devino_index(dev, ip, p->devino); | 1605 | index = find_devino_index(dev, ip, p->devino); |
1606 | if (index < 0) | 1606 | if (index < 0) |
1607 | return index; | 1607 | return index; |
1608 | 1608 | ||
1609 | p->irq = dev->archdata.irqs[index]; | 1609 | p->irq = dev->archdata.irqs[index]; |
1610 | 1610 | ||
1611 | sprintf(p->irq_name, "%s-%d", irq_name, index); | 1611 | sprintf(p->irq_name, "%s-%d", irq_name, index); |
1612 | 1612 | ||
1613 | return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, | 1613 | return request_irq(p->irq, handler, 0, p->irq_name, p); |
1614 | p->irq_name, p); | ||
1615 | } | 1614 | } |
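Putting the comment above together with the two helpers: spu_map_ino() goes from queue handle to Linux IRQ in four hops, which the following condensed trace (illustrative only) spells out:

	/* qhandle --sun4v_ncs_qhandle_to_devino()--> devino
	 * devino  --match ip->ino_table[i].ino-----> intr
	 * intr    --match OF 'interrupts' entry----> index
	 * index   --dev->archdata.irqs[index]------> Linux virtual IRQ
	 */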
1616 | 1615 | ||
1617 | static struct kmem_cache *queue_cache[2]; | 1616 | static struct kmem_cache *queue_cache[2]; |
1618 | 1617 | ||
1619 | static void *new_queue(unsigned long q_type) | 1618 | static void *new_queue(unsigned long q_type) |
1620 | { | 1619 | { |
1621 | return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); | 1620 | return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); |
1622 | } | 1621 | } |
1623 | 1622 | ||
1624 | static void free_queue(void *p, unsigned long q_type) | 1623 | static void free_queue(void *p, unsigned long q_type) |
1625 | { | 1624 | { |
1626 | return kmem_cache_free(queue_cache[q_type - 1], p); | 1625 | return kmem_cache_free(queue_cache[q_type - 1], p); |
1627 | } | 1626 | } |
1628 | 1627 | ||
1629 | static int queue_cache_init(void) | 1628 | static int queue_cache_init(void) |
1630 | { | 1629 | { |
1631 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | 1630 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) |
1632 | queue_cache[HV_NCS_QTYPE_MAU - 1] = | 1631 | queue_cache[HV_NCS_QTYPE_MAU - 1] = |
1633 | kmem_cache_create("mau_queue", | 1632 | kmem_cache_create("mau_queue", |
1634 | (MAU_NUM_ENTRIES * | 1633 | (MAU_NUM_ENTRIES * |
1635 | MAU_ENTRY_SIZE), | 1634 | MAU_ENTRY_SIZE), |
1636 | MAU_ENTRY_SIZE, 0, NULL); | 1635 | MAU_ENTRY_SIZE, 0, NULL); |
1637 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | 1636 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) |
1638 | return -ENOMEM; | 1637 | return -ENOMEM; |
1639 | 1638 | ||
1640 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) | 1639 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) |
1641 | queue_cache[HV_NCS_QTYPE_CWQ - 1] = | 1640 | queue_cache[HV_NCS_QTYPE_CWQ - 1] = |
1642 | kmem_cache_create("cwq_queue", | 1641 | kmem_cache_create("cwq_queue", |
1643 | (CWQ_NUM_ENTRIES * | 1642 | (CWQ_NUM_ENTRIES * |
1644 | CWQ_ENTRY_SIZE), | 1643 | CWQ_ENTRY_SIZE), |
1645 | CWQ_ENTRY_SIZE, 0, NULL); | 1644 | CWQ_ENTRY_SIZE, 0, NULL); |
1646 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { | 1645 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { |
1647 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | 1646 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); |
1648 | return -ENOMEM; | 1647 | return -ENOMEM; |
1649 | } | 1648 | } |
1650 | return 0; | 1649 | return 0; |
1651 | } | 1650 | } |
1652 | 1651 | ||
1653 | static void queue_cache_destroy(void) | 1652 | static void queue_cache_destroy(void) |
1654 | { | 1653 | { |
1655 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | 1654 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); |
1656 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); | 1655 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); |
1657 | } | 1656 | } |
1658 | 1657 | ||
1659 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) | 1658 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) |
1660 | { | 1659 | { |
1661 | cpumask_var_t old_allowed; | 1660 | cpumask_var_t old_allowed; |
1662 | unsigned long hv_ret; | 1661 | unsigned long hv_ret; |
1663 | 1662 | ||
1664 | if (cpumask_empty(&p->sharing)) | 1663 | if (cpumask_empty(&p->sharing)) |
1665 | return -EINVAL; | 1664 | return -EINVAL; |
1666 | 1665 | ||
1667 | if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) | 1666 | if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) |
1668 | return -ENOMEM; | 1667 | return -ENOMEM; |
1669 | 1668 | ||
1670 | cpumask_copy(old_allowed, ¤t->cpus_allowed); | 1669 | cpumask_copy(old_allowed, ¤t->cpus_allowed); |
1671 | 1670 | ||
1672 | set_cpus_allowed_ptr(current, &p->sharing); | 1671 | set_cpus_allowed_ptr(current, &p->sharing); |
1673 | 1672 | ||
1674 | hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), | 1673 | hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), |
1675 | CWQ_NUM_ENTRIES, &p->qhandle); | 1674 | CWQ_NUM_ENTRIES, &p->qhandle); |
1676 | if (!hv_ret) | 1675 | if (!hv_ret) |
1677 | sun4v_ncs_sethead_marker(p->qhandle, 0); | 1676 | sun4v_ncs_sethead_marker(p->qhandle, 0); |
1678 | 1677 | ||
1679 | set_cpus_allowed_ptr(current, old_allowed); | 1678 | set_cpus_allowed_ptr(current, old_allowed); |
1680 | 1679 | ||
1681 | free_cpumask_var(old_allowed); | 1680 | free_cpumask_var(old_allowed); |
1682 | 1681 | ||
1683 | return (hv_ret ? -EINVAL : 0); | 1682 | return (hv_ret ? -EINVAL : 0); |
1684 | } | 1683 | } |
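The affinity dance in spu_queue_register() is easy to misread, so here is a sketch of the idiom (illustrative; the reason for the pinning is presumably that the hypervisor associates the queue with the CPU issuing the qconf call):

	/* Save/pin/restore pattern used above:
	 *   1. save the task's current cpumask into old_allowed
	 *   2. pin the task to p->sharing, the CPUs that share this queue
	 *   3. issue sun4v_ncs_qconf() from one of those CPUs
	 *   4. restore the original cpumask and free the copy
	 */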
1685 | 1684 | ||
1686 | static int spu_queue_setup(struct spu_queue *p) | 1685 | static int spu_queue_setup(struct spu_queue *p) |
1687 | { | 1686 | { |
1688 | int err; | 1687 | int err; |
1689 | 1688 | ||
1690 | p->q = new_queue(p->q_type); | 1689 | p->q = new_queue(p->q_type); |
1691 | if (!p->q) | 1690 | if (!p->q) |
1692 | return -ENOMEM; | 1691 | return -ENOMEM; |
1693 | 1692 | ||
1694 | err = spu_queue_register(p, p->q_type); | 1693 | err = spu_queue_register(p, p->q_type); |
1695 | if (err) { | 1694 | if (err) { |
1696 | free_queue(p->q, p->q_type); | 1695 | free_queue(p->q, p->q_type); |
1697 | p->q = NULL; | 1696 | p->q = NULL; |
1698 | } | 1697 | } |
1699 | 1698 | ||
1700 | return err; | 1699 | return err; |
1701 | } | 1700 | } |
1702 | 1701 | ||
1703 | static void spu_queue_destroy(struct spu_queue *p) | 1702 | static void spu_queue_destroy(struct spu_queue *p) |
1704 | { | 1703 | { |
1705 | unsigned long hv_ret; | 1704 | unsigned long hv_ret; |
1706 | 1705 | ||
1707 | if (!p->q) | 1706 | if (!p->q) |
1708 | return; | 1707 | return; |
1709 | 1708 | ||
1710 | hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); | 1709 | hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); |
1711 | 1710 | ||
1712 | if (!hv_ret) | 1711 | if (!hv_ret) |
1713 | free_queue(p->q, p->q_type); | 1712 | free_queue(p->q, p->q_type); |
1714 | } | 1713 | } |
1715 | 1714 | ||
1716 | static void spu_list_destroy(struct list_head *list) | 1715 | static void spu_list_destroy(struct list_head *list) |
1717 | { | 1716 | { |
1718 | struct spu_queue *p, *n; | 1717 | struct spu_queue *p, *n; |
1719 | 1718 | ||
1720 | list_for_each_entry_safe(p, n, list, list) { | 1719 | list_for_each_entry_safe(p, n, list, list) { |
1721 | int i; | 1720 | int i; |
1722 | 1721 | ||
1723 | for (i = 0; i < NR_CPUS; i++) { | 1722 | for (i = 0; i < NR_CPUS; i++) { |
1724 | if (cpu_to_cwq[i] == p) | 1723 | if (cpu_to_cwq[i] == p) |
1725 | cpu_to_cwq[i] = NULL; | 1724 | cpu_to_cwq[i] = NULL; |
1726 | } | 1725 | } |
1727 | 1726 | ||
1728 | if (p->irq) { | 1727 | if (p->irq) { |
1729 | free_irq(p->irq, p); | 1728 | free_irq(p->irq, p); |
1730 | p->irq = 0; | 1729 | p->irq = 0; |
1731 | } | 1730 | } |
1732 | spu_queue_destroy(p); | 1731 | spu_queue_destroy(p); |
1733 | list_del(&p->list); | 1732 | list_del(&p->list); |
1734 | kfree(p); | 1733 | kfree(p); |
1735 | } | 1734 | } |
1736 | } | 1735 | } |
1737 | 1736 | ||
1738 | /* Walk the backward arcs of a CWQ 'exec-unit' node, | 1737 | /* Walk the backward arcs of a CWQ 'exec-unit' node, |
1739 | * gathering cpu membership information. | 1738 | * gathering cpu membership information. |
1740 | */ | 1739 | */ |
1741 | static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, | 1740 | static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, |
1742 | struct platform_device *dev, | 1741 | struct platform_device *dev, |
1743 | u64 node, struct spu_queue *p, | 1742 | u64 node, struct spu_queue *p, |
1744 | struct spu_queue **table) | 1743 | struct spu_queue **table) |
1745 | { | 1744 | { |
1746 | u64 arc; | 1745 | u64 arc; |
1747 | 1746 | ||
1748 | mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { | 1747 | mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { |
1749 | u64 tgt = mdesc_arc_target(mdesc, arc); | 1748 | u64 tgt = mdesc_arc_target(mdesc, arc); |
1750 | const char *name = mdesc_node_name(mdesc, tgt); | 1749 | const char *name = mdesc_node_name(mdesc, tgt); |
1751 | const u64 *id; | 1750 | const u64 *id; |
1752 | 1751 | ||
1753 | if (strcmp(name, "cpu")) | 1752 | if (strcmp(name, "cpu")) |
1754 | continue; | 1753 | continue; |
1755 | id = mdesc_get_property(mdesc, tgt, "id", NULL); | 1754 | id = mdesc_get_property(mdesc, tgt, "id", NULL); |
1756 | if (table[*id] != NULL) { | 1755 | if (table[*id] != NULL) { |
1757 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", | 1756 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", |
1758 | dev->dev.of_node->full_name); | 1757 | dev->dev.of_node->full_name); |
1759 | return -EINVAL; | 1758 | return -EINVAL; |
1760 | } | 1759 | } |
1761 | cpu_set(*id, p->sharing); | 1760 | cpu_set(*id, p->sharing); |
1762 | table[*id] = p; | 1761 | table[*id] = p; |
1763 | } | 1762 | } |
1764 | return 0; | 1763 | return 0; |
1765 | } | 1764 | } |

/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

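/* Scan the MDESC for every 'exec-unit' node whose 'type' property
 * matches exec_name ("cwq" or "mau") and build a queue for each.
 * On any failure the whole partially-built list is torn down.
 */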
static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

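/* Read the 'ino' (interrupt number) array out of the device's MDESC
 * node and build ino_table from it; each entry's 'intr' is simply the
 * 1-based index of the corresponding device interrupt.
 */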
static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
				   struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		pr_err("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

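/* Locate the 'virtual-device' MDESC node that corresponds to this
 * platform device: the device's OF 'reg' property carries the
 * cfg-handle to compare against, and the matching node's interrupt
 * properties are then harvested.
 */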
static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
					  struct platform_device *dev,
					  struct spu_mdesc_info *ip,
					  const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

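/* Negotiate version 2.0 of the NCS hypervisor API group; the minor
 * number the hypervisor actually granted comes back in
 * n2_spu_hvapi_minor.
 */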
static int __devinit n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

static int global_ref;

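/* One-time setup shared by the n2cp and ncp drivers: HVAPI
 * registration, the queue page cache, and the per-cpu queue lookup
 * tables.  Reference counted under spu_lock, so whichever device
 * probes first does the work.
 */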
static int __devinit grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

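/* Drop one reference; the last caller tears down everything
 * grab_global_resources() set up.
 */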
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto * __devinit alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	if (np->cwq_info.ino_table) {
		kfree(np->cwq_info.ino_table);
		np->cwq_info.ino_table = NULL;
	}

	kfree(np);
}

static void __devinit n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

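/* Probe one CWQ (control word queue) crypto device: take a reference
 * on the shared resources, pull the queue and interrupt layout out of
 * the machine description, then register the crypto algorithms.
 */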
static int __devinit n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int __devexit n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau * __devinit alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	if (mp->mau_info.ino_table) {
		kfree(mp->mau_info.ino_table);
		mp->mau_info.ino_table = NULL;
	}

	kfree(mp);
}

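/* Probe one MAU (modular arithmetic unit) device.  Same shape as the
 * CWQ probe above, except that no algorithms are registered from this
 * path.
 */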
static int __devinit n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int __devexit n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

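/* Match the CWQ virtual device on Niagara2 ('n2') and on the later
 * 'vf' and 'kt' chip variants.
 */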
static struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

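/* The remove hooks are wrapped in __devexit_p() so they compile away
 * when hot-unplug support is configured out.
 */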
static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		= "n2cp",
		.owner		= THIS_MODULE,
		.of_match_table	= n2_crypto_match,
	},
	.probe		= n2_crypto_probe,
	.remove		= __devexit_p(n2_crypto_remove),
};

static struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		= "ncp",
		.owner		= THIS_MODULE,
		.of_match_table	= n2_mau_match,
	},
	.probe		= n2_mau_probe,
	.remove		= __devexit_p(n2_mau_remove),
};

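/* Register both platform drivers; if the MAU driver fails to register,
 * unwind the CWQ driver so the module loads all-or-nothing.
 */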
static int __init n2_init(void)
{
	int err = platform_driver_register(&n2_crypto_driver);

	if (!err) {
		err = platform_driver_register(&n2_mau_driver);
		if (err)
			platform_driver_unregister(&n2_crypto_driver);
	}
	return err;
}

static void __exit n2_exit(void)
{
	platform_driver_unregister(&n2_mau_driver);
	platform_driver_unregister(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);