Commit 1fbc9f46a024535d95c3d5f136901decd86b109e
Committed by Martin Schwidefsky
1 parent ab640db010
Exists in master and in 7 other branches
[S390] list usage cleanup in s390
Trivial cleanup: list_del(); list_add{,_tail}() is equivalent to list_move{,_tail}(). A semantic patch for Coccinelle can be found at www.cccmz.de/~snakebyte/list_move_tail.spatch

Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
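The equivalence this cleanup relies on can be shown with a small, self-contained user-space sketch (an illustration only; struct list_head and the helpers below are a minimal re-implementation modelled on the kernel's include/linux/list.h, not the kernel symbols themselves):

/*
 * Minimal circular doubly-linked list, demonstrating that
 * list_del() followed by list_add() has the same effect as list_move().
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

/* list_move(): unlink from current position, re-insert right after @head */
static void list_move(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add(e, head);
}

int main(void)
{
	struct list_head head, a, b;

	list_init(&head);
	list_add(&a, &head);	/* head -> a */
	list_add(&b, &head);	/* head -> b -> a */

	/* Two-call form used before this commit ... */
	list_del(&a);
	list_add(&a, &head);	/* head -> a -> b */

	/* ... and the single-call form it is replaced with. */
	list_move(&b, &head);	/* head -> b -> a */

	printf("first after head: %s\n", head.next == &b ? "b" : "a");
	return 0;
}

In the kernel's list.h, list_move{,_tail}() is itself defined as a list_del() followed by list_add{,_tail}(), which is why the substitution in this patch does not change behavior.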
Showing 1 changed file with 2 additions and 4 deletions
drivers/s390/crypto/zcrypt_api.c
1 | /* | 1 | /* |
2 | * linux/drivers/s390/crypto/zcrypt_api.c | 2 | * linux/drivers/s390/crypto/zcrypt_api.c |
3 | * | 3 | * |
4 | * zcrypt 2.1.0 | 4 | * zcrypt 2.1.0 |
5 | * | 5 | * |
6 | * Copyright (C) 2001, 2006 IBM Corporation | 6 | * Copyright (C) 2001, 2006 IBM Corporation |
7 | * Author(s): Robert Burroughs | 7 | * Author(s): Robert Burroughs |
8 | * Eric Rossman (edrossma@us.ibm.com) | 8 | * Eric Rossman (edrossma@us.ibm.com) |
9 | * Cornelia Huck <cornelia.huck@de.ibm.com> | 9 | * Cornelia Huck <cornelia.huck@de.ibm.com> |
10 | * | 10 | * |
11 | * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) | 11 | * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) |
12 | * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> | 12 | * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> |
13 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 13 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
14 | * | 14 | * |
15 | * This program is free software; you can redistribute it and/or modify | 15 | * This program is free software; you can redistribute it and/or modify |
16 | * it under the terms of the GNU General Public License as published by | 16 | * it under the terms of the GNU General Public License as published by |
17 | * the Free Software Foundation; either version 2, or (at your option) | 17 | * the Free Software Foundation; either version 2, or (at your option) |
18 | * any later version. | 18 | * any later version. |
19 | * | 19 | * |
20 | * This program is distributed in the hope that it will be useful, | 20 | * This program is distributed in the hope that it will be useful, |
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
23 | * GNU General Public License for more details. | 23 | * GNU General Public License for more details. |
24 | * | 24 | * |
25 | * You should have received a copy of the GNU General Public License | 25 | * You should have received a copy of the GNU General Public License |
26 | * along with this program; if not, write to the Free Software | 26 | * along with this program; if not, write to the Free Software |
27 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 27 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/miscdevice.h> | 33 | #include <linux/miscdevice.h> |
34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/compat.h> | 36 | #include <linux/compat.h> |
37 | #include <linux/smp_lock.h> | 37 | #include <linux/smp_lock.h> |
38 | #include <asm/atomic.h> | 38 | #include <asm/atomic.h> |
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <linux/hw_random.h> | 40 | #include <linux/hw_random.h> |
41 | 41 | ||
42 | #include "zcrypt_api.h" | 42 | #include "zcrypt_api.h" |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Module description. | 45 | * Module description. |
46 | */ | 46 | */ |
47 | MODULE_AUTHOR("IBM Corporation"); | 47 | MODULE_AUTHOR("IBM Corporation"); |
48 | MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " | 48 | MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " |
49 | "Copyright 2001, 2006 IBM Corporation"); | 49 | "Copyright 2001, 2006 IBM Corporation"); |
50 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
51 | 51 | ||
52 | static DEFINE_SPINLOCK(zcrypt_device_lock); | 52 | static DEFINE_SPINLOCK(zcrypt_device_lock); |
53 | static LIST_HEAD(zcrypt_device_list); | 53 | static LIST_HEAD(zcrypt_device_list); |
54 | static int zcrypt_device_count = 0; | 54 | static int zcrypt_device_count = 0; |
55 | static atomic_t zcrypt_open_count = ATOMIC_INIT(0); | 55 | static atomic_t zcrypt_open_count = ATOMIC_INIT(0); |
56 | 56 | ||
57 | static int zcrypt_rng_device_add(void); | 57 | static int zcrypt_rng_device_add(void); |
58 | static void zcrypt_rng_device_remove(void); | 58 | static void zcrypt_rng_device_remove(void); |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Device attributes common for all crypto devices. | 61 | * Device attributes common for all crypto devices. |
62 | */ | 62 | */ |
63 | static ssize_t zcrypt_type_show(struct device *dev, | 63 | static ssize_t zcrypt_type_show(struct device *dev, |
64 | struct device_attribute *attr, char *buf) | 64 | struct device_attribute *attr, char *buf) |
65 | { | 65 | { |
66 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; | 66 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; |
67 | return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); | 67 | return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); |
68 | } | 68 | } |
69 | 69 | ||
70 | static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); | 70 | static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); |
71 | 71 | ||
72 | static ssize_t zcrypt_online_show(struct device *dev, | 72 | static ssize_t zcrypt_online_show(struct device *dev, |
73 | struct device_attribute *attr, char *buf) | 73 | struct device_attribute *attr, char *buf) |
74 | { | 74 | { |
75 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; | 75 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; |
76 | return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); | 76 | return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); |
77 | } | 77 | } |
78 | 78 | ||
79 | static ssize_t zcrypt_online_store(struct device *dev, | 79 | static ssize_t zcrypt_online_store(struct device *dev, |
80 | struct device_attribute *attr, | 80 | struct device_attribute *attr, |
81 | const char *buf, size_t count) | 81 | const char *buf, size_t count) |
82 | { | 82 | { |
83 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; | 83 | struct zcrypt_device *zdev = to_ap_dev(dev)->private; |
84 | int online; | 84 | int online; |
85 | 85 | ||
86 | if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) | 86 | if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) |
87 | return -EINVAL; | 87 | return -EINVAL; |
88 | zdev->online = online; | 88 | zdev->online = online; |
89 | if (!online) | 89 | if (!online) |
90 | ap_flush_queue(zdev->ap_dev); | 90 | ap_flush_queue(zdev->ap_dev); |
91 | return count; | 91 | return count; |
92 | } | 92 | } |
93 | 93 | ||
94 | static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); | 94 | static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); |
95 | 95 | ||
96 | static struct attribute * zcrypt_device_attrs[] = { | 96 | static struct attribute * zcrypt_device_attrs[] = { |
97 | &dev_attr_type.attr, | 97 | &dev_attr_type.attr, |
98 | &dev_attr_online.attr, | 98 | &dev_attr_online.attr, |
99 | NULL, | 99 | NULL, |
100 | }; | 100 | }; |
101 | 101 | ||
102 | static struct attribute_group zcrypt_device_attr_group = { | 102 | static struct attribute_group zcrypt_device_attr_group = { |
103 | .attrs = zcrypt_device_attrs, | 103 | .attrs = zcrypt_device_attrs, |
104 | }; | 104 | }; |
105 | 105 | ||
106 | /** | 106 | /** |
107 | * __zcrypt_increase_preference(): Increase preference of a crypto device. | 107 | * __zcrypt_increase_preference(): Increase preference of a crypto device. |
108 | * @zdev: Pointer the crypto device | 108 | * @zdev: Pointer the crypto device |
109 | * | 109 | * |
110 | * Move the device towards the head of the device list. | 110 | * Move the device towards the head of the device list. |
111 | * Need to be called while holding the zcrypt device list lock. | 111 | * Need to be called while holding the zcrypt device list lock. |
112 | * Note: cards with speed_rating of 0 are kept at the end of the list. | 112 | * Note: cards with speed_rating of 0 are kept at the end of the list. |
113 | */ | 113 | */ |
114 | static void __zcrypt_increase_preference(struct zcrypt_device *zdev) | 114 | static void __zcrypt_increase_preference(struct zcrypt_device *zdev) |
115 | { | 115 | { |
116 | struct zcrypt_device *tmp; | 116 | struct zcrypt_device *tmp; |
117 | struct list_head *l; | 117 | struct list_head *l; |
118 | 118 | ||
119 | if (zdev->speed_rating == 0) | 119 | if (zdev->speed_rating == 0) |
120 | return; | 120 | return; |
121 | for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { | 121 | for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { |
122 | tmp = list_entry(l, struct zcrypt_device, list); | 122 | tmp = list_entry(l, struct zcrypt_device, list); |
123 | if ((tmp->request_count + 1) * tmp->speed_rating <= | 123 | if ((tmp->request_count + 1) * tmp->speed_rating <= |
124 | (zdev->request_count + 1) * zdev->speed_rating && | 124 | (zdev->request_count + 1) * zdev->speed_rating && |
125 | tmp->speed_rating != 0) | 125 | tmp->speed_rating != 0) |
126 | break; | 126 | break; |
127 | } | 127 | } |
128 | if (l == zdev->list.prev) | 128 | if (l == zdev->list.prev) |
129 | return; | 129 | return; |
130 | /* Move zdev behind l */ | 130 | /* Move zdev behind l */ |
131 | list_del(&zdev->list); | 131 | list_move(&zdev->list, l); |
132 | list_add(&zdev->list, l); | ||
133 | } | 132 | } |
134 | 133 | ||
135 | /** | 134 | /** |
136 | * __zcrypt_decrease_preference(): Decrease preference of a crypto device. | 135 | * __zcrypt_decrease_preference(): Decrease preference of a crypto device. |
137 | * @zdev: Pointer to a crypto device. | 136 | * @zdev: Pointer to a crypto device. |
138 | * | 137 | * |
139 | * Move the device towards the tail of the device list. | 138 | * Move the device towards the tail of the device list. |
140 | * Need to be called while holding the zcrypt device list lock. | 139 | * Need to be called while holding the zcrypt device list lock. |
141 | * Note: cards with speed_rating of 0 are kept at the end of the list. | 140 | * Note: cards with speed_rating of 0 are kept at the end of the list. |
142 | */ | 141 | */ |
143 | static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) | 142 | static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) |
144 | { | 143 | { |
145 | struct zcrypt_device *tmp; | 144 | struct zcrypt_device *tmp; |
146 | struct list_head *l; | 145 | struct list_head *l; |
147 | 146 | ||
148 | if (zdev->speed_rating == 0) | 147 | if (zdev->speed_rating == 0) |
149 | return; | 148 | return; |
150 | for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { | 149 | for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { |
151 | tmp = list_entry(l, struct zcrypt_device, list); | 150 | tmp = list_entry(l, struct zcrypt_device, list); |
152 | if ((tmp->request_count + 1) * tmp->speed_rating > | 151 | if ((tmp->request_count + 1) * tmp->speed_rating > |
153 | (zdev->request_count + 1) * zdev->speed_rating || | 152 | (zdev->request_count + 1) * zdev->speed_rating || |
154 | tmp->speed_rating == 0) | 153 | tmp->speed_rating == 0) |
155 | break; | 154 | break; |
156 | } | 155 | } |
157 | if (l == zdev->list.next) | 156 | if (l == zdev->list.next) |
158 | return; | 157 | return; |
159 | /* Move zdev before l */ | 158 | /* Move zdev before l */ |
160 | list_del(&zdev->list); | 159 | list_move_tail(&zdev->list, l); |
161 | list_add_tail(&zdev->list, l); | ||
162 | } | 160 | } |
163 | 161 | ||
164 | static void zcrypt_device_release(struct kref *kref) | 162 | static void zcrypt_device_release(struct kref *kref) |
165 | { | 163 | { |
166 | struct zcrypt_device *zdev = | 164 | struct zcrypt_device *zdev = |
167 | container_of(kref, struct zcrypt_device, refcount); | 165 | container_of(kref, struct zcrypt_device, refcount); |
168 | zcrypt_device_free(zdev); | 166 | zcrypt_device_free(zdev); |
169 | } | 167 | } |
170 | 168 | ||
171 | void zcrypt_device_get(struct zcrypt_device *zdev) | 169 | void zcrypt_device_get(struct zcrypt_device *zdev) |
172 | { | 170 | { |
173 | kref_get(&zdev->refcount); | 171 | kref_get(&zdev->refcount); |
174 | } | 172 | } |
175 | EXPORT_SYMBOL(zcrypt_device_get); | 173 | EXPORT_SYMBOL(zcrypt_device_get); |
176 | 174 | ||
177 | int zcrypt_device_put(struct zcrypt_device *zdev) | 175 | int zcrypt_device_put(struct zcrypt_device *zdev) |
178 | { | 176 | { |
179 | return kref_put(&zdev->refcount, zcrypt_device_release); | 177 | return kref_put(&zdev->refcount, zcrypt_device_release); |
180 | } | 178 | } |
181 | EXPORT_SYMBOL(zcrypt_device_put); | 179 | EXPORT_SYMBOL(zcrypt_device_put); |
182 | 180 | ||
183 | struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) | 181 | struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) |
184 | { | 182 | { |
185 | struct zcrypt_device *zdev; | 183 | struct zcrypt_device *zdev; |
186 | 184 | ||
187 | zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); | 185 | zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); |
188 | if (!zdev) | 186 | if (!zdev) |
189 | return NULL; | 187 | return NULL; |
190 | zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); | 188 | zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); |
191 | if (!zdev->reply.message) | 189 | if (!zdev->reply.message) |
192 | goto out_free; | 190 | goto out_free; |
193 | zdev->reply.length = max_response_size; | 191 | zdev->reply.length = max_response_size; |
194 | spin_lock_init(&zdev->lock); | 192 | spin_lock_init(&zdev->lock); |
195 | INIT_LIST_HEAD(&zdev->list); | 193 | INIT_LIST_HEAD(&zdev->list); |
196 | return zdev; | 194 | return zdev; |
197 | 195 | ||
198 | out_free: | 196 | out_free: |
199 | kfree(zdev); | 197 | kfree(zdev); |
200 | return NULL; | 198 | return NULL; |
201 | } | 199 | } |
202 | EXPORT_SYMBOL(zcrypt_device_alloc); | 200 | EXPORT_SYMBOL(zcrypt_device_alloc); |
203 | 201 | ||
204 | void zcrypt_device_free(struct zcrypt_device *zdev) | 202 | void zcrypt_device_free(struct zcrypt_device *zdev) |
205 | { | 203 | { |
206 | kfree(zdev->reply.message); | 204 | kfree(zdev->reply.message); |
207 | kfree(zdev); | 205 | kfree(zdev); |
208 | } | 206 | } |
209 | EXPORT_SYMBOL(zcrypt_device_free); | 207 | EXPORT_SYMBOL(zcrypt_device_free); |
210 | 208 | ||
211 | /** | 209 | /** |
212 | * zcrypt_device_register() - Register a crypto device. | 210 | * zcrypt_device_register() - Register a crypto device. |
213 | * @zdev: Pointer to a crypto device | 211 | * @zdev: Pointer to a crypto device |
214 | * | 212 | * |
215 | * Register a crypto device. Returns 0 if successful. | 213 | * Register a crypto device. Returns 0 if successful. |
216 | */ | 214 | */ |
217 | int zcrypt_device_register(struct zcrypt_device *zdev) | 215 | int zcrypt_device_register(struct zcrypt_device *zdev) |
218 | { | 216 | { |
219 | int rc; | 217 | int rc; |
220 | 218 | ||
221 | rc = sysfs_create_group(&zdev->ap_dev->device.kobj, | 219 | rc = sysfs_create_group(&zdev->ap_dev->device.kobj, |
222 | &zcrypt_device_attr_group); | 220 | &zcrypt_device_attr_group); |
223 | if (rc) | 221 | if (rc) |
224 | goto out; | 222 | goto out; |
225 | get_device(&zdev->ap_dev->device); | 223 | get_device(&zdev->ap_dev->device); |
226 | kref_init(&zdev->refcount); | 224 | kref_init(&zdev->refcount); |
227 | spin_lock_bh(&zcrypt_device_lock); | 225 | spin_lock_bh(&zcrypt_device_lock); |
228 | zdev->online = 1; /* New devices are online by default. */ | 226 | zdev->online = 1; /* New devices are online by default. */ |
229 | list_add_tail(&zdev->list, &zcrypt_device_list); | 227 | list_add_tail(&zdev->list, &zcrypt_device_list); |
230 | __zcrypt_increase_preference(zdev); | 228 | __zcrypt_increase_preference(zdev); |
231 | zcrypt_device_count++; | 229 | zcrypt_device_count++; |
232 | spin_unlock_bh(&zcrypt_device_lock); | 230 | spin_unlock_bh(&zcrypt_device_lock); |
233 | if (zdev->ops->rng) { | 231 | if (zdev->ops->rng) { |
234 | rc = zcrypt_rng_device_add(); | 232 | rc = zcrypt_rng_device_add(); |
235 | if (rc) | 233 | if (rc) |
236 | goto out_unregister; | 234 | goto out_unregister; |
237 | } | 235 | } |
238 | return 0; | 236 | return 0; |
239 | 237 | ||
240 | out_unregister: | 238 | out_unregister: |
241 | spin_lock_bh(&zcrypt_device_lock); | 239 | spin_lock_bh(&zcrypt_device_lock); |
242 | zcrypt_device_count--; | 240 | zcrypt_device_count--; |
243 | list_del_init(&zdev->list); | 241 | list_del_init(&zdev->list); |
244 | spin_unlock_bh(&zcrypt_device_lock); | 242 | spin_unlock_bh(&zcrypt_device_lock); |
245 | sysfs_remove_group(&zdev->ap_dev->device.kobj, | 243 | sysfs_remove_group(&zdev->ap_dev->device.kobj, |
246 | &zcrypt_device_attr_group); | 244 | &zcrypt_device_attr_group); |
247 | put_device(&zdev->ap_dev->device); | 245 | put_device(&zdev->ap_dev->device); |
248 | zcrypt_device_put(zdev); | 246 | zcrypt_device_put(zdev); |
249 | out: | 247 | out: |
250 | return rc; | 248 | return rc; |
251 | } | 249 | } |
252 | EXPORT_SYMBOL(zcrypt_device_register); | 250 | EXPORT_SYMBOL(zcrypt_device_register); |
253 | 251 | ||
254 | /** | 252 | /** |
255 | * zcrypt_device_unregister(): Unregister a crypto device. | 253 | * zcrypt_device_unregister(): Unregister a crypto device. |
256 | * @zdev: Pointer to crypto device | 254 | * @zdev: Pointer to crypto device |
257 | * | 255 | * |
258 | * Unregister a crypto device. | 256 | * Unregister a crypto device. |
259 | */ | 257 | */ |
260 | void zcrypt_device_unregister(struct zcrypt_device *zdev) | 258 | void zcrypt_device_unregister(struct zcrypt_device *zdev) |
261 | { | 259 | { |
262 | if (zdev->ops->rng) | 260 | if (zdev->ops->rng) |
263 | zcrypt_rng_device_remove(); | 261 | zcrypt_rng_device_remove(); |
264 | spin_lock_bh(&zcrypt_device_lock); | 262 | spin_lock_bh(&zcrypt_device_lock); |
265 | zcrypt_device_count--; | 263 | zcrypt_device_count--; |
266 | list_del_init(&zdev->list); | 264 | list_del_init(&zdev->list); |
267 | spin_unlock_bh(&zcrypt_device_lock); | 265 | spin_unlock_bh(&zcrypt_device_lock); |
268 | sysfs_remove_group(&zdev->ap_dev->device.kobj, | 266 | sysfs_remove_group(&zdev->ap_dev->device.kobj, |
269 | &zcrypt_device_attr_group); | 267 | &zcrypt_device_attr_group); |
270 | put_device(&zdev->ap_dev->device); | 268 | put_device(&zdev->ap_dev->device); |
271 | zcrypt_device_put(zdev); | 269 | zcrypt_device_put(zdev); |
272 | } | 270 | } |
273 | EXPORT_SYMBOL(zcrypt_device_unregister); | 271 | EXPORT_SYMBOL(zcrypt_device_unregister); |
274 | 272 | ||
275 | /** | 273 | /** |
276 | * zcrypt_read (): Not supported beyond zcrypt 1.3.1. | 274 | * zcrypt_read (): Not supported beyond zcrypt 1.3.1. |
277 | * | 275 | * |
278 | * This function is not supported beyond zcrypt 1.3.1. | 276 | * This function is not supported beyond zcrypt 1.3.1. |
279 | */ | 277 | */ |
280 | static ssize_t zcrypt_read(struct file *filp, char __user *buf, | 278 | static ssize_t zcrypt_read(struct file *filp, char __user *buf, |
281 | size_t count, loff_t *f_pos) | 279 | size_t count, loff_t *f_pos) |
282 | { | 280 | { |
283 | return -EPERM; | 281 | return -EPERM; |
284 | } | 282 | } |
285 | 283 | ||
286 | /** | 284 | /** |
287 | * zcrypt_write(): Not allowed. | 285 | * zcrypt_write(): Not allowed. |
288 | * | 286 | * |
289 | * Write is is not allowed | 287 | * Write is is not allowed |
290 | */ | 288 | */ |
291 | static ssize_t zcrypt_write(struct file *filp, const char __user *buf, | 289 | static ssize_t zcrypt_write(struct file *filp, const char __user *buf, |
292 | size_t count, loff_t *f_pos) | 290 | size_t count, loff_t *f_pos) |
293 | { | 291 | { |
294 | return -EPERM; | 292 | return -EPERM; |
295 | } | 293 | } |
296 | 294 | ||
297 | /** | 295 | /** |
298 | * zcrypt_open(): Count number of users. | 296 | * zcrypt_open(): Count number of users. |
299 | * | 297 | * |
300 | * Device open function to count number of users. | 298 | * Device open function to count number of users. |
301 | */ | 299 | */ |
302 | static int zcrypt_open(struct inode *inode, struct file *filp) | 300 | static int zcrypt_open(struct inode *inode, struct file *filp) |
303 | { | 301 | { |
304 | lock_kernel(); | 302 | lock_kernel(); |
305 | atomic_inc(&zcrypt_open_count); | 303 | atomic_inc(&zcrypt_open_count); |
306 | unlock_kernel(); | 304 | unlock_kernel(); |
307 | return 0; | 305 | return 0; |
308 | } | 306 | } |
309 | 307 | ||
310 | /** | 308 | /** |
311 | * zcrypt_release(): Count number of users. | 309 | * zcrypt_release(): Count number of users. |
312 | * | 310 | * |
313 | * Device close function to count number of users. | 311 | * Device close function to count number of users. |
314 | */ | 312 | */ |
315 | static int zcrypt_release(struct inode *inode, struct file *filp) | 313 | static int zcrypt_release(struct inode *inode, struct file *filp) |
316 | { | 314 | { |
317 | atomic_dec(&zcrypt_open_count); | 315 | atomic_dec(&zcrypt_open_count); |
318 | return 0; | 316 | return 0; |
319 | } | 317 | } |
320 | 318 | ||
321 | /* | 319 | /* |
322 | * zcrypt ioctls. | 320 | * zcrypt ioctls. |
323 | */ | 321 | */ |
324 | static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) | 322 | static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) |
325 | { | 323 | { |
326 | struct zcrypt_device *zdev; | 324 | struct zcrypt_device *zdev; |
327 | int rc; | 325 | int rc; |
328 | 326 | ||
329 | if (mex->outputdatalength < mex->inputdatalength) | 327 | if (mex->outputdatalength < mex->inputdatalength) |
330 | return -EINVAL; | 328 | return -EINVAL; |
331 | /* | 329 | /* |
332 | * As long as outputdatalength is big enough, we can set the | 330 | * As long as outputdatalength is big enough, we can set the |
333 | * outputdatalength equal to the inputdatalength, since that is the | 331 | * outputdatalength equal to the inputdatalength, since that is the |
334 | * number of bytes we will copy in any case | 332 | * number of bytes we will copy in any case |
335 | */ | 333 | */ |
336 | mex->outputdatalength = mex->inputdatalength; | 334 | mex->outputdatalength = mex->inputdatalength; |
337 | 335 | ||
338 | spin_lock_bh(&zcrypt_device_lock); | 336 | spin_lock_bh(&zcrypt_device_lock); |
339 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 337 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
340 | if (!zdev->online || | 338 | if (!zdev->online || |
341 | !zdev->ops->rsa_modexpo || | 339 | !zdev->ops->rsa_modexpo || |
342 | zdev->min_mod_size > mex->inputdatalength || | 340 | zdev->min_mod_size > mex->inputdatalength || |
343 | zdev->max_mod_size < mex->inputdatalength) | 341 | zdev->max_mod_size < mex->inputdatalength) |
344 | continue; | 342 | continue; |
345 | zcrypt_device_get(zdev); | 343 | zcrypt_device_get(zdev); |
346 | get_device(&zdev->ap_dev->device); | 344 | get_device(&zdev->ap_dev->device); |
347 | zdev->request_count++; | 345 | zdev->request_count++; |
348 | __zcrypt_decrease_preference(zdev); | 346 | __zcrypt_decrease_preference(zdev); |
349 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { | 347 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { |
350 | spin_unlock_bh(&zcrypt_device_lock); | 348 | spin_unlock_bh(&zcrypt_device_lock); |
351 | rc = zdev->ops->rsa_modexpo(zdev, mex); | 349 | rc = zdev->ops->rsa_modexpo(zdev, mex); |
352 | spin_lock_bh(&zcrypt_device_lock); | 350 | spin_lock_bh(&zcrypt_device_lock); |
353 | module_put(zdev->ap_dev->drv->driver.owner); | 351 | module_put(zdev->ap_dev->drv->driver.owner); |
354 | } | 352 | } |
355 | else | 353 | else |
356 | rc = -EAGAIN; | 354 | rc = -EAGAIN; |
357 | zdev->request_count--; | 355 | zdev->request_count--; |
358 | __zcrypt_increase_preference(zdev); | 356 | __zcrypt_increase_preference(zdev); |
359 | put_device(&zdev->ap_dev->device); | 357 | put_device(&zdev->ap_dev->device); |
360 | zcrypt_device_put(zdev); | 358 | zcrypt_device_put(zdev); |
361 | spin_unlock_bh(&zcrypt_device_lock); | 359 | spin_unlock_bh(&zcrypt_device_lock); |
362 | return rc; | 360 | return rc; |
363 | } | 361 | } |
364 | spin_unlock_bh(&zcrypt_device_lock); | 362 | spin_unlock_bh(&zcrypt_device_lock); |
365 | return -ENODEV; | 363 | return -ENODEV; |
366 | } | 364 | } |
367 | 365 | ||
368 | static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) | 366 | static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) |
369 | { | 367 | { |
370 | struct zcrypt_device *zdev; | 368 | struct zcrypt_device *zdev; |
371 | unsigned long long z1, z2, z3; | 369 | unsigned long long z1, z2, z3; |
372 | int rc, copied; | 370 | int rc, copied; |
373 | 371 | ||
374 | if (crt->outputdatalength < crt->inputdatalength || | 372 | if (crt->outputdatalength < crt->inputdatalength || |
375 | (crt->inputdatalength & 1)) | 373 | (crt->inputdatalength & 1)) |
376 | return -EINVAL; | 374 | return -EINVAL; |
377 | /* | 375 | /* |
378 | * As long as outputdatalength is big enough, we can set the | 376 | * As long as outputdatalength is big enough, we can set the |
379 | * outputdatalength equal to the inputdatalength, since that is the | 377 | * outputdatalength equal to the inputdatalength, since that is the |
380 | * number of bytes we will copy in any case | 378 | * number of bytes we will copy in any case |
381 | */ | 379 | */ |
382 | crt->outputdatalength = crt->inputdatalength; | 380 | crt->outputdatalength = crt->inputdatalength; |
383 | 381 | ||
384 | copied = 0; | 382 | copied = 0; |
385 | restart: | 383 | restart: |
386 | spin_lock_bh(&zcrypt_device_lock); | 384 | spin_lock_bh(&zcrypt_device_lock); |
387 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 385 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
388 | if (!zdev->online || | 386 | if (!zdev->online || |
389 | !zdev->ops->rsa_modexpo_crt || | 387 | !zdev->ops->rsa_modexpo_crt || |
390 | zdev->min_mod_size > crt->inputdatalength || | 388 | zdev->min_mod_size > crt->inputdatalength || |
391 | zdev->max_mod_size < crt->inputdatalength) | 389 | zdev->max_mod_size < crt->inputdatalength) |
392 | continue; | 390 | continue; |
393 | if (zdev->short_crt && crt->inputdatalength > 240) { | 391 | if (zdev->short_crt && crt->inputdatalength > 240) { |
394 | /* | 392 | /* |
395 | * Check inputdata for leading zeros for cards | 393 | * Check inputdata for leading zeros for cards |
396 | * that can't handle np_prime, bp_key, or | 394 | * that can't handle np_prime, bp_key, or |
397 | * u_mult_inv > 128 bytes. | 395 | * u_mult_inv > 128 bytes. |
398 | */ | 396 | */ |
399 | if (copied == 0) { | 397 | if (copied == 0) { |
400 | int len; | 398 | int len; |
401 | spin_unlock_bh(&zcrypt_device_lock); | 399 | spin_unlock_bh(&zcrypt_device_lock); |
402 | /* len is max 256 / 2 - 120 = 8 */ | 400 | /* len is max 256 / 2 - 120 = 8 */ |
403 | len = crt->inputdatalength / 2 - 120; | 401 | len = crt->inputdatalength / 2 - 120; |
404 | z1 = z2 = z3 = 0; | 402 | z1 = z2 = z3 = 0; |
405 | if (copy_from_user(&z1, crt->np_prime, len) || | 403 | if (copy_from_user(&z1, crt->np_prime, len) || |
406 | copy_from_user(&z2, crt->bp_key, len) || | 404 | copy_from_user(&z2, crt->bp_key, len) || |
407 | copy_from_user(&z3, crt->u_mult_inv, len)) | 405 | copy_from_user(&z3, crt->u_mult_inv, len)) |
408 | return -EFAULT; | 406 | return -EFAULT; |
409 | copied = 1; | 407 | copied = 1; |
410 | /* | 408 | /* |
411 | * We have to restart device lookup - | 409 | * We have to restart device lookup - |
412 | * the device list may have changed by now. | 410 | * the device list may have changed by now. |
413 | */ | 411 | */ |
414 | goto restart; | 412 | goto restart; |
415 | } | 413 | } |
416 | if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) | 414 | if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) |
417 | /* The device can't handle this request. */ | 415 | /* The device can't handle this request. */ |
418 | continue; | 416 | continue; |
419 | } | 417 | } |
420 | zcrypt_device_get(zdev); | 418 | zcrypt_device_get(zdev); |
421 | get_device(&zdev->ap_dev->device); | 419 | get_device(&zdev->ap_dev->device); |
422 | zdev->request_count++; | 420 | zdev->request_count++; |
423 | __zcrypt_decrease_preference(zdev); | 421 | __zcrypt_decrease_preference(zdev); |
424 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { | 422 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { |
425 | spin_unlock_bh(&zcrypt_device_lock); | 423 | spin_unlock_bh(&zcrypt_device_lock); |
426 | rc = zdev->ops->rsa_modexpo_crt(zdev, crt); | 424 | rc = zdev->ops->rsa_modexpo_crt(zdev, crt); |
427 | spin_lock_bh(&zcrypt_device_lock); | 425 | spin_lock_bh(&zcrypt_device_lock); |
428 | module_put(zdev->ap_dev->drv->driver.owner); | 426 | module_put(zdev->ap_dev->drv->driver.owner); |
429 | } | 427 | } |
430 | else | 428 | else |
431 | rc = -EAGAIN; | 429 | rc = -EAGAIN; |
432 | zdev->request_count--; | 430 | zdev->request_count--; |
433 | __zcrypt_increase_preference(zdev); | 431 | __zcrypt_increase_preference(zdev); |
434 | put_device(&zdev->ap_dev->device); | 432 | put_device(&zdev->ap_dev->device); |
435 | zcrypt_device_put(zdev); | 433 | zcrypt_device_put(zdev); |
436 | spin_unlock_bh(&zcrypt_device_lock); | 434 | spin_unlock_bh(&zcrypt_device_lock); |
437 | return rc; | 435 | return rc; |
438 | } | 436 | } |
439 | spin_unlock_bh(&zcrypt_device_lock); | 437 | spin_unlock_bh(&zcrypt_device_lock); |
440 | return -ENODEV; | 438 | return -ENODEV; |
441 | } | 439 | } |
442 | 440 | ||
443 | static long zcrypt_send_cprb(struct ica_xcRB *xcRB) | 441 | static long zcrypt_send_cprb(struct ica_xcRB *xcRB) |
444 | { | 442 | { |
445 | struct zcrypt_device *zdev; | 443 | struct zcrypt_device *zdev; |
446 | int rc; | 444 | int rc; |
447 | 445 | ||
448 | spin_lock_bh(&zcrypt_device_lock); | 446 | spin_lock_bh(&zcrypt_device_lock); |
449 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 447 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
450 | if (!zdev->online || !zdev->ops->send_cprb || | 448 | if (!zdev->online || !zdev->ops->send_cprb || |
451 | (xcRB->user_defined != AUTOSELECT && | 449 | (xcRB->user_defined != AUTOSELECT && |
452 | AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) | 450 | AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) |
453 | ) | 451 | ) |
454 | continue; | 452 | continue; |
455 | zcrypt_device_get(zdev); | 453 | zcrypt_device_get(zdev); |
456 | get_device(&zdev->ap_dev->device); | 454 | get_device(&zdev->ap_dev->device); |
457 | zdev->request_count++; | 455 | zdev->request_count++; |
458 | __zcrypt_decrease_preference(zdev); | 456 | __zcrypt_decrease_preference(zdev); |
459 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { | 457 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { |
460 | spin_unlock_bh(&zcrypt_device_lock); | 458 | spin_unlock_bh(&zcrypt_device_lock); |
461 | rc = zdev->ops->send_cprb(zdev, xcRB); | 459 | rc = zdev->ops->send_cprb(zdev, xcRB); |
462 | spin_lock_bh(&zcrypt_device_lock); | 460 | spin_lock_bh(&zcrypt_device_lock); |
463 | module_put(zdev->ap_dev->drv->driver.owner); | 461 | module_put(zdev->ap_dev->drv->driver.owner); |
464 | } | 462 | } |
465 | else | 463 | else |
466 | rc = -EAGAIN; | 464 | rc = -EAGAIN; |
467 | zdev->request_count--; | 465 | zdev->request_count--; |
468 | __zcrypt_increase_preference(zdev); | 466 | __zcrypt_increase_preference(zdev); |
469 | put_device(&zdev->ap_dev->device); | 467 | put_device(&zdev->ap_dev->device); |
470 | zcrypt_device_put(zdev); | 468 | zcrypt_device_put(zdev); |
471 | spin_unlock_bh(&zcrypt_device_lock); | 469 | spin_unlock_bh(&zcrypt_device_lock); |
472 | return rc; | 470 | return rc; |
473 | } | 471 | } |
474 | spin_unlock_bh(&zcrypt_device_lock); | 472 | spin_unlock_bh(&zcrypt_device_lock); |
475 | return -ENODEV; | 473 | return -ENODEV; |
476 | } | 474 | } |
477 | 475 | ||
478 | static long zcrypt_rng(char *buffer) | 476 | static long zcrypt_rng(char *buffer) |
479 | { | 477 | { |
480 | struct zcrypt_device *zdev; | 478 | struct zcrypt_device *zdev; |
481 | int rc; | 479 | int rc; |
482 | 480 | ||
483 | spin_lock_bh(&zcrypt_device_lock); | 481 | spin_lock_bh(&zcrypt_device_lock); |
484 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 482 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
485 | if (!zdev->online || !zdev->ops->rng) | 483 | if (!zdev->online || !zdev->ops->rng) |
486 | continue; | 484 | continue; |
487 | zcrypt_device_get(zdev); | 485 | zcrypt_device_get(zdev); |
488 | get_device(&zdev->ap_dev->device); | 486 | get_device(&zdev->ap_dev->device); |
489 | zdev->request_count++; | 487 | zdev->request_count++; |
490 | __zcrypt_decrease_preference(zdev); | 488 | __zcrypt_decrease_preference(zdev); |
491 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { | 489 | if (try_module_get(zdev->ap_dev->drv->driver.owner)) { |
492 | spin_unlock_bh(&zcrypt_device_lock); | 490 | spin_unlock_bh(&zcrypt_device_lock); |
493 | rc = zdev->ops->rng(zdev, buffer); | 491 | rc = zdev->ops->rng(zdev, buffer); |
494 | spin_lock_bh(&zcrypt_device_lock); | 492 | spin_lock_bh(&zcrypt_device_lock); |
495 | module_put(zdev->ap_dev->drv->driver.owner); | 493 | module_put(zdev->ap_dev->drv->driver.owner); |
496 | } else | 494 | } else |
497 | rc = -EAGAIN; | 495 | rc = -EAGAIN; |
498 | zdev->request_count--; | 496 | zdev->request_count--; |
499 | __zcrypt_increase_preference(zdev); | 497 | __zcrypt_increase_preference(zdev); |
500 | put_device(&zdev->ap_dev->device); | 498 | put_device(&zdev->ap_dev->device); |
501 | zcrypt_device_put(zdev); | 499 | zcrypt_device_put(zdev); |
502 | spin_unlock_bh(&zcrypt_device_lock); | 500 | spin_unlock_bh(&zcrypt_device_lock); |
503 | return rc; | 501 | return rc; |
504 | } | 502 | } |
505 | spin_unlock_bh(&zcrypt_device_lock); | 503 | spin_unlock_bh(&zcrypt_device_lock); |
506 | return -ENODEV; | 504 | return -ENODEV; |
507 | } | 505 | } |
508 | 506 | ||
509 | static void zcrypt_status_mask(char status[AP_DEVICES]) | 507 | static void zcrypt_status_mask(char status[AP_DEVICES]) |
510 | { | 508 | { |
511 | struct zcrypt_device *zdev; | 509 | struct zcrypt_device *zdev; |
512 | 510 | ||
513 | memset(status, 0, sizeof(char) * AP_DEVICES); | 511 | memset(status, 0, sizeof(char) * AP_DEVICES); |
514 | spin_lock_bh(&zcrypt_device_lock); | 512 | spin_lock_bh(&zcrypt_device_lock); |
515 | list_for_each_entry(zdev, &zcrypt_device_list, list) | 513 | list_for_each_entry(zdev, &zcrypt_device_list, list) |
516 | status[AP_QID_DEVICE(zdev->ap_dev->qid)] = | 514 | status[AP_QID_DEVICE(zdev->ap_dev->qid)] = |
517 | zdev->online ? zdev->user_space_type : 0x0d; | 515 | zdev->online ? zdev->user_space_type : 0x0d; |
518 | spin_unlock_bh(&zcrypt_device_lock); | 516 | spin_unlock_bh(&zcrypt_device_lock); |
519 | } | 517 | } |
520 | 518 | ||
521 | static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) | 519 | static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) |
522 | { | 520 | { |
523 | struct zcrypt_device *zdev; | 521 | struct zcrypt_device *zdev; |
524 | 522 | ||
525 | memset(qdepth, 0, sizeof(char) * AP_DEVICES); | 523 | memset(qdepth, 0, sizeof(char) * AP_DEVICES); |
526 | spin_lock_bh(&zcrypt_device_lock); | 524 | spin_lock_bh(&zcrypt_device_lock); |
527 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 525 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
528 | spin_lock(&zdev->ap_dev->lock); | 526 | spin_lock(&zdev->ap_dev->lock); |
529 | qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = | 527 | qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = |
530 | zdev->ap_dev->pendingq_count + | 528 | zdev->ap_dev->pendingq_count + |
531 | zdev->ap_dev->requestq_count; | 529 | zdev->ap_dev->requestq_count; |
532 | spin_unlock(&zdev->ap_dev->lock); | 530 | spin_unlock(&zdev->ap_dev->lock); |
533 | } | 531 | } |
534 | spin_unlock_bh(&zcrypt_device_lock); | 532 | spin_unlock_bh(&zcrypt_device_lock); |
535 | } | 533 | } |
536 | 534 | ||
537 | static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) | 535 | static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) |
538 | { | 536 | { |
539 | struct zcrypt_device *zdev; | 537 | struct zcrypt_device *zdev; |
540 | 538 | ||
541 | memset(reqcnt, 0, sizeof(int) * AP_DEVICES); | 539 | memset(reqcnt, 0, sizeof(int) * AP_DEVICES); |
542 | spin_lock_bh(&zcrypt_device_lock); | 540 | spin_lock_bh(&zcrypt_device_lock); |
543 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 541 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
544 | spin_lock(&zdev->ap_dev->lock); | 542 | spin_lock(&zdev->ap_dev->lock); |
545 | reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = | 543 | reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = |
546 | zdev->ap_dev->total_request_count; | 544 | zdev->ap_dev->total_request_count; |
547 | spin_unlock(&zdev->ap_dev->lock); | 545 | spin_unlock(&zdev->ap_dev->lock); |
548 | } | 546 | } |
549 | spin_unlock_bh(&zcrypt_device_lock); | 547 | spin_unlock_bh(&zcrypt_device_lock); |
550 | } | 548 | } |
551 | 549 | ||
552 | static int zcrypt_pendingq_count(void) | 550 | static int zcrypt_pendingq_count(void) |
553 | { | 551 | { |
554 | struct zcrypt_device *zdev; | 552 | struct zcrypt_device *zdev; |
555 | int pendingq_count = 0; | 553 | int pendingq_count = 0; |
556 | 554 | ||
557 | spin_lock_bh(&zcrypt_device_lock); | 555 | spin_lock_bh(&zcrypt_device_lock); |
558 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 556 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
559 | spin_lock(&zdev->ap_dev->lock); | 557 | spin_lock(&zdev->ap_dev->lock); |
560 | pendingq_count += zdev->ap_dev->pendingq_count; | 558 | pendingq_count += zdev->ap_dev->pendingq_count; |
561 | spin_unlock(&zdev->ap_dev->lock); | 559 | spin_unlock(&zdev->ap_dev->lock); |
562 | } | 560 | } |
563 | spin_unlock_bh(&zcrypt_device_lock); | 561 | spin_unlock_bh(&zcrypt_device_lock); |
564 | return pendingq_count; | 562 | return pendingq_count; |
565 | } | 563 | } |
566 | 564 | ||
567 | static int zcrypt_requestq_count(void) | 565 | static int zcrypt_requestq_count(void) |
568 | { | 566 | { |
569 | struct zcrypt_device *zdev; | 567 | struct zcrypt_device *zdev; |
570 | int requestq_count = 0; | 568 | int requestq_count = 0; |
571 | 569 | ||
572 | spin_lock_bh(&zcrypt_device_lock); | 570 | spin_lock_bh(&zcrypt_device_lock); |
573 | list_for_each_entry(zdev, &zcrypt_device_list, list) { | 571 | list_for_each_entry(zdev, &zcrypt_device_list, list) { |
574 | spin_lock(&zdev->ap_dev->lock); | 572 | spin_lock(&zdev->ap_dev->lock); |
575 | requestq_count += zdev->ap_dev->requestq_count; | 573 | requestq_count += zdev->ap_dev->requestq_count; |
576 | spin_unlock(&zdev->ap_dev->lock); | 574 | spin_unlock(&zdev->ap_dev->lock); |
577 | } | 575 | } |
578 | spin_unlock_bh(&zcrypt_device_lock); | 576 | spin_unlock_bh(&zcrypt_device_lock); |
579 | return requestq_count; | 577 | return requestq_count; |
580 | } | 578 | } |
581 | 579 | ||
582 | static int zcrypt_count_type(int type) | 580 | static int zcrypt_count_type(int type) |
583 | { | 581 | { |
584 | struct zcrypt_device *zdev; | 582 | struct zcrypt_device *zdev; |
585 | int device_count = 0; | 583 | int device_count = 0; |
586 | 584 | ||
587 | spin_lock_bh(&zcrypt_device_lock); | 585 | spin_lock_bh(&zcrypt_device_lock); |
588 | list_for_each_entry(zdev, &zcrypt_device_list, list) | 586 | list_for_each_entry(zdev, &zcrypt_device_list, list) |
589 | if (zdev->user_space_type == type) | 587 | if (zdev->user_space_type == type) |
590 | device_count++; | 588 | device_count++; |
591 | spin_unlock_bh(&zcrypt_device_lock); | 589 | spin_unlock_bh(&zcrypt_device_lock); |
592 | return device_count; | 590 | return device_count; |
593 | } | 591 | } |
594 | 592 | ||
595 | /** | 593 | /** |
596 | * zcrypt_ica_status(): Old, depracted combi status call. | 594 | * zcrypt_ica_status(): Old, depracted combi status call. |
597 | * | 595 | * |
598 | * Old, deprecated combi status call. | 596 | * Old, deprecated combi status call. |
599 | */ | 597 | */ |
600 | static long zcrypt_ica_status(struct file *filp, unsigned long arg) | 598 | static long zcrypt_ica_status(struct file *filp, unsigned long arg) |
601 | { | 599 | { |
602 | struct ica_z90_status *pstat; | 600 | struct ica_z90_status *pstat; |
603 | int ret; | 601 | int ret; |
604 | 602 | ||
605 | pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); | 603 | pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); |
606 | if (!pstat) | 604 | if (!pstat) |
607 | return -ENOMEM; | 605 | return -ENOMEM; |
608 | pstat->totalcount = zcrypt_device_count; | 606 | pstat->totalcount = zcrypt_device_count; |
609 | pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); | 607 | pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); |
610 | pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); | 608 | pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); |
611 | pstat->requestqWaitCount = zcrypt_requestq_count(); | 609 | pstat->requestqWaitCount = zcrypt_requestq_count(); |
612 | pstat->pendingqWaitCount = zcrypt_pendingq_count(); | 610 | pstat->pendingqWaitCount = zcrypt_pendingq_count(); |
613 | pstat->totalOpenCount = atomic_read(&zcrypt_open_count); | 611 | pstat->totalOpenCount = atomic_read(&zcrypt_open_count); |
614 | pstat->cryptoDomain = ap_domain_index; | 612 | pstat->cryptoDomain = ap_domain_index; |
615 | zcrypt_status_mask(pstat->status); | 613 | zcrypt_status_mask(pstat->status); |
616 | zcrypt_qdepth_mask(pstat->qdepth); | 614 | zcrypt_qdepth_mask(pstat->qdepth); |
617 | ret = 0; | 615 | ret = 0; |
618 | if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) | 616 | if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) |
619 | ret = -EFAULT; | 617 | ret = -EFAULT; |
620 | kfree(pstat); | 618 | kfree(pstat); |
621 | return ret; | 619 | return ret; |
622 | } | 620 | } |
623 | 621 | ||
624 | static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, | 622 | static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, |
625 | unsigned long arg) | 623 | unsigned long arg) |
626 | { | 624 | { |
627 | int rc; | 625 | int rc; |
628 | 626 | ||
629 | switch (cmd) { | 627 | switch (cmd) { |
630 | case ICARSAMODEXPO: { | 628 | case ICARSAMODEXPO: { |
631 | struct ica_rsa_modexpo __user *umex = (void __user *) arg; | 629 | struct ica_rsa_modexpo __user *umex = (void __user *) arg; |
632 | struct ica_rsa_modexpo mex; | 630 | struct ica_rsa_modexpo mex; |
633 | if (copy_from_user(&mex, umex, sizeof(mex))) | 631 | if (copy_from_user(&mex, umex, sizeof(mex))) |
634 | return -EFAULT; | 632 | return -EFAULT; |
635 | do { | 633 | do { |
636 | rc = zcrypt_rsa_modexpo(&mex); | 634 | rc = zcrypt_rsa_modexpo(&mex); |
637 | } while (rc == -EAGAIN); | 635 | } while (rc == -EAGAIN); |
638 | if (rc) | 636 | if (rc) |
639 | return rc; | 637 | return rc; |
640 | return put_user(mex.outputdatalength, &umex->outputdatalength); | 638 | return put_user(mex.outputdatalength, &umex->outputdatalength); |
641 | } | 639 | } |
642 | case ICARSACRT: { | 640 | case ICARSACRT: { |
643 | struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; | 641 | struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; |
644 | struct ica_rsa_modexpo_crt crt; | 642 | struct ica_rsa_modexpo_crt crt; |
645 | if (copy_from_user(&crt, ucrt, sizeof(crt))) | 643 | if (copy_from_user(&crt, ucrt, sizeof(crt))) |
646 | return -EFAULT; | 644 | return -EFAULT; |
647 | do { | 645 | do { |
648 | rc = zcrypt_rsa_crt(&crt); | 646 | rc = zcrypt_rsa_crt(&crt); |
649 | } while (rc == -EAGAIN); | 647 | } while (rc == -EAGAIN); |
650 | if (rc) | 648 | if (rc) |
651 | return rc; | 649 | return rc; |
652 | return put_user(crt.outputdatalength, &ucrt->outputdatalength); | 650 | return put_user(crt.outputdatalength, &ucrt->outputdatalength); |
653 | } | 651 | } |
654 | case ZSECSENDCPRB: { | 652 | case ZSECSENDCPRB: { |
655 | struct ica_xcRB __user *uxcRB = (void __user *) arg; | 653 | struct ica_xcRB __user *uxcRB = (void __user *) arg; |
656 | struct ica_xcRB xcRB; | 654 | struct ica_xcRB xcRB; |
657 | if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) | 655 | if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) |
658 | return -EFAULT; | 656 | return -EFAULT; |
659 | do { | 657 | do { |
660 | rc = zcrypt_send_cprb(&xcRB); | 658 | rc = zcrypt_send_cprb(&xcRB); |
661 | } while (rc == -EAGAIN); | 659 | } while (rc == -EAGAIN); |
662 | if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) | 660 | if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) |
663 | return -EFAULT; | 661 | return -EFAULT; |
664 | return rc; | 662 | return rc; |
665 | } | 663 | } |
666 | case Z90STAT_STATUS_MASK: { | 664 | case Z90STAT_STATUS_MASK: { |
667 | char status[AP_DEVICES]; | 665 | char status[AP_DEVICES]; |
668 | zcrypt_status_mask(status); | 666 | zcrypt_status_mask(status); |
669 | if (copy_to_user((char __user *) arg, status, | 667 | if (copy_to_user((char __user *) arg, status, |
670 | sizeof(char) * AP_DEVICES)) | 668 | sizeof(char) * AP_DEVICES)) |
671 | return -EFAULT; | 669 | return -EFAULT; |
672 | return 0; | 670 | return 0; |
673 | } | 671 | } |
674 | case Z90STAT_QDEPTH_MASK: { | 672 | case Z90STAT_QDEPTH_MASK: { |
675 | char qdepth[AP_DEVICES]; | 673 | char qdepth[AP_DEVICES]; |
676 | zcrypt_qdepth_mask(qdepth); | 674 | zcrypt_qdepth_mask(qdepth); |
677 | if (copy_to_user((char __user *) arg, qdepth, | 675 | if (copy_to_user((char __user *) arg, qdepth, |
678 | sizeof(char) * AP_DEVICES)) | 676 | sizeof(char) * AP_DEVICES)) |
679 | return -EFAULT; | 677 | return -EFAULT; |
680 | return 0; | 678 | return 0; |
681 | } | 679 | } |
682 | case Z90STAT_PERDEV_REQCNT: { | 680 | case Z90STAT_PERDEV_REQCNT: { |
683 | int reqcnt[AP_DEVICES]; | 681 | int reqcnt[AP_DEVICES]; |
684 | zcrypt_perdev_reqcnt(reqcnt); | 682 | zcrypt_perdev_reqcnt(reqcnt); |
685 | if (copy_to_user((int __user *) arg, reqcnt, | 683 | if (copy_to_user((int __user *) arg, reqcnt, |
686 | sizeof(int) * AP_DEVICES)) | 684 | sizeof(int) * AP_DEVICES)) |
687 | return -EFAULT; | 685 | return -EFAULT; |
688 | return 0; | 686 | return 0; |
689 | } | 687 | } |
690 | case Z90STAT_REQUESTQ_COUNT: | 688 | case Z90STAT_REQUESTQ_COUNT: |
691 | return put_user(zcrypt_requestq_count(), (int __user *) arg); | 689 | return put_user(zcrypt_requestq_count(), (int __user *) arg); |
692 | case Z90STAT_PENDINGQ_COUNT: | 690 | case Z90STAT_PENDINGQ_COUNT: |
693 | return put_user(zcrypt_pendingq_count(), (int __user *) arg); | 691 | return put_user(zcrypt_pendingq_count(), (int __user *) arg); |
694 | case Z90STAT_TOTALOPEN_COUNT: | 692 | case Z90STAT_TOTALOPEN_COUNT: |
695 | return put_user(atomic_read(&zcrypt_open_count), | 693 | return put_user(atomic_read(&zcrypt_open_count), |
696 | (int __user *) arg); | 694 | (int __user *) arg); |
697 | case Z90STAT_DOMAIN_INDEX: | 695 | case Z90STAT_DOMAIN_INDEX: |
698 | return put_user(ap_domain_index, (int __user *) arg); | 696 | return put_user(ap_domain_index, (int __user *) arg); |
699 | /* | 697 | /* |
700 | * Deprecated ioctls. Don't add another device count ioctl, | 698 | * Deprecated ioctls. Don't add another device count ioctl, |
701 | * you can count them yourself in the user space with the | 699 | * you can count them yourself in the user space with the |
702 | * output of the Z90STAT_STATUS_MASK ioctl. | 700 | * output of the Z90STAT_STATUS_MASK ioctl. |
703 | */ | 701 | */ |
704 | case ICAZ90STATUS: | 702 | case ICAZ90STATUS: |
705 | return zcrypt_ica_status(filp, arg); | 703 | return zcrypt_ica_status(filp, arg); |
706 | case Z90STAT_TOTALCOUNT: | 704 | case Z90STAT_TOTALCOUNT: |
707 | return put_user(zcrypt_device_count, (int __user *) arg); | 705 | return put_user(zcrypt_device_count, (int __user *) arg); |
708 | case Z90STAT_PCICACOUNT: | 706 | case Z90STAT_PCICACOUNT: |
709 | return put_user(zcrypt_count_type(ZCRYPT_PCICA), | 707 | return put_user(zcrypt_count_type(ZCRYPT_PCICA), |
710 | (int __user *) arg); | 708 | (int __user *) arg); |
711 | case Z90STAT_PCICCCOUNT: | 709 | case Z90STAT_PCICCCOUNT: |
712 | return put_user(zcrypt_count_type(ZCRYPT_PCICC), | 710 | return put_user(zcrypt_count_type(ZCRYPT_PCICC), |
713 | (int __user *) arg); | 711 | (int __user *) arg); |
714 | case Z90STAT_PCIXCCMCL2COUNT: | 712 | case Z90STAT_PCIXCCMCL2COUNT: |
715 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), | 713 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), |
716 | (int __user *) arg); | 714 | (int __user *) arg); |
717 | case Z90STAT_PCIXCCMCL3COUNT: | 715 | case Z90STAT_PCIXCCMCL3COUNT: |
718 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), | 716 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), |
719 | (int __user *) arg); | 717 | (int __user *) arg); |
720 | case Z90STAT_PCIXCCCOUNT: | 718 | case Z90STAT_PCIXCCCOUNT: |
721 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + | 719 | return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + |
722 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), | 720 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), |
723 | (int __user *) arg); | 721 | (int __user *) arg); |
724 | case Z90STAT_CEX2CCOUNT: | 722 | case Z90STAT_CEX2CCOUNT: |
725 | return put_user(zcrypt_count_type(ZCRYPT_CEX2C), | 723 | return put_user(zcrypt_count_type(ZCRYPT_CEX2C), |
726 | (int __user *) arg); | 724 | (int __user *) arg); |
727 | case Z90STAT_CEX2ACOUNT: | 725 | case Z90STAT_CEX2ACOUNT: |
728 | return put_user(zcrypt_count_type(ZCRYPT_CEX2A), | 726 | return put_user(zcrypt_count_type(ZCRYPT_CEX2A), |
729 | (int __user *) arg); | 727 | (int __user *) arg); |
730 | default: | 728 | default: |
731 | /* unknown ioctl number */ | 729 | /* unknown ioctl number */ |
732 | return -ENOIOCTLCMD; | 730 | return -ENOIOCTLCMD; |
733 | } | 731 | } |
734 | } | 732 | } |
735 | 733 | ||
736 | #ifdef CONFIG_COMPAT | 734 | #ifdef CONFIG_COMPAT |
737 | /* | 735 | /* |
738 | * ioctl32 conversion routines | 736 | * ioctl32 conversion routines |
739 | */ | 737 | */ |
740 | struct compat_ica_rsa_modexpo { | 738 | struct compat_ica_rsa_modexpo { |
741 | compat_uptr_t inputdata; | 739 | compat_uptr_t inputdata; |
742 | unsigned int inputdatalength; | 740 | unsigned int inputdatalength; |
743 | compat_uptr_t outputdata; | 741 | compat_uptr_t outputdata; |
744 | unsigned int outputdatalength; | 742 | unsigned int outputdatalength; |
745 | compat_uptr_t b_key; | 743 | compat_uptr_t b_key; |
746 | compat_uptr_t n_modulus; | 744 | compat_uptr_t n_modulus; |
747 | }; | 745 | }; |
748 | 746 | ||
749 | static long trans_modexpo32(struct file *filp, unsigned int cmd, | 747 | static long trans_modexpo32(struct file *filp, unsigned int cmd, |
750 | unsigned long arg) | 748 | unsigned long arg) |
751 | { | 749 | { |
752 | struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); | 750 | struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); |
753 | struct compat_ica_rsa_modexpo mex32; | 751 | struct compat_ica_rsa_modexpo mex32; |
754 | struct ica_rsa_modexpo mex64; | 752 | struct ica_rsa_modexpo mex64; |
755 | long rc; | 753 | long rc; |
756 | 754 | ||
757 | if (copy_from_user(&mex32, umex32, sizeof(mex32))) | 755 | if (copy_from_user(&mex32, umex32, sizeof(mex32))) |
758 | return -EFAULT; | 756 | return -EFAULT; |
759 | mex64.inputdata = compat_ptr(mex32.inputdata); | 757 | mex64.inputdata = compat_ptr(mex32.inputdata); |
760 | mex64.inputdatalength = mex32.inputdatalength; | 758 | mex64.inputdatalength = mex32.inputdatalength; |
761 | mex64.outputdata = compat_ptr(mex32.outputdata); | 759 | mex64.outputdata = compat_ptr(mex32.outputdata); |
762 | mex64.outputdatalength = mex32.outputdatalength; | 760 | mex64.outputdatalength = mex32.outputdatalength; |
763 | mex64.b_key = compat_ptr(mex32.b_key); | 761 | mex64.b_key = compat_ptr(mex32.b_key); |
764 | mex64.n_modulus = compat_ptr(mex32.n_modulus); | 762 | mex64.n_modulus = compat_ptr(mex32.n_modulus); |
765 | do { | 763 | do { |
766 | rc = zcrypt_rsa_modexpo(&mex64); | 764 | rc = zcrypt_rsa_modexpo(&mex64); |
767 | } while (rc == -EAGAIN); | 765 | } while (rc == -EAGAIN); |
768 | if (!rc) | 766 | if (!rc) |
769 | rc = put_user(mex64.outputdatalength, | 767 | rc = put_user(mex64.outputdatalength, |
770 | &umex32->outputdatalength); | 768 | &umex32->outputdatalength); |
771 | return rc; | 769 | return rc; |
772 | } | 770 | } |
773 | 771 | ||
774 | struct compat_ica_rsa_modexpo_crt { | 772 | struct compat_ica_rsa_modexpo_crt { |
775 | compat_uptr_t inputdata; | 773 | compat_uptr_t inputdata; |
776 | unsigned int inputdatalength; | 774 | unsigned int inputdatalength; |
777 | compat_uptr_t outputdata; | 775 | compat_uptr_t outputdata; |
778 | unsigned int outputdatalength; | 776 | unsigned int outputdatalength; |
779 | compat_uptr_t bp_key; | 777 | compat_uptr_t bp_key; |
780 | compat_uptr_t bq_key; | 778 | compat_uptr_t bq_key; |
781 | compat_uptr_t np_prime; | 779 | compat_uptr_t np_prime; |
782 | compat_uptr_t nq_prime; | 780 | compat_uptr_t nq_prime; |
783 | compat_uptr_t u_mult_inv; | 781 | compat_uptr_t u_mult_inv; |
784 | }; | 782 | }; |
785 | 783 | ||
786 | static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, | 784 | static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, |
787 | unsigned long arg) | 785 | unsigned long arg) |
788 | { | 786 | { |
789 | struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); | 787 | struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); |
790 | struct compat_ica_rsa_modexpo_crt crt32; | 788 | struct compat_ica_rsa_modexpo_crt crt32; |
791 | struct ica_rsa_modexpo_crt crt64; | 789 | struct ica_rsa_modexpo_crt crt64; |
792 | long rc; | 790 | long rc; |
793 | 791 | ||
794 | if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) | 792 | if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) |
795 | return -EFAULT; | 793 | return -EFAULT; |
796 | crt64.inputdata = compat_ptr(crt32.inputdata); | 794 | crt64.inputdata = compat_ptr(crt32.inputdata); |
797 | crt64.inputdatalength = crt32.inputdatalength; | 795 | crt64.inputdatalength = crt32.inputdatalength; |
798 | crt64.outputdata= compat_ptr(crt32.outputdata); | 796 | crt64.outputdata= compat_ptr(crt32.outputdata); |
799 | crt64.outputdatalength = crt32.outputdatalength; | 797 | crt64.outputdatalength = crt32.outputdatalength; |
800 | crt64.bp_key = compat_ptr(crt32.bp_key); | 798 | crt64.bp_key = compat_ptr(crt32.bp_key); |
801 | crt64.bq_key = compat_ptr(crt32.bq_key); | 799 | crt64.bq_key = compat_ptr(crt32.bq_key); |
802 | crt64.np_prime = compat_ptr(crt32.np_prime); | 800 | crt64.np_prime = compat_ptr(crt32.np_prime); |
803 | crt64.nq_prime = compat_ptr(crt32.nq_prime); | 801 | crt64.nq_prime = compat_ptr(crt32.nq_prime); |
804 | crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); | 802 | crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); |
805 | do { | 803 | do { |
806 | rc = zcrypt_rsa_crt(&crt64); | 804 | rc = zcrypt_rsa_crt(&crt64); |
807 | } while (rc == -EAGAIN); | 805 | } while (rc == -EAGAIN); |
808 | if (!rc) | 806 | if (!rc) |
809 | rc = put_user(crt64.outputdatalength, | 807 | rc = put_user(crt64.outputdatalength, |
810 | &ucrt32->outputdatalength); | 808 | &ucrt32->outputdatalength); |
811 | return rc; | 809 | return rc; |
812 | } | 810 | } |
813 | 811 | ||
814 | struct compat_ica_xcRB { | 812 | struct compat_ica_xcRB { |
815 | unsigned short agent_ID; | 813 | unsigned short agent_ID; |
816 | unsigned int user_defined; | 814 | unsigned int user_defined; |
817 | unsigned short request_ID; | 815 | unsigned short request_ID; |
818 | unsigned int request_control_blk_length; | 816 | unsigned int request_control_blk_length; |
819 | unsigned char padding1[16 - sizeof (compat_uptr_t)]; | 817 | unsigned char padding1[16 - sizeof (compat_uptr_t)]; |
820 | compat_uptr_t request_control_blk_addr; | 818 | compat_uptr_t request_control_blk_addr; |
821 | unsigned int request_data_length; | 819 | unsigned int request_data_length; |
822 | char padding2[16 - sizeof (compat_uptr_t)]; | 820 | char padding2[16 - sizeof (compat_uptr_t)]; |
823 | compat_uptr_t request_data_address; | 821 | compat_uptr_t request_data_address; |
824 | unsigned int reply_control_blk_length; | 822 | unsigned int reply_control_blk_length; |
825 | char padding3[16 - sizeof (compat_uptr_t)]; | 823 | char padding3[16 - sizeof (compat_uptr_t)]; |
826 | compat_uptr_t reply_control_blk_addr; | 824 | compat_uptr_t reply_control_blk_addr; |
827 | unsigned int reply_data_length; | 825 | unsigned int reply_data_length; |
828 | char padding4[16 - sizeof (compat_uptr_t)]; | 826 | char padding4[16 - sizeof (compat_uptr_t)]; |
829 | compat_uptr_t reply_data_addr; | 827 | compat_uptr_t reply_data_addr; |
830 | unsigned short priority_window; | 828 | unsigned short priority_window; |
831 | unsigned int status; | 829 | unsigned int status; |
832 | } __attribute__((packed)); | 830 | } __attribute__((packed)); |
833 | 831 | ||
834 | static long trans_xcRB32(struct file *filp, unsigned int cmd, | 832 | static long trans_xcRB32(struct file *filp, unsigned int cmd, |
835 | unsigned long arg) | 833 | unsigned long arg) |
836 | { | 834 | { |
837 | struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); | 835 | struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); |
838 | struct compat_ica_xcRB xcRB32; | 836 | struct compat_ica_xcRB xcRB32; |
839 | struct ica_xcRB xcRB64; | 837 | struct ica_xcRB xcRB64; |
840 | long rc; | 838 | long rc; |
841 | 839 | ||
842 | if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) | 840 | if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) |
843 | return -EFAULT; | 841 | return -EFAULT; |
844 | xcRB64.agent_ID = xcRB32.agent_ID; | 842 | xcRB64.agent_ID = xcRB32.agent_ID; |
845 | xcRB64.user_defined = xcRB32.user_defined; | 843 | xcRB64.user_defined = xcRB32.user_defined; |
846 | xcRB64.request_ID = xcRB32.request_ID; | 844 | xcRB64.request_ID = xcRB32.request_ID; |
847 | xcRB64.request_control_blk_length = | 845 | xcRB64.request_control_blk_length = |
848 | xcRB32.request_control_blk_length; | 846 | xcRB32.request_control_blk_length; |
849 | xcRB64.request_control_blk_addr = | 847 | xcRB64.request_control_blk_addr = |
850 | compat_ptr(xcRB32.request_control_blk_addr); | 848 | compat_ptr(xcRB32.request_control_blk_addr); |
851 | xcRB64.request_data_length = | 849 | xcRB64.request_data_length = |
852 | xcRB32.request_data_length; | 850 | xcRB32.request_data_length; |
853 | xcRB64.request_data_address = | 851 | xcRB64.request_data_address = |
854 | compat_ptr(xcRB32.request_data_address); | 852 | compat_ptr(xcRB32.request_data_address); |
855 | xcRB64.reply_control_blk_length = | 853 | xcRB64.reply_control_blk_length = |
856 | xcRB32.reply_control_blk_length; | 854 | xcRB32.reply_control_blk_length; |
857 | xcRB64.reply_control_blk_addr = | 855 | xcRB64.reply_control_blk_addr = |
858 | compat_ptr(xcRB32.reply_control_blk_addr); | 856 | compat_ptr(xcRB32.reply_control_blk_addr); |
859 | xcRB64.reply_data_length = xcRB32.reply_data_length; | 857 | xcRB64.reply_data_length = xcRB32.reply_data_length; |
860 | xcRB64.reply_data_addr = | 858 | xcRB64.reply_data_addr = |
861 | compat_ptr(xcRB32.reply_data_addr); | 859 | compat_ptr(xcRB32.reply_data_addr); |
862 | xcRB64.priority_window = xcRB32.priority_window; | 860 | xcRB64.priority_window = xcRB32.priority_window; |
863 | xcRB64.status = xcRB32.status; | 861 | xcRB64.status = xcRB32.status; |
864 | do { | 862 | do { |
865 | rc = zcrypt_send_cprb(&xcRB64); | 863 | rc = zcrypt_send_cprb(&xcRB64); |
866 | } while (rc == -EAGAIN); | 864 | } while (rc == -EAGAIN); |
867 | xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; | 865 | xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; |
868 | xcRB32.reply_data_length = xcRB64.reply_data_length; | 866 | xcRB32.reply_data_length = xcRB64.reply_data_length; |
869 | xcRB32.status = xcRB64.status; | 867 | xcRB32.status = xcRB64.status; |
870 | if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) | 868 | if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) |
871 | return -EFAULT; | 869 | return -EFAULT; |
872 | return rc; | 870 | return rc; |
873 | } | 871 | } |
874 | 872 | ||
875 | static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, | 873 | static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, |
876 | unsigned long arg) | 874 | unsigned long arg) |
877 | { | 875 | { |
878 | if (cmd == ICARSAMODEXPO) | 876 | if (cmd == ICARSAMODEXPO) |
879 | return trans_modexpo32(filp, cmd, arg); | 877 | return trans_modexpo32(filp, cmd, arg); |
880 | if (cmd == ICARSACRT) | 878 | if (cmd == ICARSACRT) |
881 | return trans_modexpo_crt32(filp, cmd, arg); | 879 | return trans_modexpo_crt32(filp, cmd, arg); |
882 | if (cmd == ZSECSENDCPRB) | 880 | if (cmd == ZSECSENDCPRB) |
883 | return trans_xcRB32(filp, cmd, arg); | 881 | return trans_xcRB32(filp, cmd, arg); |
884 | return zcrypt_unlocked_ioctl(filp, cmd, arg); | 882 | return zcrypt_unlocked_ioctl(filp, cmd, arg); |
885 | } | 883 | } |
886 | #endif | 884 | #endif |
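
The three translation helpers above share one shape: copy the 32-bit layout in from user space, widen every compat_uptr_t with compat_ptr(), drive the same backend as the native ioctl while it keeps returning -EAGAIN, and copy back only the output fields the 32-bit caller can observe. A minimal sketch of that pattern, using a made-up foo request and a hypothetical foo_do_request() backend rather than any of the zcrypt structures:

	#include <linux/compat.h>
	#include <linux/fs.h>
	#include <linux/uaccess.h>

	/* Hypothetical request pair -- illustration only, not part of zcrypt. */
	struct compat_foo_req {
		compat_uptr_t buf;		/* 32-bit user pointer */
		unsigned int  buflen;
	};

	struct foo_req {
		void __user *buf;		/* native user pointer */
		unsigned int buflen;
	};

	long foo_do_request(struct foo_req *req);	/* hypothetical backend */

	static long trans_foo32(struct file *filp, unsigned int cmd,
				unsigned long arg)
	{
		struct compat_foo_req __user *ureq32 = compat_ptr(arg);
		struct compat_foo_req req32;
		struct foo_req req64;
		long rc;

		if (copy_from_user(&req32, ureq32, sizeof(req32)))
			return -EFAULT;
		req64.buf = compat_ptr(req32.buf);	/* widen the 32-bit pointer */
		req64.buflen = req32.buflen;
		do {
			rc = foo_do_request(&req64);	/* same backend as the native path */
		} while (rc == -EAGAIN);
		if (!rc)
			rc = put_user(req64.buflen, &ureq32->buflen);
		return rc;
	}

The zcrypt handlers differ only in how many pointer and length fields need widening; the control flow is the same.
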
887 | 885 | ||
888 | /* | 886 | /* |
889 | * Misc device file operations. | 887 | * Misc device file operations. |
890 | */ | 888 | */ |
891 | static const struct file_operations zcrypt_fops = { | 889 | static const struct file_operations zcrypt_fops = { |
892 | .owner = THIS_MODULE, | 890 | .owner = THIS_MODULE, |
893 | .read = zcrypt_read, | 891 | .read = zcrypt_read, |
894 | .write = zcrypt_write, | 892 | .write = zcrypt_write, |
895 | .unlocked_ioctl = zcrypt_unlocked_ioctl, | 893 | .unlocked_ioctl = zcrypt_unlocked_ioctl, |
896 | #ifdef CONFIG_COMPAT | 894 | #ifdef CONFIG_COMPAT |
897 | .compat_ioctl = zcrypt_compat_ioctl, | 895 | .compat_ioctl = zcrypt_compat_ioctl, |
898 | #endif | 896 | #endif |
899 | .open = zcrypt_open, | 897 | .open = zcrypt_open, |
900 | .release = zcrypt_release | 898 | .release = zcrypt_release |
901 | }; | 899 | }; |
902 | 900 | ||
903 | /* | 901 | /* |
904 | * Misc device. | 902 | * Misc device. |
905 | */ | 903 | */ |
906 | static struct miscdevice zcrypt_misc_device = { | 904 | static struct miscdevice zcrypt_misc_device = { |
907 | .minor = MISC_DYNAMIC_MINOR, | 905 | .minor = MISC_DYNAMIC_MINOR, |
908 | .name = "z90crypt", | 906 | .name = "z90crypt", |
909 | .fops = &zcrypt_fops, | 907 | .fops = &zcrypt_fops, |
910 | }; | 908 | }; |
911 | 909 | ||
912 | /* | 910 | /* |
913 | * Deprecated /proc entry support. | 911 | * Deprecated /proc entry support. |
914 | */ | 912 | */ |
915 | static struct proc_dir_entry *zcrypt_entry; | 913 | static struct proc_dir_entry *zcrypt_entry; |
916 | 914 | ||
917 | static int sprintcl(unsigned char *outaddr, unsigned char *addr, | 915 | static int sprintcl(unsigned char *outaddr, unsigned char *addr, |
918 | unsigned int len) | 916 | unsigned int len) |
919 | { | 917 | { |
920 | int hl, i; | 918 | int hl, i; |
921 | 919 | ||
922 | hl = 0; | 920 | hl = 0; |
923 | for (i = 0; i < len; i++) | 921 | for (i = 0; i < len; i++) |
924 | hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); | 922 | hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); |
925 | hl += sprintf(outaddr+hl, " "); | 923 | hl += sprintf(outaddr+hl, " "); |
926 | return hl; | 924 | return hl; |
927 | } | 925 | } |
928 | 926 | ||
929 | static int sprintrw(unsigned char *outaddr, unsigned char *addr, | 927 | static int sprintrw(unsigned char *outaddr, unsigned char *addr, |
930 | unsigned int len) | 928 | unsigned int len) |
931 | { | 929 | { |
932 | int hl, inl, c, cx; | 930 | int hl, inl, c, cx; |
933 | 931 | ||
934 | hl = sprintf(outaddr, " "); | 932 | hl = sprintf(outaddr, " "); |
935 | inl = 0; | 933 | inl = 0; |
936 | for (c = 0; c < (len / 16); c++) { | 934 | for (c = 0; c < (len / 16); c++) { |
937 | hl += sprintcl(outaddr+hl, addr+inl, 16); | 935 | hl += sprintcl(outaddr+hl, addr+inl, 16); |
938 | inl += 16; | 936 | inl += 16; |
939 | } | 937 | } |
940 | cx = len%16; | 938 | cx = len%16; |
941 | if (cx) { | 939 | if (cx) { |
942 | hl += sprintcl(outaddr+hl, addr+inl, cx); | 940 | hl += sprintcl(outaddr+hl, addr+inl, cx); |
943 | inl += cx; | 941 | inl += cx; |
944 | } | 942 | } |
945 | hl += sprintf(outaddr+hl, "\n"); | 943 | hl += sprintf(outaddr+hl, "\n"); |
946 | return hl; | 944 | return hl; |
947 | } | 945 | } |
948 | 946 | ||
949 | static int sprinthx(unsigned char *title, unsigned char *outaddr, | 947 | static int sprinthx(unsigned char *title, unsigned char *outaddr, |
950 | unsigned char *addr, unsigned int len) | 948 | unsigned char *addr, unsigned int len) |
951 | { | 949 | { |
952 | int hl, inl, r, rx; | 950 | int hl, inl, r, rx; |
953 | 951 | ||
954 | hl = sprintf(outaddr, "\n%s\n", title); | 952 | hl = sprintf(outaddr, "\n%s\n", title); |
955 | inl = 0; | 953 | inl = 0; |
956 | for (r = 0; r < (len / 64); r++) { | 954 | for (r = 0; r < (len / 64); r++) { |
957 | hl += sprintrw(outaddr+hl, addr+inl, 64); | 955 | hl += sprintrw(outaddr+hl, addr+inl, 64); |
958 | inl += 64; | 956 | inl += 64; |
959 | } | 957 | } |
960 | rx = len % 64; | 958 | rx = len % 64; |
961 | if (rx) { | 959 | if (rx) { |
962 | hl += sprintrw(outaddr+hl, addr+inl, rx); | 960 | hl += sprintrw(outaddr+hl, addr+inl, rx); |
963 | inl += rx; | 961 | inl += rx; |
964 | } | 962 | } |
965 | hl += sprintf(outaddr+hl, "\n"); | 963 | hl += sprintf(outaddr+hl, "\n"); |
966 | return hl; | 964 | return hl; |
967 | } | 965 | } |
968 | 966 | ||
969 | static int sprinthx4(unsigned char *title, unsigned char *outaddr, | 967 | static int sprinthx4(unsigned char *title, unsigned char *outaddr, |
970 | unsigned int *array, unsigned int len) | 968 | unsigned int *array, unsigned int len) |
971 | { | 969 | { |
972 | int hl, r; | 970 | int hl, r; |
973 | 971 | ||
974 | hl = sprintf(outaddr, "\n%s\n", title); | 972 | hl = sprintf(outaddr, "\n%s\n", title); |
975 | for (r = 0; r < len; r++) { | 973 | for (r = 0; r < len; r++) { |
976 | if ((r % 8) == 0) | 974 | if ((r % 8) == 0) |
977 | hl += sprintf(outaddr+hl, " "); | 975 | hl += sprintf(outaddr+hl, " "); |
978 | hl += sprintf(outaddr+hl, "%08X ", array[r]); | 976 | hl += sprintf(outaddr+hl, "%08X ", array[r]); |
979 | if ((r % 8) == 7) | 977 | if ((r % 8) == 7) |
980 | hl += sprintf(outaddr+hl, "\n"); | 978 | hl += sprintf(outaddr+hl, "\n"); |
981 | } | 979 | } |
982 | hl += sprintf(outaddr+hl, "\n"); | 980 | hl += sprintf(outaddr+hl, "\n"); |
983 | return hl; | 981 | return hl; |
984 | } | 982 | } |
985 | 983 | ||
986 | static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, | 984 | static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, |
987 | int count, int *eof, void *data) | 985 | int count, int *eof, void *data) |
988 | { | 986 | { |
989 | unsigned char *workarea; | 987 | unsigned char *workarea; |
990 | int len; | 988 | int len; |
991 | 989 | ||
992 | len = 0; | 990 | len = 0; |
993 | 991 | ||
994 | /* resp_buff is a page. Use the right half for a work area */ | 992 | /* resp_buff is a page. Use the right half for a work area */ |
995 | workarea = resp_buff + 2000; | 993 | workarea = resp_buff + 2000; |
996 | len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", | 994 | len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", |
997 | ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); | 995 | ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); |
998 | len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", | 996 | len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", |
999 | ap_domain_index); | 997 | ap_domain_index); |
1000 | len += sprintf(resp_buff + len, "Total device count: %d\n", | 998 | len += sprintf(resp_buff + len, "Total device count: %d\n", |
1001 | zcrypt_device_count); | 999 | zcrypt_device_count); |
1002 | len += sprintf(resp_buff + len, "PCICA count: %d\n", | 1000 | len += sprintf(resp_buff + len, "PCICA count: %d\n", |
1003 | zcrypt_count_type(ZCRYPT_PCICA)); | 1001 | zcrypt_count_type(ZCRYPT_PCICA)); |
1004 | len += sprintf(resp_buff + len, "PCICC count: %d\n", | 1002 | len += sprintf(resp_buff + len, "PCICC count: %d\n", |
1005 | zcrypt_count_type(ZCRYPT_PCICC)); | 1003 | zcrypt_count_type(ZCRYPT_PCICC)); |
1006 | len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", | 1004 | len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", |
1007 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); | 1005 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); |
1008 | len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", | 1006 | len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", |
1009 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); | 1007 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); |
1010 | len += sprintf(resp_buff + len, "CEX2C count: %d\n", | 1008 | len += sprintf(resp_buff + len, "CEX2C count: %d\n", |
1011 | zcrypt_count_type(ZCRYPT_CEX2C)); | 1009 | zcrypt_count_type(ZCRYPT_CEX2C)); |
1012 | len += sprintf(resp_buff + len, "CEX2A count: %d\n", | 1010 | len += sprintf(resp_buff + len, "CEX2A count: %d\n", |
1013 | zcrypt_count_type(ZCRYPT_CEX2A)); | 1011 | zcrypt_count_type(ZCRYPT_CEX2A)); |
1014 | len += sprintf(resp_buff + len, "requestq count: %d\n", | 1012 | len += sprintf(resp_buff + len, "requestq count: %d\n", |
1015 | zcrypt_requestq_count()); | 1013 | zcrypt_requestq_count()); |
1016 | len += sprintf(resp_buff + len, "pendingq count: %d\n", | 1014 | len += sprintf(resp_buff + len, "pendingq count: %d\n", |
1017 | zcrypt_pendingq_count()); | 1015 | zcrypt_pendingq_count()); |
1018 | len += sprintf(resp_buff + len, "Total open handles: %d\n\n", | 1016 | len += sprintf(resp_buff + len, "Total open handles: %d\n\n", |
1019 | atomic_read(&zcrypt_open_count)); | 1017 | atomic_read(&zcrypt_open_count)); |
1020 | zcrypt_status_mask(workarea); | 1018 | zcrypt_status_mask(workarea); |
1021 | len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " | 1019 | len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " |
1022 | "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", | 1020 | "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", |
1023 | resp_buff+len, workarea, AP_DEVICES); | 1021 | resp_buff+len, workarea, AP_DEVICES); |
1024 | zcrypt_qdepth_mask(workarea); | 1022 | zcrypt_qdepth_mask(workarea); |
1025 | len += sprinthx("Waiting work element counts", | 1023 | len += sprinthx("Waiting work element counts", |
1026 | resp_buff+len, workarea, AP_DEVICES); | 1024 | resp_buff+len, workarea, AP_DEVICES); |
1027 | zcrypt_perdev_reqcnt((int *) workarea); | 1025 | zcrypt_perdev_reqcnt((int *) workarea); |
1028 | len += sprinthx4("Per-device successfully completed request counts", | 1026 | len += sprinthx4("Per-device successfully completed request counts", |
1029 | 		  resp_buff+len, (unsigned int *) workarea, AP_DEVICES); | 1027 | 		  resp_buff+len, (unsigned int *) workarea, AP_DEVICES); |
1030 | *eof = 1; | 1028 | *eof = 1; |
1031 | memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); | 1029 | memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); |
1032 | return len; | 1030 | return len; |
1033 | } | 1031 | } |
1034 | 1032 | ||
1035 | static void zcrypt_disable_card(int index) | 1033 | static void zcrypt_disable_card(int index) |
1036 | { | 1034 | { |
1037 | struct zcrypt_device *zdev; | 1035 | struct zcrypt_device *zdev; |
1038 | 1036 | ||
1039 | spin_lock_bh(&zcrypt_device_lock); | 1037 | spin_lock_bh(&zcrypt_device_lock); |
1040 | list_for_each_entry(zdev, &zcrypt_device_list, list) | 1038 | list_for_each_entry(zdev, &zcrypt_device_list, list) |
1041 | if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { | 1039 | if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { |
1042 | zdev->online = 0; | 1040 | zdev->online = 0; |
1043 | ap_flush_queue(zdev->ap_dev); | 1041 | ap_flush_queue(zdev->ap_dev); |
1044 | break; | 1042 | break; |
1045 | } | 1043 | } |
1046 | spin_unlock_bh(&zcrypt_device_lock); | 1044 | spin_unlock_bh(&zcrypt_device_lock); |
1047 | } | 1045 | } |
1048 | 1046 | ||
1049 | static void zcrypt_enable_card(int index) | 1047 | static void zcrypt_enable_card(int index) |
1050 | { | 1048 | { |
1051 | struct zcrypt_device *zdev; | 1049 | struct zcrypt_device *zdev; |
1052 | 1050 | ||
1053 | spin_lock_bh(&zcrypt_device_lock); | 1051 | spin_lock_bh(&zcrypt_device_lock); |
1054 | list_for_each_entry(zdev, &zcrypt_device_list, list) | 1052 | list_for_each_entry(zdev, &zcrypt_device_list, list) |
1055 | if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { | 1053 | if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { |
1056 | zdev->online = 1; | 1054 | zdev->online = 1; |
1057 | break; | 1055 | break; |
1058 | } | 1056 | } |
1059 | spin_unlock_bh(&zcrypt_device_lock); | 1057 | spin_unlock_bh(&zcrypt_device_lock); |
1060 | } | 1058 | } |
1061 | 1059 | ||
1062 | static int zcrypt_status_write(struct file *file, const char __user *buffer, | 1060 | static int zcrypt_status_write(struct file *file, const char __user *buffer, |
1063 | unsigned long count, void *data) | 1061 | unsigned long count, void *data) |
1064 | { | 1062 | { |
1065 | unsigned char *lbuf, *ptr; | 1063 | unsigned char *lbuf, *ptr; |
1066 | unsigned long local_count; | 1064 | unsigned long local_count; |
1067 | int j; | 1065 | int j; |
1068 | 1066 | ||
1069 | if (count <= 0) | 1067 | if (count <= 0) |
1070 | return 0; | 1068 | return 0; |
1071 | 1069 | ||
1072 | #define LBUFSIZE 1200UL | 1070 | #define LBUFSIZE 1200UL |
1073 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); | 1071 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); |
1074 | if (!lbuf) | 1072 | if (!lbuf) |
1075 | return 0; | 1073 | return 0; |
1076 | 1074 | ||
1077 | local_count = min(LBUFSIZE - 1, count); | 1075 | local_count = min(LBUFSIZE - 1, count); |
1078 | if (copy_from_user(lbuf, buffer, local_count) != 0) { | 1076 | if (copy_from_user(lbuf, buffer, local_count) != 0) { |
1079 | kfree(lbuf); | 1077 | kfree(lbuf); |
1080 | return -EFAULT; | 1078 | return -EFAULT; |
1081 | } | 1079 | } |
1082 | lbuf[local_count] = '\0'; | 1080 | lbuf[local_count] = '\0'; |
1083 | 1081 | ||
1084 | ptr = strstr(lbuf, "Online devices"); | 1082 | ptr = strstr(lbuf, "Online devices"); |
1085 | if (!ptr) | 1083 | if (!ptr) |
1086 | goto out; | 1084 | goto out; |
1087 | ptr = strstr(ptr, "\n"); | 1085 | ptr = strstr(ptr, "\n"); |
1088 | if (!ptr) | 1086 | if (!ptr) |
1089 | goto out; | 1087 | goto out; |
1090 | ptr++; | 1088 | ptr++; |
1091 | 1089 | ||
1092 | if (strstr(ptr, "Waiting work element counts") == NULL) | 1090 | if (strstr(ptr, "Waiting work element counts") == NULL) |
1093 | goto out; | 1091 | goto out; |
1094 | 1092 | ||
1095 | for (j = 0; j < 64 && *ptr; ptr++) { | 1093 | for (j = 0; j < 64 && *ptr; ptr++) { |
1096 | /* | 1094 | /* |
1097 | * '0' for no device, '1' for PCICA, '2' for PCICC, | 1095 | * '0' for no device, '1' for PCICA, '2' for PCICC, |
1098 | * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, | 1096 | * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, |
1099 | 		 * '5' for CEX2C and '6' for CEX2A | 1097 | 		 * '5' for CEX2C and '6' for CEX2A |
1100 | */ | 1098 | */ |
1101 | if (*ptr >= '0' && *ptr <= '6') | 1099 | if (*ptr >= '0' && *ptr <= '6') |
1102 | j++; | 1100 | j++; |
1103 | else if (*ptr == 'd' || *ptr == 'D') | 1101 | else if (*ptr == 'd' || *ptr == 'D') |
1104 | zcrypt_disable_card(j++); | 1102 | zcrypt_disable_card(j++); |
1105 | else if (*ptr == 'e' || *ptr == 'E') | 1103 | else if (*ptr == 'e' || *ptr == 'E') |
1106 | zcrypt_enable_card(j++); | 1104 | zcrypt_enable_card(j++); |
1107 | else if (*ptr != ' ' && *ptr != '\t') | 1105 | else if (*ptr != ' ' && *ptr != '\t') |
1108 | break; | 1106 | break; |
1109 | } | 1107 | } |
1110 | out: | 1108 | out: |
1111 | kfree(lbuf); | 1109 | kfree(lbuf); |
1112 | return count; | 1110 | return count; |
1113 | } | 1111 | } |
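
zcrypt_status_write() is the write side of the deprecated proc interface: it searches the written buffer for the "Online devices" line that zcrypt_status_read() printed, then walks the per-card characters, treating 'd'/'D' as "disable this card", 'e'/'E' as "enable this card", and the digits '0'-'6' as "leave it alone". A rough user-space illustration, assuming the output layout produced above (the helper name and the fixed 4 KB buffer are made up for the example): read the status, overwrite the digit of the card to toggle, and write the buffer back:

	/* Illustration only -- a hypothetical helper, not part of the driver.
	 * Disables one card through the deprecated /proc/driver/z90crypt
	 * interface by rewriting its digit on the "Online devices" line. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int z90crypt_disable_card(int card)	/* valid for cards 0-15 here */
	{
		char buf[4096];
		ssize_t len;
		char *p;
		int fd;

		fd = open("/proc/driver/z90crypt", O_RDWR);
		if (fd < 0)
			return -1;
		len = read(fd, buf, sizeof(buf) - 1);
		if (len <= 0)
			goto fail;
		buf[len] = '\0';

		p = strstr(buf, "Online devices");
		if (!p || !(p = strchr(p, '\n')))
			goto fail;
		p++;
		while (*p == ' ' || *p == '\t')
			p++;			/* skip the row indent */
		p[card] = 'd';			/* 'd' = disable, 'e' = enable */

		if (write(fd, buf, len) < 0)	/* parser rescans the whole buffer */
			goto fail;
		close(fd);
		return 0;
	fail:
		close(fd);
		return -1;
	}

Cards 16 and up sit in later 16-character groups separated by blanks, which the in-kernel parser skips without counting; the sketch keeps to the first group for brevity.
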
1114 | 1112 | ||
1115 | static int zcrypt_rng_device_count; | 1113 | static int zcrypt_rng_device_count; |
1116 | static u32 *zcrypt_rng_buffer; | 1114 | static u32 *zcrypt_rng_buffer; |
1117 | static int zcrypt_rng_buffer_index; | 1115 | static int zcrypt_rng_buffer_index; |
1118 | static DEFINE_MUTEX(zcrypt_rng_mutex); | 1116 | static DEFINE_MUTEX(zcrypt_rng_mutex); |
1119 | 1117 | ||
1120 | static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) | 1118 | static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) |
1121 | { | 1119 | { |
1122 | int rc; | 1120 | int rc; |
1123 | 1121 | ||
1124 | /* | 1122 | /* |
1125 | * We don't need locking here because the RNG API guarantees serialized | 1123 | * We don't need locking here because the RNG API guarantees serialized |
1126 | * read method calls. | 1124 | * read method calls. |
1127 | */ | 1125 | */ |
1128 | if (zcrypt_rng_buffer_index == 0) { | 1126 | if (zcrypt_rng_buffer_index == 0) { |
1129 | rc = zcrypt_rng((char *) zcrypt_rng_buffer); | 1127 | rc = zcrypt_rng((char *) zcrypt_rng_buffer); |
1130 | if (rc < 0) | 1128 | if (rc < 0) |
1131 | return -EIO; | 1129 | return -EIO; |
1132 | zcrypt_rng_buffer_index = rc / sizeof *data; | 1130 | zcrypt_rng_buffer_index = rc / sizeof *data; |
1133 | } | 1131 | } |
1134 | *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; | 1132 | *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; |
1135 | return sizeof *data; | 1133 | return sizeof *data; |
1136 | } | 1134 | } |
1137 | 1135 | ||
1138 | static struct hwrng zcrypt_rng_dev = { | 1136 | static struct hwrng zcrypt_rng_dev = { |
1139 | .name = "zcrypt", | 1137 | .name = "zcrypt", |
1140 | .data_read = zcrypt_rng_data_read, | 1138 | .data_read = zcrypt_rng_data_read, |
1141 | }; | 1139 | }; |
1142 | 1140 | ||
1143 | static int zcrypt_rng_device_add(void) | 1141 | static int zcrypt_rng_device_add(void) |
1144 | { | 1142 | { |
1145 | int rc = 0; | 1143 | int rc = 0; |
1146 | 1144 | ||
1147 | mutex_lock(&zcrypt_rng_mutex); | 1145 | mutex_lock(&zcrypt_rng_mutex); |
1148 | if (zcrypt_rng_device_count == 0) { | 1146 | if (zcrypt_rng_device_count == 0) { |
1149 | zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); | 1147 | zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); |
1150 | if (!zcrypt_rng_buffer) { | 1148 | if (!zcrypt_rng_buffer) { |
1151 | rc = -ENOMEM; | 1149 | rc = -ENOMEM; |
1152 | goto out; | 1150 | goto out; |
1153 | } | 1151 | } |
1154 | zcrypt_rng_buffer_index = 0; | 1152 | zcrypt_rng_buffer_index = 0; |
1155 | rc = hwrng_register(&zcrypt_rng_dev); | 1153 | rc = hwrng_register(&zcrypt_rng_dev); |
1156 | if (rc) | 1154 | if (rc) |
1157 | goto out_free; | 1155 | goto out_free; |
1158 | zcrypt_rng_device_count = 1; | 1156 | zcrypt_rng_device_count = 1; |
1159 | } else | 1157 | } else |
1160 | zcrypt_rng_device_count++; | 1158 | zcrypt_rng_device_count++; |
1161 | mutex_unlock(&zcrypt_rng_mutex); | 1159 | mutex_unlock(&zcrypt_rng_mutex); |
1162 | return 0; | 1160 | return 0; |
1163 | 1161 | ||
1164 | out_free: | 1162 | out_free: |
1165 | free_page((unsigned long) zcrypt_rng_buffer); | 1163 | free_page((unsigned long) zcrypt_rng_buffer); |
1166 | out: | 1164 | out: |
1167 | mutex_unlock(&zcrypt_rng_mutex); | 1165 | mutex_unlock(&zcrypt_rng_mutex); |
1168 | return rc; | 1166 | return rc; |
1169 | } | 1167 | } |
1170 | 1168 | ||
1171 | static void zcrypt_rng_device_remove(void) | 1169 | static void zcrypt_rng_device_remove(void) |
1172 | { | 1170 | { |
1173 | mutex_lock(&zcrypt_rng_mutex); | 1171 | mutex_lock(&zcrypt_rng_mutex); |
1174 | zcrypt_rng_device_count--; | 1172 | zcrypt_rng_device_count--; |
1175 | if (zcrypt_rng_device_count == 0) { | 1173 | if (zcrypt_rng_device_count == 0) { |
1176 | hwrng_unregister(&zcrypt_rng_dev); | 1174 | hwrng_unregister(&zcrypt_rng_dev); |
1177 | free_page((unsigned long) zcrypt_rng_buffer); | 1175 | free_page((unsigned long) zcrypt_rng_buffer); |
1178 | } | 1176 | } |
1179 | mutex_unlock(&zcrypt_rng_mutex); | 1177 | mutex_unlock(&zcrypt_rng_mutex); |
1180 | } | 1178 | } |
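
zcrypt_rng_device_add() and zcrypt_rng_device_remove() form a small reference count around a single shared hwrng: the first caller allocates the page buffer and registers the "zcrypt" hwrng, later callers only bump the count, and the last one to leave unregisters and frees. A hedged sketch of how a device setup/teardown path could pair the two calls (the example_card_* names are made up for illustration, not code from this file):

	/* Illustration only -- hypothetical callers of the refcounted helpers. */
	static int example_card_probe(void)
	{
		int rc;

		/* ... bring the crypto card online ... */

		rc = zcrypt_rng_device_add();	/* first caller registers the hwrng */
		if (rc)
			return rc;
		return 0;
	}

	static void example_card_remove(void)
	{
		zcrypt_rng_device_remove();	/* last caller unregisters and frees the page */

		/* ... take the crypto card offline ... */
	}
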
1181 | 1179 | ||
1182 | /** | 1180 | /** |
1183 | * zcrypt_api_init(): Module initialization. | 1181 | * zcrypt_api_init(): Module initialization. |
1184 | * | 1182 | * |
1185 | * The module initialization code. | 1183 | * The module initialization code. |
1186 | */ | 1184 | */ |
1187 | int __init zcrypt_api_init(void) | 1185 | int __init zcrypt_api_init(void) |
1188 | { | 1186 | { |
1189 | int rc; | 1187 | int rc; |
1190 | 1188 | ||
1191 | /* Register the request sprayer. */ | 1189 | /* Register the request sprayer. */ |
1192 | rc = misc_register(&zcrypt_misc_device); | 1190 | rc = misc_register(&zcrypt_misc_device); |
1193 | if (rc < 0) | 1191 | if (rc < 0) |
1194 | goto out; | 1192 | goto out; |
1195 | 1193 | ||
1196 | /* Set up the proc file system */ | 1194 | /* Set up the proc file system */ |
1197 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); | 1195 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); |
1198 | if (!zcrypt_entry) { | 1196 | if (!zcrypt_entry) { |
1199 | rc = -ENOMEM; | 1197 | rc = -ENOMEM; |
1200 | goto out_misc; | 1198 | goto out_misc; |
1201 | } | 1199 | } |
1202 | zcrypt_entry->data = NULL; | 1200 | zcrypt_entry->data = NULL; |
1203 | zcrypt_entry->read_proc = zcrypt_status_read; | 1201 | zcrypt_entry->read_proc = zcrypt_status_read; |
1204 | zcrypt_entry->write_proc = zcrypt_status_write; | 1202 | zcrypt_entry->write_proc = zcrypt_status_write; |
1205 | 1203 | ||
1206 | return 0; | 1204 | return 0; |
1207 | 1205 | ||
1208 | out_misc: | 1206 | out_misc: |
1209 | misc_deregister(&zcrypt_misc_device); | 1207 | misc_deregister(&zcrypt_misc_device); |
1210 | out: | 1208 | out: |
1211 | return rc; | 1209 | return rc; |
1212 | } | 1210 | } |
1213 | 1211 | ||
1214 | /** | 1212 | /** |
1215 | * zcrypt_api_exit(): Module termination. | 1213 | * zcrypt_api_exit(): Module termination. |
1216 | * | 1214 | * |
1217 | * The module termination code. | 1215 | * The module termination code. |
1218 | */ | 1216 | */ |
1219 | void zcrypt_api_exit(void) | 1217 | void zcrypt_api_exit(void) |
1220 | { | 1218 | { |
1221 | remove_proc_entry("driver/z90crypt", NULL); | 1219 | remove_proc_entry("driver/z90crypt", NULL); |
1222 | misc_deregister(&zcrypt_misc_device); | 1220 | misc_deregister(&zcrypt_misc_device); |
1223 | } | 1221 | } |
1224 | 1222 | ||
1225 | #ifndef CONFIG_ZCRYPT_MONOLITHIC | 1223 | #ifndef CONFIG_ZCRYPT_MONOLITHIC |
1226 | module_init(zcrypt_api_init); | 1224 | module_init(zcrypt_api_init); |
1227 | module_exit(zcrypt_api_exit); | 1225 | module_exit(zcrypt_api_exit); |
1228 | #endif | 1226 | #endif |
1229 | 1227 |