Commit d0a7e574007fd547d72ec693bfa35778623d0738
Committed by James Bottomley
1 parent: 10c1b88987
Exists in master and in 7 other branches
[SCSI] correct transport class abstraction to work outside SCSI
I recently tried to construct a totally generic transport class and found there were certain features missing from the current abstract transport class. Most notable is that you have to hang the data on the class_device, but most of the API is framed in terms of the generic device, not the class_device. These changes are twofold:

- Provide the class_device to all of the setup and configure APIs
- Provide an extra API to take the device and the attribute class and return the corresponding class_device

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Showing 6 changed files with 72 additions and 20 deletions (inline diff)
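For orientation, a minimal sketch of the interface change described above; the old callback shape is taken from the removed lines in the diff below, the new shape and the lookup helper from the added lines (an illustrative summary, not verbatim header text):

/* Callback shape before the patch: only the generic device is available */
int (*setup)(struct device *dev);

/* Callback shape after the patch: the container and class_device are passed too */
int (*setup)(struct transport_container *tcont,
             struct device *dev,
             struct class_device *cdev);

/* New lookup helper added to drivers/base/attribute_container.c */
struct class_device *
attribute_container_find_class_device(struct attribute_container *cont,
                                      struct device *dev);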
drivers/base/attribute_container.c
1 | /* | 1 | /* |
2 | * attribute_container.c - implementation of a simple container for classes | 2 | * attribute_container.c - implementation of a simple container for classes |
3 | * | 3 | * |
4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> | 4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> |
5 | * | 5 | * |
6 | * This file is licensed under GPLv2 | 6 | * This file is licensed under GPLv2 |
7 | * | 7 | * |
8 | * The basic idea here is to enable a device to be attached to an | 8 | * The basic idea here is to enable a device to be attached to an |
9 | * aritrary numer of classes without having to allocate storage for them. | 9 | * aritrary numer of classes without having to allocate storage for them. |
10 | * Instead, the contained classes select the devices they need to attach | 10 | * Instead, the contained classes select the devices they need to attach |
11 | * to via a matching function. | 11 | * to via a matching function. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/attribute_container.h> | 14 | #include <linux/attribute_container.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | 21 | ||
22 | /* This is a private structure used to tie the classdev and the | 22 | /* This is a private structure used to tie the classdev and the |
23 | * container .. it should never be visible outside this file */ | 23 | * container .. it should never be visible outside this file */ |
24 | struct internal_container { | 24 | struct internal_container { |
25 | struct list_head node; | 25 | struct list_head node; |
26 | struct attribute_container *cont; | 26 | struct attribute_container *cont; |
27 | struct class_device classdev; | 27 | struct class_device classdev; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | /** | 30 | /** |
31 | * attribute_container_classdev_to_container - given a classdev, return the container | 31 | * attribute_container_classdev_to_container - given a classdev, return the container |
32 | * | 32 | * |
33 | * @classdev: the class device created by attribute_container_add_device. | 33 | * @classdev: the class device created by attribute_container_add_device. |
34 | * | 34 | * |
35 | * Returns the container associated with this classdev. | 35 | * Returns the container associated with this classdev. |
36 | */ | 36 | */ |
37 | struct attribute_container * | 37 | struct attribute_container * |
38 | attribute_container_classdev_to_container(struct class_device *classdev) | 38 | attribute_container_classdev_to_container(struct class_device *classdev) |
39 | { | 39 | { |
40 | struct internal_container *ic = | 40 | struct internal_container *ic = |
41 | container_of(classdev, struct internal_container, classdev); | 41 | container_of(classdev, struct internal_container, classdev); |
42 | return ic->cont; | 42 | return ic->cont; |
43 | } | 43 | } |
44 | EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container); | 44 | EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container); |
45 | 45 | ||
46 | static struct list_head attribute_container_list; | 46 | static struct list_head attribute_container_list; |
47 | 47 | ||
48 | static DECLARE_MUTEX(attribute_container_mutex); | 48 | static DECLARE_MUTEX(attribute_container_mutex); |
49 | 49 | ||
50 | /** | 50 | /** |
51 | * attribute_container_register - register an attribute container | 51 | * attribute_container_register - register an attribute container |
52 | * | 52 | * |
53 | * @cont: The container to register. This must be allocated by the | 53 | * @cont: The container to register. This must be allocated by the |
54 | * callee and should also be zeroed by it. | 54 | * callee and should also be zeroed by it. |
55 | */ | 55 | */ |
56 | int | 56 | int |
57 | attribute_container_register(struct attribute_container *cont) | 57 | attribute_container_register(struct attribute_container *cont) |
58 | { | 58 | { |
59 | INIT_LIST_HEAD(&cont->node); | 59 | INIT_LIST_HEAD(&cont->node); |
60 | INIT_LIST_HEAD(&cont->containers); | 60 | INIT_LIST_HEAD(&cont->containers); |
61 | spin_lock_init(&cont->containers_lock); | ||
61 | 62 | ||
62 | down(&attribute_container_mutex); | 63 | down(&attribute_container_mutex); |
63 | list_add_tail(&cont->node, &attribute_container_list); | 64 | list_add_tail(&cont->node, &attribute_container_list); |
64 | up(&attribute_container_mutex); | 65 | up(&attribute_container_mutex); |
65 | 66 | ||
66 | return 0; | 67 | return 0; |
67 | } | 68 | } |
68 | EXPORT_SYMBOL_GPL(attribute_container_register); | 69 | EXPORT_SYMBOL_GPL(attribute_container_register); |
69 | 70 | ||
70 | /** | 71 | /** |
71 | * attribute_container_unregister - remove a container registration | 72 | * attribute_container_unregister - remove a container registration |
72 | * | 73 | * |
73 | * @cont: previously registered container to remove | 74 | * @cont: previously registered container to remove |
74 | */ | 75 | */ |
75 | int | 76 | int |
76 | attribute_container_unregister(struct attribute_container *cont) | 77 | attribute_container_unregister(struct attribute_container *cont) |
77 | { | 78 | { |
78 | int retval = -EBUSY; | 79 | int retval = -EBUSY; |
79 | down(&attribute_container_mutex); | 80 | down(&attribute_container_mutex); |
81 | spin_lock(&cont->containers_lock); | ||
80 | if (!list_empty(&cont->containers)) | 82 | if (!list_empty(&cont->containers)) |
81 | goto out; | 83 | goto out; |
82 | retval = 0; | 84 | retval = 0; |
83 | list_del(&cont->node); | 85 | list_del(&cont->node); |
84 | out: | 86 | out: |
87 | spin_unlock(&cont->containers_lock); | ||
85 | up(&attribute_container_mutex); | 88 | up(&attribute_container_mutex); |
86 | return retval; | 89 | return retval; |
87 | 90 | ||
88 | } | 91 | } |
89 | EXPORT_SYMBOL_GPL(attribute_container_unregister); | 92 | EXPORT_SYMBOL_GPL(attribute_container_unregister); |
90 | 93 | ||
91 | /* private function used as class release */ | 94 | /* private function used as class release */ |
92 | static void attribute_container_release(struct class_device *classdev) | 95 | static void attribute_container_release(struct class_device *classdev) |
93 | { | 96 | { |
94 | struct internal_container *ic | 97 | struct internal_container *ic |
95 | = container_of(classdev, struct internal_container, classdev); | 98 | = container_of(classdev, struct internal_container, classdev); |
96 | struct device *dev = classdev->dev; | 99 | struct device *dev = classdev->dev; |
97 | 100 | ||
98 | kfree(ic); | 101 | kfree(ic); |
99 | put_device(dev); | 102 | put_device(dev); |
100 | } | 103 | } |
101 | 104 | ||
102 | /** | 105 | /** |
103 | * attribute_container_add_device - see if any container is interested in dev | 106 | * attribute_container_add_device - see if any container is interested in dev |
104 | * | 107 | * |
105 | * @dev: device to add attributes to | 108 | * @dev: device to add attributes to |
106 | * @fn: function to trigger addition of class device. | 109 | * @fn: function to trigger addition of class device. |
107 | * | 110 | * |
108 | * This function allocates storage for the class device(s) to be | 111 | * This function allocates storage for the class device(s) to be |
109 | * attached to dev (one for each matching attribute_container). If no | 112 | * attached to dev (one for each matching attribute_container). If no |
110 | * fn is provided, the code will simply register the class device via | 113 | * fn is provided, the code will simply register the class device via |
111 | * class_device_add. If a function is provided, it is expected to add | 114 | * class_device_add. If a function is provided, it is expected to add |
112 | * the class device at the appropriate time. One of the things that | 115 | * the class device at the appropriate time. One of the things that |
113 | * might be necessary is to allocate and initialise the classdev and | 116 | * might be necessary is to allocate and initialise the classdev and |
114 | * then add it a later time. To do this, call this routine for | 117 | * then add it a later time. To do this, call this routine for |
115 | * allocation and initialisation and then use | 118 | * allocation and initialisation and then use |
116 | * attribute_container_device_trigger() to call class_device_add() on | 119 | * attribute_container_device_trigger() to call class_device_add() on |
117 | * it. Note: after this, the class device contains a reference to dev | 120 | * it. Note: after this, the class device contains a reference to dev |
118 | * which is not relinquished until the release of the classdev. | 121 | * which is not relinquished until the release of the classdev. |
119 | */ | 122 | */ |
120 | void | 123 | void |
121 | attribute_container_add_device(struct device *dev, | 124 | attribute_container_add_device(struct device *dev, |
122 | int (*fn)(struct attribute_container *, | 125 | int (*fn)(struct attribute_container *, |
123 | struct device *, | 126 | struct device *, |
124 | struct class_device *)) | 127 | struct class_device *)) |
125 | { | 128 | { |
126 | struct attribute_container *cont; | 129 | struct attribute_container *cont; |
127 | 130 | ||
128 | down(&attribute_container_mutex); | 131 | down(&attribute_container_mutex); |
129 | list_for_each_entry(cont, &attribute_container_list, node) { | 132 | list_for_each_entry(cont, &attribute_container_list, node) { |
130 | struct internal_container *ic; | 133 | struct internal_container *ic; |
131 | 134 | ||
132 | if (attribute_container_no_classdevs(cont)) | 135 | if (attribute_container_no_classdevs(cont)) |
133 | continue; | 136 | continue; |
134 | 137 | ||
135 | if (!cont->match(cont, dev)) | 138 | if (!cont->match(cont, dev)) |
136 | continue; | 139 | continue; |
137 | ic = kmalloc(sizeof(struct internal_container), GFP_KERNEL); | 140 | ic = kmalloc(sizeof(struct internal_container), GFP_KERNEL); |
138 | if (!ic) { | 141 | if (!ic) { |
139 | dev_printk(KERN_ERR, dev, "failed to allocate class container\n"); | 142 | dev_printk(KERN_ERR, dev, "failed to allocate class container\n"); |
140 | continue; | 143 | continue; |
141 | } | 144 | } |
142 | memset(ic, 0, sizeof(struct internal_container)); | 145 | memset(ic, 0, sizeof(struct internal_container)); |
143 | INIT_LIST_HEAD(&ic->node); | 146 | INIT_LIST_HEAD(&ic->node); |
144 | ic->cont = cont; | 147 | ic->cont = cont; |
145 | class_device_initialize(&ic->classdev); | 148 | class_device_initialize(&ic->classdev); |
146 | ic->classdev.dev = get_device(dev); | 149 | ic->classdev.dev = get_device(dev); |
147 | ic->classdev.class = cont->class; | 150 | ic->classdev.class = cont->class; |
148 | cont->class->release = attribute_container_release; | 151 | cont->class->release = attribute_container_release; |
149 | strcpy(ic->classdev.class_id, dev->bus_id); | 152 | strcpy(ic->classdev.class_id, dev->bus_id); |
150 | if (fn) | 153 | if (fn) |
151 | fn(cont, dev, &ic->classdev); | 154 | fn(cont, dev, &ic->classdev); |
152 | else | 155 | else |
153 | attribute_container_add_class_device(&ic->classdev); | 156 | attribute_container_add_class_device(&ic->classdev); |
157 | spin_lock(&cont->containers_lock); | ||
154 | list_add_tail(&ic->node, &cont->containers); | 158 | list_add_tail(&ic->node, &cont->containers); |
159 | spin_unlock(&cont->containers_lock); | ||
155 | } | 160 | } |
156 | up(&attribute_container_mutex); | 161 | up(&attribute_container_mutex); |
157 | } | 162 | } |
158 | 163 | ||
159 | /** | 164 | /** |
160 | * attribute_container_remove_device - make device eligible for removal. | 165 | * attribute_container_remove_device - make device eligible for removal. |
161 | * | 166 | * |
162 | * @dev: The generic device | 167 | * @dev: The generic device |
163 | * @fn: A function to call to remove the device | 168 | * @fn: A function to call to remove the device |
164 | * | 169 | * |
165 | * This routine triggers device removal. If fn is NULL, then it is | 170 | * This routine triggers device removal. If fn is NULL, then it is |
166 | * simply done via class_device_unregister (note that if something | 171 | * simply done via class_device_unregister (note that if something |
167 | * still has a reference to the classdev, then the memory occupied | 172 | * still has a reference to the classdev, then the memory occupied |
168 | * will not be freed until the classdev is released). If you want a | 173 | * will not be freed until the classdev is released). If you want a |
169 | * two phase release: remove from visibility and then delete the | 174 | * two phase release: remove from visibility and then delete the |
170 | * device, then you should use this routine with a fn that calls | 175 | * device, then you should use this routine with a fn that calls |
171 | * class_device_del() and then use | 176 | * class_device_del() and then use |
172 | * attribute_container_device_trigger() to do the final put on the | 177 | * attribute_container_device_trigger() to do the final put on the |
173 | * classdev. | 178 | * classdev. |
174 | */ | 179 | */ |
175 | void | 180 | void |
176 | attribute_container_remove_device(struct device *dev, | 181 | attribute_container_remove_device(struct device *dev, |
177 | void (*fn)(struct attribute_container *, | 182 | void (*fn)(struct attribute_container *, |
178 | struct device *, | 183 | struct device *, |
179 | struct class_device *)) | 184 | struct class_device *)) |
180 | { | 185 | { |
181 | struct attribute_container *cont; | 186 | struct attribute_container *cont; |
182 | 187 | ||
183 | down(&attribute_container_mutex); | 188 | down(&attribute_container_mutex); |
184 | list_for_each_entry(cont, &attribute_container_list, node) { | 189 | list_for_each_entry(cont, &attribute_container_list, node) { |
185 | struct internal_container *ic, *tmp; | 190 | struct internal_container *ic, *tmp; |
186 | 191 | ||
187 | if (attribute_container_no_classdevs(cont)) | 192 | if (attribute_container_no_classdevs(cont)) |
188 | continue; | 193 | continue; |
189 | 194 | ||
190 | if (!cont->match(cont, dev)) | 195 | if (!cont->match(cont, dev)) |
191 | continue; | 196 | continue; |
197 | spin_lock(&cont->containers_lock); | ||
192 | list_for_each_entry_safe(ic, tmp, &cont->containers, node) { | 198 | list_for_each_entry_safe(ic, tmp, &cont->containers, node) { |
193 | if (dev != ic->classdev.dev) | 199 | if (dev != ic->classdev.dev) |
194 | continue; | 200 | continue; |
195 | list_del(&ic->node); | 201 | list_del(&ic->node); |
196 | if (fn) | 202 | if (fn) |
197 | fn(cont, dev, &ic->classdev); | 203 | fn(cont, dev, &ic->classdev); |
198 | else { | 204 | else { |
199 | attribute_container_remove_attrs(&ic->classdev); | 205 | attribute_container_remove_attrs(&ic->classdev); |
200 | class_device_unregister(&ic->classdev); | 206 | class_device_unregister(&ic->classdev); |
201 | } | 207 | } |
202 | } | 208 | } |
209 | spin_unlock(&cont->containers_lock); | ||
203 | } | 210 | } |
204 | up(&attribute_container_mutex); | 211 | up(&attribute_container_mutex); |
205 | } | 212 | } |
206 | EXPORT_SYMBOL_GPL(attribute_container_remove_device); | 213 | EXPORT_SYMBOL_GPL(attribute_container_remove_device); |
207 | 214 | ||
208 | /** | 215 | /** |
209 | * attribute_container_device_trigger - execute a trigger for each matching classdev | 216 | * attribute_container_device_trigger - execute a trigger for each matching classdev |
210 | * | 217 | * |
211 | * @dev: The generic device to run the trigger for | 218 | * @dev: The generic device to run the trigger for |
212 | * @fn the function to execute for each classdev. | 219 | * @fn the function to execute for each classdev. |
213 | * | 220 | * |
214 | * This funcion is for executing a trigger when you need to know both | 221 | * This funcion is for executing a trigger when you need to know both |
215 | * the container and the classdev. If you only care about the | 222 | * the container and the classdev. If you only care about the |
216 | * container, then use attribute_container_trigger() instead. | 223 | * container, then use attribute_container_trigger() instead. |
217 | */ | 224 | */ |
218 | void | 225 | void |
219 | attribute_container_device_trigger(struct device *dev, | 226 | attribute_container_device_trigger(struct device *dev, |
220 | int (*fn)(struct attribute_container *, | 227 | int (*fn)(struct attribute_container *, |
221 | struct device *, | 228 | struct device *, |
222 | struct class_device *)) | 229 | struct class_device *)) |
223 | { | 230 | { |
224 | struct attribute_container *cont; | 231 | struct attribute_container *cont; |
225 | 232 | ||
226 | down(&attribute_container_mutex); | 233 | down(&attribute_container_mutex); |
227 | list_for_each_entry(cont, &attribute_container_list, node) { | 234 | list_for_each_entry(cont, &attribute_container_list, node) { |
228 | struct internal_container *ic, *tmp; | 235 | struct internal_container *ic, *tmp; |
229 | 236 | ||
230 | if (!cont->match(cont, dev)) | 237 | if (!cont->match(cont, dev)) |
231 | continue; | 238 | continue; |
232 | 239 | ||
240 | spin_lock(&cont->containers_lock); | ||
233 | list_for_each_entry_safe(ic, tmp, &cont->containers, node) { | 241 | list_for_each_entry_safe(ic, tmp, &cont->containers, node) { |
234 | if (dev == ic->classdev.dev) | 242 | if (dev == ic->classdev.dev) |
235 | fn(cont, dev, &ic->classdev); | 243 | fn(cont, dev, &ic->classdev); |
236 | } | 244 | } |
245 | spin_unlock(&cont->containers_lock); | ||
237 | } | 246 | } |
238 | up(&attribute_container_mutex); | 247 | up(&attribute_container_mutex); |
239 | } | 248 | } |
240 | EXPORT_SYMBOL_GPL(attribute_container_device_trigger); | 249 | EXPORT_SYMBOL_GPL(attribute_container_device_trigger); |
241 | 250 | ||
242 | /** | 251 | /** |
243 | * attribute_container_trigger - trigger a function for each matching container | 252 | * attribute_container_trigger - trigger a function for each matching container |
244 | * | 253 | * |
245 | * @dev: The generic device to activate the trigger for | 254 | * @dev: The generic device to activate the trigger for |
246 | * @fn: the function to trigger | 255 | * @fn: the function to trigger |
247 | * | 256 | * |
248 | * This routine triggers a function that only needs to know the | 257 | * This routine triggers a function that only needs to know the |
249 | * matching containers (not the classdev) associated with a device. | 258 | * matching containers (not the classdev) associated with a device. |
250 | * It is more lightweight than attribute_container_device_trigger, so | 259 | * It is more lightweight than attribute_container_device_trigger, so |
251 | * should be used in preference unless the triggering function | 260 | * should be used in preference unless the triggering function |
252 | * actually needs to know the classdev. | 261 | * actually needs to know the classdev. |
253 | */ | 262 | */ |
254 | void | 263 | void |
255 | attribute_container_trigger(struct device *dev, | 264 | attribute_container_trigger(struct device *dev, |
256 | int (*fn)(struct attribute_container *, | 265 | int (*fn)(struct attribute_container *, |
257 | struct device *)) | 266 | struct device *)) |
258 | { | 267 | { |
259 | struct attribute_container *cont; | 268 | struct attribute_container *cont; |
260 | 269 | ||
261 | down(&attribute_container_mutex); | 270 | down(&attribute_container_mutex); |
262 | list_for_each_entry(cont, &attribute_container_list, node) { | 271 | list_for_each_entry(cont, &attribute_container_list, node) { |
263 | if (cont->match(cont, dev)) | 272 | if (cont->match(cont, dev)) |
264 | fn(cont, dev); | 273 | fn(cont, dev); |
265 | } | 274 | } |
266 | up(&attribute_container_mutex); | 275 | up(&attribute_container_mutex); |
267 | } | 276 | } |
268 | EXPORT_SYMBOL_GPL(attribute_container_trigger); | 277 | EXPORT_SYMBOL_GPL(attribute_container_trigger); |
269 | 278 | ||
270 | /** | 279 | /** |
271 | * attribute_container_add_attrs - add attributes | 280 | * attribute_container_add_attrs - add attributes |
272 | * | 281 | * |
273 | * @classdev: The class device | 282 | * @classdev: The class device |
274 | * | 283 | * |
275 | * This simply creates all the class device sysfs files from the | 284 | * This simply creates all the class device sysfs files from the |
276 | * attributes listed in the container | 285 | * attributes listed in the container |
277 | */ | 286 | */ |
278 | int | 287 | int |
279 | attribute_container_add_attrs(struct class_device *classdev) | 288 | attribute_container_add_attrs(struct class_device *classdev) |
280 | { | 289 | { |
281 | struct attribute_container *cont = | 290 | struct attribute_container *cont = |
282 | attribute_container_classdev_to_container(classdev); | 291 | attribute_container_classdev_to_container(classdev); |
283 | struct class_device_attribute **attrs = cont->attrs; | 292 | struct class_device_attribute **attrs = cont->attrs; |
284 | int i, error; | 293 | int i, error; |
285 | 294 | ||
286 | if (!attrs) | 295 | if (!attrs) |
287 | return 0; | 296 | return 0; |
288 | 297 | ||
289 | for (i = 0; attrs[i]; i++) { | 298 | for (i = 0; attrs[i]; i++) { |
290 | error = class_device_create_file(classdev, attrs[i]); | 299 | error = class_device_create_file(classdev, attrs[i]); |
291 | if (error) | 300 | if (error) |
292 | return error; | 301 | return error; |
293 | } | 302 | } |
294 | 303 | ||
295 | return 0; | 304 | return 0; |
296 | } | 305 | } |
297 | EXPORT_SYMBOL_GPL(attribute_container_add_attrs); | 306 | EXPORT_SYMBOL_GPL(attribute_container_add_attrs); |
298 | 307 | ||
299 | /** | 308 | /** |
300 | * attribute_container_add_class_device - same function as class_device_add | 309 | * attribute_container_add_class_device - same function as class_device_add |
301 | * | 310 | * |
302 | * @classdev: the class device to add | 311 | * @classdev: the class device to add |
303 | * | 312 | * |
304 | * This performs essentially the same function as class_device_add except for | 313 | * This performs essentially the same function as class_device_add except for |
305 | * attribute containers, namely add the classdev to the system and then | 314 | * attribute containers, namely add the classdev to the system and then |
306 | * create the attribute files | 315 | * create the attribute files |
307 | */ | 316 | */ |
308 | int | 317 | int |
309 | attribute_container_add_class_device(struct class_device *classdev) | 318 | attribute_container_add_class_device(struct class_device *classdev) |
310 | { | 319 | { |
311 | int error = class_device_add(classdev); | 320 | int error = class_device_add(classdev); |
312 | if (error) | 321 | if (error) |
313 | return error; | 322 | return error; |
314 | return attribute_container_add_attrs(classdev); | 323 | return attribute_container_add_attrs(classdev); |
315 | } | 324 | } |
316 | EXPORT_SYMBOL_GPL(attribute_container_add_class_device); | 325 | EXPORT_SYMBOL_GPL(attribute_container_add_class_device); |
317 | 326 | ||
318 | /** | 327 | /** |
319 | * attribute_container_add_class_device_adapter - simple adapter for triggers | 328 | * attribute_container_add_class_device_adapter - simple adapter for triggers |
320 | * | 329 | * |
321 | * This function is identical to attribute_container_add_class_device except | 330 | * This function is identical to attribute_container_add_class_device except |
322 | * that it is designed to be called from the triggers | 331 | * that it is designed to be called from the triggers |
323 | */ | 332 | */ |
324 | int | 333 | int |
325 | attribute_container_add_class_device_adapter(struct attribute_container *cont, | 334 | attribute_container_add_class_device_adapter(struct attribute_container *cont, |
326 | struct device *dev, | 335 | struct device *dev, |
327 | struct class_device *classdev) | 336 | struct class_device *classdev) |
328 | { | 337 | { |
329 | return attribute_container_add_class_device(classdev); | 338 | return attribute_container_add_class_device(classdev); |
330 | } | 339 | } |
331 | EXPORT_SYMBOL_GPL(attribute_container_add_class_device_adapter); | 340 | EXPORT_SYMBOL_GPL(attribute_container_add_class_device_adapter); |
332 | 341 | ||
333 | /** | 342 | /** |
334 | * attribute_container_remove_attrs - remove any attribute files | 343 | * attribute_container_remove_attrs - remove any attribute files |
335 | * | 344 | * |
336 | * @classdev: The class device to remove the files from | 345 | * @classdev: The class device to remove the files from |
337 | * | 346 | * |
338 | */ | 347 | */ |
339 | void | 348 | void |
340 | attribute_container_remove_attrs(struct class_device *classdev) | 349 | attribute_container_remove_attrs(struct class_device *classdev) |
341 | { | 350 | { |
342 | struct attribute_container *cont = | 351 | struct attribute_container *cont = |
343 | attribute_container_classdev_to_container(classdev); | 352 | attribute_container_classdev_to_container(classdev); |
344 | struct class_device_attribute **attrs = cont->attrs; | 353 | struct class_device_attribute **attrs = cont->attrs; |
345 | int i; | 354 | int i; |
346 | 355 | ||
347 | if (!attrs) | 356 | if (!attrs) |
348 | return; | 357 | return; |
349 | 358 | ||
350 | for (i = 0; attrs[i]; i++) | 359 | for (i = 0; attrs[i]; i++) |
351 | class_device_remove_file(classdev, attrs[i]); | 360 | class_device_remove_file(classdev, attrs[i]); |
352 | } | 361 | } |
353 | EXPORT_SYMBOL_GPL(attribute_container_remove_attrs); | 362 | EXPORT_SYMBOL_GPL(attribute_container_remove_attrs); |
354 | 363 | ||
355 | /** | 364 | /** |
356 | * attribute_container_class_device_del - equivalent of class_device_del | 365 | * attribute_container_class_device_del - equivalent of class_device_del |
357 | * | 366 | * |
358 | * @classdev: the class device | 367 | * @classdev: the class device |
359 | * | 368 | * |
360 | * This function simply removes all the attribute files and then calls | 369 | * This function simply removes all the attribute files and then calls |
361 | * class_device_del. | 370 | * class_device_del. |
362 | */ | 371 | */ |
363 | void | 372 | void |
364 | attribute_container_class_device_del(struct class_device *classdev) | 373 | attribute_container_class_device_del(struct class_device *classdev) |
365 | { | 374 | { |
366 | attribute_container_remove_attrs(classdev); | 375 | attribute_container_remove_attrs(classdev); |
367 | class_device_del(classdev); | 376 | class_device_del(classdev); |
368 | } | 377 | } |
369 | EXPORT_SYMBOL_GPL(attribute_container_class_device_del); | 378 | EXPORT_SYMBOL_GPL(attribute_container_class_device_del); |
379 | |||
380 | /** | ||
381 | * attribute_container_find_class_device - find the corresponding class_device | ||
382 | * | ||
383 | * @cont: the container | ||
384 | * @dev: the generic device | ||
385 | * | ||
386 | * Looks up the device in the container's list of class devices and returns | ||
387 | * the corresponding class_device. | ||
388 | */ | ||
389 | struct class_device * | ||
390 | attribute_container_find_class_device(struct attribute_container *cont, | ||
391 | struct device *dev) | ||
392 | { | ||
393 | struct class_device *cdev = NULL; | ||
394 | struct internal_container *ic; | ||
395 | |||
396 | spin_lock(&cont->containers_lock); | ||
397 | list_for_each_entry(ic, &cont->containers, node) { | ||
398 | if (ic->classdev.dev == dev) { | ||
399 | cdev = &ic->classdev; | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | spin_unlock(&cont->containers_lock); | ||
404 | |||
405 | return cdev; | ||
406 | } | ||
407 | EXPORT_SYMBOL_GPL(attribute_container_find_class_device); | ||
370 | 408 | ||
371 | int __init | 409 | int __init |
372 | attribute_container_init(void) | 410 | attribute_container_init(void) |
373 | { | 411 | { |
374 | INIT_LIST_HEAD(&attribute_container_list); | 412 | INIT_LIST_HEAD(&attribute_container_list); |
375 | return 0; | 413 | return 0; |
376 | } | 414 | } |
377 | 415 |
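A hedged usage sketch of the attribute_container_find_class_device() helper added above; the example_ identifiers are invented for illustration and are not part of this commit:

/* Hypothetical caller: recover the classdev that add_device allocated for dev,
 * e.g. so transport-specific state can be reached from the generic device. */
static void example_report_classdev(struct attribute_container *cont,
                                    struct device *dev)
{
        struct class_device *cdev =
                attribute_container_find_class_device(cont, dev);

        if (cdev)
                dev_printk(KERN_INFO, dev, "matched class device %s\n",
                           cdev->class_id);
}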
drivers/base/transport_class.c
1 | /* | 1 | /* |
2 | * transport_class.c - implementation of generic transport classes | 2 | * transport_class.c - implementation of generic transport classes |
3 | * using attribute_containers | 3 | * using attribute_containers |
4 | * | 4 | * |
5 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> | 5 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> |
6 | * | 6 | * |
7 | * This file is licensed under GPLv2 | 7 | * This file is licensed under GPLv2 |
8 | * | 8 | * |
9 | * The basic idea here is to allow any "device controller" (which | 9 | * The basic idea here is to allow any "device controller" (which |
10 | * would most often be a Host Bus Adapter" to use the services of one | 10 | * would most often be a Host Bus Adapter" to use the services of one |
11 | * or more tranport classes for performing transport specific | 11 | * or more tranport classes for performing transport specific |
12 | * services. Transport specific services are things that the generic | 12 | * services. Transport specific services are things that the generic |
13 | * command layer doesn't want to know about (speed settings, line | 13 | * command layer doesn't want to know about (speed settings, line |
14 | * condidtioning, etc), but which the user might be interested in. | 14 | * condidtioning, etc), but which the user might be interested in. |
15 | * Thus, the HBA's use the routines exported by the transport classes | 15 | * Thus, the HBA's use the routines exported by the transport classes |
16 | * to perform these functions. The transport classes export certain | 16 | * to perform these functions. The transport classes export certain |
17 | * values to the user via sysfs using attribute containers. | 17 | * values to the user via sysfs using attribute containers. |
18 | * | 18 | * |
19 | * Note: because not every HBA will care about every transport | 19 | * Note: because not every HBA will care about every transport |
20 | * attribute, there's a many to one relationship that goes like this: | 20 | * attribute, there's a many to one relationship that goes like this: |
21 | * | 21 | * |
22 | * transport class<-----attribute container<----class device | 22 | * transport class<-----attribute container<----class device |
23 | * | 23 | * |
24 | * Usually the attribute container is per-HBA, but the design doesn't | 24 | * Usually the attribute container is per-HBA, but the design doesn't |
25 | * mandate that. Although most of the services will be specific to | 25 | * mandate that. Although most of the services will be specific to |
26 | * the actual external storage connection used by the HBA, the generic | 26 | * the actual external storage connection used by the HBA, the generic |
27 | * transport class is framed entirely in terms of generic devices to | 27 | * transport class is framed entirely in terms of generic devices to |
28 | * allow it to be used by any physical HBA in the system. | 28 | * allow it to be used by any physical HBA in the system. |
29 | */ | 29 | */ |
30 | #include <linux/attribute_container.h> | 30 | #include <linux/attribute_container.h> |
31 | #include <linux/transport_class.h> | 31 | #include <linux/transport_class.h> |
32 | 32 | ||
33 | /** | 33 | /** |
34 | * transport_class_register - register an initial transport class | 34 | * transport_class_register - register an initial transport class |
35 | * | 35 | * |
36 | * @tclass: a pointer to the transport class structure to be initialised | 36 | * @tclass: a pointer to the transport class structure to be initialised |
37 | * | 37 | * |
38 | * The transport class contains an embedded class which is used to | 38 | * The transport class contains an embedded class which is used to |
39 | * identify it. The caller should initialise this structure with | 39 | * identify it. The caller should initialise this structure with |
40 | * zeros and then generic class must have been initialised with the | 40 | * zeros and then generic class must have been initialised with the |
41 | * actual transport class unique name. There's a macro | 41 | * actual transport class unique name. There's a macro |
42 | * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must | 42 | * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must |
43 | * be registered). | 43 | * be registered). |
44 | * | 44 | * |
45 | * Returns 0 on success or error on failure. | 45 | * Returns 0 on success or error on failure. |
46 | */ | 46 | */ |
47 | int transport_class_register(struct transport_class *tclass) | 47 | int transport_class_register(struct transport_class *tclass) |
48 | { | 48 | { |
49 | return class_register(&tclass->class); | 49 | return class_register(&tclass->class); |
50 | } | 50 | } |
51 | EXPORT_SYMBOL_GPL(transport_class_register); | 51 | EXPORT_SYMBOL_GPL(transport_class_register); |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * transport_class_unregister - unregister a previously registered class | 54 | * transport_class_unregister - unregister a previously registered class |
55 | * | 55 | * |
56 | * @tclass: The transport class to unregister | 56 | * @tclass: The transport class to unregister |
57 | * | 57 | * |
58 | * Must be called prior to deallocating the memory for the transport | 58 | * Must be called prior to deallocating the memory for the transport |
59 | * class. | 59 | * class. |
60 | */ | 60 | */ |
61 | void transport_class_unregister(struct transport_class *tclass) | 61 | void transport_class_unregister(struct transport_class *tclass) |
62 | { | 62 | { |
63 | class_unregister(&tclass->class); | 63 | class_unregister(&tclass->class); |
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(transport_class_unregister); | 65 | EXPORT_SYMBOL_GPL(transport_class_unregister); |
66 | 66 | ||
67 | static int anon_transport_dummy_function(struct device *dev) | 67 | static int anon_transport_dummy_function(struct transport_container *tc, |
68 | struct device *dev, | ||
69 | struct class_device *cdev) | ||
68 | { | 70 | { |
69 | /* do nothing */ | 71 | /* do nothing */ |
70 | return 0; | 72 | return 0; |
71 | } | 73 | } |
72 | 74 | ||
73 | /** | 75 | /** |
74 | * anon_transport_class_register - register an anonymous class | 76 | * anon_transport_class_register - register an anonymous class |
75 | * | 77 | * |
76 | * @atc: The anon transport class to register | 78 | * @atc: The anon transport class to register |
77 | * | 79 | * |
78 | * The anonymous transport class contains both a transport class and a | 80 | * The anonymous transport class contains both a transport class and a |
79 | * container. The idea of an anonymous class is that it never | 81 | * container. The idea of an anonymous class is that it never |
80 | * actually has any device attributes associated with it (and thus | 82 | * actually has any device attributes associated with it (and thus |
81 | * saves on container storage). So it can only be used for triggering | 83 | * saves on container storage). So it can only be used for triggering |
82 | * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to | 84 | * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to |
83 | * initialise the anon transport class storage. | 85 | * initialise the anon transport class storage. |
84 | */ | 86 | */ |
85 | int anon_transport_class_register(struct anon_transport_class *atc) | 87 | int anon_transport_class_register(struct anon_transport_class *atc) |
86 | { | 88 | { |
87 | int error; | 89 | int error; |
88 | atc->container.class = &atc->tclass.class; | 90 | atc->container.class = &atc->tclass.class; |
89 | attribute_container_set_no_classdevs(&atc->container); | 91 | attribute_container_set_no_classdevs(&atc->container); |
90 | error = attribute_container_register(&atc->container); | 92 | error = attribute_container_register(&atc->container); |
91 | if (error) | 93 | if (error) |
92 | return error; | 94 | return error; |
93 | atc->tclass.setup = anon_transport_dummy_function; | 95 | atc->tclass.setup = anon_transport_dummy_function; |
94 | atc->tclass.remove = anon_transport_dummy_function; | 96 | atc->tclass.remove = anon_transport_dummy_function; |
95 | return 0; | 97 | return 0; |
96 | } | 98 | } |
97 | EXPORT_SYMBOL_GPL(anon_transport_class_register); | 99 | EXPORT_SYMBOL_GPL(anon_transport_class_register); |
98 | 100 | ||
99 | /** | 101 | /** |
100 | * anon_transport_class_unregister - unregister an anon class | 102 | * anon_transport_class_unregister - unregister an anon class |
101 | * | 103 | * |
102 | * @atc: Pointer to the anon transport class to unregister | 104 | * @atc: Pointer to the anon transport class to unregister |
103 | * | 105 | * |
104 | * Must be called prior to deallocating the memory for the anon | 106 | * Must be called prior to deallocating the memory for the anon |
105 | * transport class. | 107 | * transport class. |
106 | */ | 108 | */ |
107 | void anon_transport_class_unregister(struct anon_transport_class *atc) | 109 | void anon_transport_class_unregister(struct anon_transport_class *atc) |
108 | { | 110 | { |
109 | attribute_container_unregister(&atc->container); | 111 | attribute_container_unregister(&atc->container); |
110 | } | 112 | } |
111 | EXPORT_SYMBOL_GPL(anon_transport_class_unregister); | 113 | EXPORT_SYMBOL_GPL(anon_transport_class_unregister); |
112 | 114 | ||
113 | static int transport_setup_classdev(struct attribute_container *cont, | 115 | static int transport_setup_classdev(struct attribute_container *cont, |
114 | struct device *dev, | 116 | struct device *dev, |
115 | struct class_device *classdev) | 117 | struct class_device *classdev) |
116 | { | 118 | { |
117 | struct transport_class *tclass = class_to_transport_class(cont->class); | 119 | struct transport_class *tclass = class_to_transport_class(cont->class); |
120 | struct transport_container *tcont = attribute_container_to_transport_container(cont); | ||
118 | 121 | ||
119 | if (tclass->setup) | 122 | if (tclass->setup) |
120 | tclass->setup(dev); | 123 | tclass->setup(tcont, dev, classdev); |
121 | 124 | ||
122 | return 0; | 125 | return 0; |
123 | } | 126 | } |
124 | 127 | ||
125 | /** | 128 | /** |
126 | * transport_setup_device - declare a new dev for transport class association | 129 | * transport_setup_device - declare a new dev for transport class association |
127 | * but don't make it visible yet. | 130 | * but don't make it visible yet. |
128 | * | 131 | * |
129 | * @dev: the generic device representing the entity being added | 132 | * @dev: the generic device representing the entity being added |
130 | * | 133 | * |
131 | * Usually, dev represents some component in the HBA system (either | 134 | * Usually, dev represents some component in the HBA system (either |
132 | * the HBA itself or a device remote across the HBA bus). This | 135 | * the HBA itself or a device remote across the HBA bus). This |
133 | * routine is simply a trigger point to see if any set of transport | 136 | * routine is simply a trigger point to see if any set of transport |
134 | * classes wishes to associate with the added device. This allocates | 137 | * classes wishes to associate with the added device. This allocates |
135 | * storage for the class device and initialises it, but does not yet | 138 | * storage for the class device and initialises it, but does not yet |
136 | * add it to the system or add attributes to it (you do this with | 139 | * add it to the system or add attributes to it (you do this with |
137 | * transport_add_device). If you have no need for a separate setup | 140 | * transport_add_device). If you have no need for a separate setup |
138 | * and add operations, use transport_register_device (see | 141 | * and add operations, use transport_register_device (see |
139 | * transport_class.h). | 142 | * transport_class.h). |
140 | */ | 143 | */ |
141 | 144 | ||
142 | void transport_setup_device(struct device *dev) | 145 | void transport_setup_device(struct device *dev) |
143 | { | 146 | { |
144 | attribute_container_add_device(dev, transport_setup_classdev); | 147 | attribute_container_add_device(dev, transport_setup_classdev); |
145 | } | 148 | } |
146 | EXPORT_SYMBOL_GPL(transport_setup_device); | 149 | EXPORT_SYMBOL_GPL(transport_setup_device); |
147 | 150 | ||
148 | static int transport_add_class_device(struct attribute_container *cont, | 151 | static int transport_add_class_device(struct attribute_container *cont, |
149 | struct device *dev, | 152 | struct device *dev, |
150 | struct class_device *classdev) | 153 | struct class_device *classdev) |
151 | { | 154 | { |
152 | int error = attribute_container_add_class_device(classdev); | 155 | int error = attribute_container_add_class_device(classdev); |
153 | struct transport_container *tcont = | 156 | struct transport_container *tcont = |
154 | attribute_container_to_transport_container(cont); | 157 | attribute_container_to_transport_container(cont); |
155 | 158 | ||
156 | if (!error && tcont->statistics) | 159 | if (!error && tcont->statistics) |
157 | error = sysfs_create_group(&classdev->kobj, tcont->statistics); | 160 | error = sysfs_create_group(&classdev->kobj, tcont->statistics); |
158 | 161 | ||
159 | return error; | 162 | return error; |
160 | } | 163 | } |
161 | 164 | ||
162 | 165 | ||
163 | /** | 166 | /** |
164 | * transport_add_device - declare a new dev for transport class association | 167 | * transport_add_device - declare a new dev for transport class association |
165 | * | 168 | * |
166 | * @dev: the generic device representing the entity being added | 169 | * @dev: the generic device representing the entity being added |
167 | * | 170 | * |
168 | * Usually, dev represents some component in the HBA system (either | 171 | * Usually, dev represents some component in the HBA system (either |
169 | * the HBA itself or a device remote across the HBA bus). This | 172 | * the HBA itself or a device remote across the HBA bus). This |
170 | * routine is simply a trigger point used to add the device to the | 173 | * routine is simply a trigger point used to add the device to the |
171 | * system and register attributes for it. | 174 | * system and register attributes for it. |
172 | */ | 175 | */ |
173 | 176 | ||
174 | void transport_add_device(struct device *dev) | 177 | void transport_add_device(struct device *dev) |
175 | { | 178 | { |
176 | attribute_container_device_trigger(dev, transport_add_class_device); | 179 | attribute_container_device_trigger(dev, transport_add_class_device); |
177 | } | 180 | } |
178 | EXPORT_SYMBOL_GPL(transport_add_device); | 181 | EXPORT_SYMBOL_GPL(transport_add_device); |
179 | 182 | ||
180 | static int transport_configure(struct attribute_container *cont, | 183 | static int transport_configure(struct attribute_container *cont, |
181 | struct device *dev) | 184 | struct device *dev, |
185 | struct class_device *cdev) | ||
182 | { | 186 | { |
183 | struct transport_class *tclass = class_to_transport_class(cont->class); | 187 | struct transport_class *tclass = class_to_transport_class(cont->class); |
188 | struct transport_container *tcont = attribute_container_to_transport_container(cont); | ||
184 | 189 | ||
185 | if (tclass->configure) | 190 | if (tclass->configure) |
186 | tclass->configure(dev); | 191 | tclass->configure(tcont, dev, cdev); |
187 | 192 | ||
188 | return 0; | 193 | return 0; |
189 | } | 194 | } |
190 | 195 | ||
191 | /** | 196 | /** |
192 | * transport_configure_device - configure an already set up device | 197 | * transport_configure_device - configure an already set up device |
193 | * | 198 | * |
194 | * @dev: generic device representing device to be configured | 199 | * @dev: generic device representing device to be configured |
195 | * | 200 | * |
196 | * The idea of configure is simply to provide a point within the setup | 201 | * The idea of configure is simply to provide a point within the setup |
197 | * process to allow the transport class to extract information from a | 202 | * process to allow the transport class to extract information from a |
198 | * device after it has been setup. This is used in SCSI because we | 203 | * device after it has been setup. This is used in SCSI because we |
199 | * have to have a setup device to begin using the HBA, but after we | 204 | * have to have a setup device to begin using the HBA, but after we |
200 | * send the initial inquiry, we use configure to extract the device | 205 | * send the initial inquiry, we use configure to extract the device |
201 | * parameters. The device need not have been added to be configured. | 206 | * parameters. The device need not have been added to be configured. |
202 | */ | 207 | */ |
203 | void transport_configure_device(struct device *dev) | 208 | void transport_configure_device(struct device *dev) |
204 | { | 209 | { |
205 | attribute_container_trigger(dev, transport_configure); | 210 | attribute_container_device_trigger(dev, transport_configure); |
206 | } | 211 | } |
207 | EXPORT_SYMBOL_GPL(transport_configure_device); | 212 | EXPORT_SYMBOL_GPL(transport_configure_device); |
208 | 213 | ||
209 | static int transport_remove_classdev(struct attribute_container *cont, | 214 | static int transport_remove_classdev(struct attribute_container *cont, |
210 | struct device *dev, | 215 | struct device *dev, |
211 | struct class_device *classdev) | 216 | struct class_device *classdev) |
212 | { | 217 | { |
213 | struct transport_container *tcont = | 218 | struct transport_container *tcont = |
214 | attribute_container_to_transport_container(cont); | 219 | attribute_container_to_transport_container(cont); |
215 | struct transport_class *tclass = class_to_transport_class(cont->class); | 220 | struct transport_class *tclass = class_to_transport_class(cont->class); |
216 | 221 | ||
217 | if (tclass->remove) | 222 | if (tclass->remove) |
218 | tclass->remove(dev); | 223 | tclass->remove(tcont, dev, classdev); |
219 | 224 | ||
220 | if (tclass->remove != anon_transport_dummy_function) { | 225 | if (tclass->remove != anon_transport_dummy_function) { |
221 | if (tcont->statistics) | 226 | if (tcont->statistics) |
222 | sysfs_remove_group(&classdev->kobj, tcont->statistics); | 227 | sysfs_remove_group(&classdev->kobj, tcont->statistics); |
223 | attribute_container_class_device_del(classdev); | 228 | attribute_container_class_device_del(classdev); |
224 | } | 229 | } |
225 | 230 | ||
226 | return 0; | 231 | return 0; |
227 | } | 232 | } |
228 | 233 | ||
229 | 234 | ||
230 | /** | 235 | /** |
231 | * transport_remove_device - remove the visibility of a device | 236 | * transport_remove_device - remove the visibility of a device |
232 | * | 237 | * |
233 | * @dev: generic device to remove | 238 | * @dev: generic device to remove |
234 | * | 239 | * |
235 | * This call removes the visibility of the device (to the user from | 240 | * This call removes the visibility of the device (to the user from |
236 | * sysfs), but does not destroy it. To eliminate a device entirely | 241 | * sysfs), but does not destroy it. To eliminate a device entirely |
237 | * you must also call transport_destroy_device. If you don't need to | 242 | * you must also call transport_destroy_device. If you don't need to |
238 | * do remove and destroy as separate operations, use | 243 | * do remove and destroy as separate operations, use |
239 | * transport_unregister_device() (see transport_class.h) which will | 244 | * transport_unregister_device() (see transport_class.h) which will |
240 | * perform both calls for you. | 245 | * perform both calls for you. |
241 | */ | 246 | */ |
242 | void transport_remove_device(struct device *dev) | 247 | void transport_remove_device(struct device *dev) |
243 | { | 248 | { |
244 | attribute_container_device_trigger(dev, transport_remove_classdev); | 249 | attribute_container_device_trigger(dev, transport_remove_classdev); |
245 | } | 250 | } |
246 | EXPORT_SYMBOL_GPL(transport_remove_device); | 251 | EXPORT_SYMBOL_GPL(transport_remove_device); |
247 | 252 | ||
248 | static void transport_destroy_classdev(struct attribute_container *cont, | 253 | static void transport_destroy_classdev(struct attribute_container *cont, |
249 | struct device *dev, | 254 | struct device *dev, |
250 | struct class_device *classdev) | 255 | struct class_device *classdev) |
251 | { | 256 | { |
252 | struct transport_class *tclass = class_to_transport_class(cont->class); | 257 | struct transport_class *tclass = class_to_transport_class(cont->class); |
253 | 258 | ||
254 | if (tclass->remove != anon_transport_dummy_function) | 259 | if (tclass->remove != anon_transport_dummy_function) |
255 | class_device_put(classdev); | 260 | class_device_put(classdev); |
256 | } | 261 | } |
257 | 262 | ||
258 | 263 | ||
259 | /** | 264 | /** |
260 | * transport_destroy_device - destroy a removed device | 265 | * transport_destroy_device - destroy a removed device |
261 | * | 266 | * |
262 | * @dev: device to eliminate from the transport class. | 267 | * @dev: device to eliminate from the transport class. |
263 | * | 268 | * |
264 | * This call triggers the elimination of storage associated with the | 269 | * This call triggers the elimination of storage associated with the |
265 | * transport classdev. Note: all it really does is relinquish a | 270 | * transport classdev. Note: all it really does is relinquish a |
266 | * reference to the classdev. The memory will not be freed until the | 271 | * reference to the classdev. The memory will not be freed until the |
267 | * last reference goes to zero. Note also that the classdev retains a | 272 | * last reference goes to zero. Note also that the classdev retains a |
268 | * reference count on dev, so dev too will remain for as long as the | 273 | * reference count on dev, so dev too will remain for as long as the |
269 | * transport class device remains around. | 274 | * transport class device remains around. |
270 | */ | 275 | */ |
271 | void transport_destroy_device(struct device *dev) | 276 | void transport_destroy_device(struct device *dev) |
272 | { | 277 | { |
273 | attribute_container_remove_device(dev, transport_destroy_classdev); | 278 | attribute_container_remove_device(dev, transport_destroy_classdev); |
274 | } | 279 | } |
275 | EXPORT_SYMBOL_GPL(transport_destroy_device); | 280 | EXPORT_SYMBOL_GPL(transport_destroy_device); |
276 | 281 |
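As a consumer-side illustration of the new three-argument callbacks, a hedged sketch of a transport class wired up by hand; the example_ identifiers are invented, and the DECLARE_TRANSPORT_CLASS() macro mentioned in the kerneldoc above would normally perform the structure initialisation:

static int example_setup(struct transport_container *tc,
                         struct device *dev,
                         struct class_device *cdev)
{
        /* transport state can now be hung off cdev instead of dev */
        return 0;
}

static struct transport_class example_class = {
        .class = { .name = "example_transport" },
        .setup = example_setup,
        /* .configure and .remove take the same three arguments */
};

static int __init example_init(void)
{
        /* register the class; devices then trigger setup via transport_setup_device() */
        return transport_class_register(&example_class);
}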
drivers/scsi/scsi_transport_fc.c
1 | /* | 1 | /* |
2 | * FiberChannel transport specific attributes exported to sysfs. | 2 | * FiberChannel transport specific attributes exported to sysfs. |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. | 4 | * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | * | 19 | * |
20 | * ======== | 20 | * ======== |
21 | * | 21 | * |
22 | * Copyright (C) 2004-2005 James Smart, Emulex Corporation | 22 | * Copyright (C) 2004-2005 James Smart, Emulex Corporation |
23 | * Rewrite for host, target, device, and remote port attributes, | 23 | * Rewrite for host, target, device, and remote port attributes, |
24 | * statistics, and service functions... | 24 | * statistics, and service functions... |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <scsi/scsi_device.h> | 29 | #include <scsi/scsi_device.h> |
30 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
31 | #include <scsi/scsi_transport.h> | 31 | #include <scsi/scsi_transport.h> |
32 | #include <scsi/scsi_transport_fc.h> | 32 | #include <scsi/scsi_transport_fc.h> |
33 | #include "scsi_priv.h" | 33 | #include "scsi_priv.h" |
34 | 34 | ||
35 | #define FC_PRINTK(x, l, f, a...) printk(l "scsi(%d:%d:%d:%d): " f, (x)->host->host_no, (x)->channel, (x)->id, (x)->lun , ##a) | 35 | #define FC_PRINTK(x, l, f, a...) printk(l "scsi(%d:%d:%d:%d): " f, (x)->host->host_no, (x)->channel, (x)->id, (x)->lun , ##a) |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Redefine so that we can have same named attributes in the | 38 | * Redefine so that we can have same named attributes in the |
39 | * sdev/starget/host objects. | 39 | * sdev/starget/host objects. |
40 | */ | 40 | */ |
41 | #define FC_CLASS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ | 41 | #define FC_CLASS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ |
42 | struct class_device_attribute class_device_attr_##_prefix##_##_name = \ | 42 | struct class_device_attribute class_device_attr_##_prefix##_##_name = \ |
43 | __ATTR(_name,_mode,_show,_store) | 43 | __ATTR(_name,_mode,_show,_store) |
44 | 44 | ||
45 | #define fc_enum_name_search(title, table_type, table) \ | 45 | #define fc_enum_name_search(title, table_type, table) \ |
46 | static const char *get_fc_##title##_name(enum table_type table_key) \ | 46 | static const char *get_fc_##title##_name(enum table_type table_key) \ |
47 | { \ | 47 | { \ |
48 | int i; \ | 48 | int i; \ |
49 | char *name = NULL; \ | 49 | char *name = NULL; \ |
50 | \ | 50 | \ |
51 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ | 51 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ |
52 | if (table[i].value == table_key) { \ | 52 | if (table[i].value == table_key) { \ |
53 | name = table[i].name; \ | 53 | name = table[i].name; \ |
54 | break; \ | 54 | break; \ |
55 | } \ | 55 | } \ |
56 | } \ | 56 | } \ |
57 | return name; \ | 57 | return name; \ |
58 | } | 58 | } |
59 | 59 | ||
60 | #define fc_enum_name_match(title, table_type, table) \ | 60 | #define fc_enum_name_match(title, table_type, table) \ |
61 | static int get_fc_##title##_match(const char *table_key, \ | 61 | static int get_fc_##title##_match(const char *table_key, \ |
62 | enum table_type *value) \ | 62 | enum table_type *value) \ |
63 | { \ | 63 | { \ |
64 | int i; \ | 64 | int i; \ |
65 | \ | 65 | \ |
66 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ | 66 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ |
67 | if (strncmp(table_key, table[i].name, \ | 67 | if (strncmp(table_key, table[i].name, \ |
68 | table[i].matchlen) == 0) { \ | 68 | table[i].matchlen) == 0) { \ |
69 | *value = table[i].value; \ | 69 | *value = table[i].value; \ |
70 | return 0; /* success */ \ | 70 | return 0; /* success */ \ |
71 | } \ | 71 | } \ |
72 | } \ | 72 | } \ |
73 | return 1; /* failure */ \ | 73 | return 1; /* failure */ \ |
74 | } | 74 | } |
75 | 75 | ||
76 | 76 | ||
77 | /* Convert fc_port_type values to ascii string name */ | 77 | /* Convert fc_port_type values to ascii string name */ |
78 | static struct { | 78 | static struct { |
79 | enum fc_port_type value; | 79 | enum fc_port_type value; |
80 | char *name; | 80 | char *name; |
81 | } fc_port_type_names[] = { | 81 | } fc_port_type_names[] = { |
82 | { FC_PORTTYPE_UNKNOWN, "Unknown" }, | 82 | { FC_PORTTYPE_UNKNOWN, "Unknown" }, |
83 | { FC_PORTTYPE_OTHER, "Other" }, | 83 | { FC_PORTTYPE_OTHER, "Other" }, |
84 | { FC_PORTTYPE_NOTPRESENT, "Not Present" }, | 84 | { FC_PORTTYPE_NOTPRESENT, "Not Present" }, |
85 | { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" }, | 85 | { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" }, |
86 | { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, | 86 | { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, |
87 | { FC_PORTTYPE_LPORT, "LPort (private loop)" }, | 87 | { FC_PORTTYPE_LPORT, "LPort (private loop)" }, |
88 | { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" }, | 88 | { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" }, |
89 | }; | 89 | }; |
90 | fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) | 90 | fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) |
91 | #define FC_PORTTYPE_MAX_NAMELEN 50 | 91 | #define FC_PORTTYPE_MAX_NAMELEN 50 |
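For reference, the fc_enum_name_search() instantiation above performs a simple linear scan of the table; a sketch of the equivalent expanded code (shown only for illustration) is:

	static const char *get_fc_port_type_name(enum fc_port_type table_key)
	{
		int i;
		char *name = NULL;

		for (i = 0; i < sizeof(fc_port_type_names)/sizeof(fc_port_type_names[0]); i++) {
			if (fc_port_type_names[i].value == table_key) {
				name = fc_port_type_names[i].name;
				break;
			}
		}
		return name;	/* NULL when the value is not in the table */
	}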
92 | 92 | ||
93 | 93 | ||
94 | /* Convert fc_port_state values to ascii string name */ | 94 | /* Convert fc_port_state values to ascii string name */ |
95 | static struct { | 95 | static struct { |
96 | enum fc_port_state value; | 96 | enum fc_port_state value; |
97 | char *name; | 97 | char *name; |
98 | } fc_port_state_names[] = { | 98 | } fc_port_state_names[] = { |
99 | { FC_PORTSTATE_UNKNOWN, "Unknown" }, | 99 | { FC_PORTSTATE_UNKNOWN, "Unknown" }, |
100 | { FC_PORTSTATE_NOTPRESENT, "Not Present" }, | 100 | { FC_PORTSTATE_NOTPRESENT, "Not Present" }, |
101 | { FC_PORTSTATE_ONLINE, "Online" }, | 101 | { FC_PORTSTATE_ONLINE, "Online" }, |
102 | { FC_PORTSTATE_OFFLINE, "Offline" }, | 102 | { FC_PORTSTATE_OFFLINE, "Offline" }, |
103 | { FC_PORTSTATE_BLOCKED, "Blocked" }, | 103 | { FC_PORTSTATE_BLOCKED, "Blocked" }, |
104 | { FC_PORTSTATE_BYPASSED, "Bypassed" }, | 104 | { FC_PORTSTATE_BYPASSED, "Bypassed" }, |
105 | { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" }, | 105 | { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" }, |
106 | { FC_PORTSTATE_LINKDOWN, "Linkdown" }, | 106 | { FC_PORTSTATE_LINKDOWN, "Linkdown" }, |
107 | { FC_PORTSTATE_ERROR, "Error" }, | 107 | { FC_PORTSTATE_ERROR, "Error" }, |
108 | { FC_PORTSTATE_LOOPBACK, "Loopback" }, | 108 | { FC_PORTSTATE_LOOPBACK, "Loopback" }, |
109 | }; | 109 | }; |
110 | fc_enum_name_search(port_state, fc_port_state, fc_port_state_names) | 110 | fc_enum_name_search(port_state, fc_port_state, fc_port_state_names) |
111 | #define FC_PORTSTATE_MAX_NAMELEN 20 | 111 | #define FC_PORTSTATE_MAX_NAMELEN 20 |
112 | 112 | ||
113 | 113 | ||
114 | /* Convert fc_tgtid_binding_type values to ascii string name */ | 114 | /* Convert fc_tgtid_binding_type values to ascii string name */ |
115 | static struct { | 115 | static struct { |
116 | enum fc_tgtid_binding_type value; | 116 | enum fc_tgtid_binding_type value; |
117 | char *name; | 117 | char *name; |
118 | int matchlen; | 118 | int matchlen; |
119 | } fc_tgtid_binding_type_names[] = { | 119 | } fc_tgtid_binding_type_names[] = { |
120 | { FC_TGTID_BIND_NONE, "none", 4 }, | 120 | { FC_TGTID_BIND_NONE, "none", 4 }, |
121 | { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 }, | 121 | { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 }, |
122 | { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 }, | 122 | { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 }, |
123 | { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 }, | 123 | { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 }, |
124 | }; | 124 | }; |
125 | fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type, | 125 | fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type, |
126 | fc_tgtid_binding_type_names) | 126 | fc_tgtid_binding_type_names) |
127 | fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type, | 127 | fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type, |
128 | fc_tgtid_binding_type_names) | 128 | fc_tgtid_binding_type_names) |
129 | #define FC_BINDTYPE_MAX_NAMELEN 30 | 129 | #define FC_BINDTYPE_MAX_NAMELEN 30 |
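The matching direction uses strncmp() against each entry's matchlen, so a written value only has to begin with the short token. Illustratively (a sketch assuming the table above, not code from the patch):

	enum fc_tgtid_binding_type val;

	/* "wwnn..." matches the first 4 characters of the wwnn entry */
	get_fc_tgtid_bind_type_match("wwnn", &val);	/* returns 0, val = FC_TGTID_BIND_BY_WWNN */
	/* "port_id..." matches the first 7 characters of the port_id entry */
	get_fc_tgtid_bind_type_match("port_id", &val);	/* returns 0, val = FC_TGTID_BIND_BY_ID */
	/* any other string returns 1 (failure) and leaves val untouched */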
130 | 130 | ||
131 | 131 | ||
132 | #define fc_bitfield_name_search(title, table) \ | 132 | #define fc_bitfield_name_search(title, table) \ |
133 | static ssize_t \ | 133 | static ssize_t \ |
134 | get_fc_##title##_names(u32 table_key, char *buf) \ | 134 | get_fc_##title##_names(u32 table_key, char *buf) \ |
135 | { \ | 135 | { \ |
136 | char *prefix = ""; \ | 136 | char *prefix = ""; \ |
137 | ssize_t len = 0; \ | 137 | ssize_t len = 0; \ |
138 | int i; \ | 138 | int i; \ |
139 | \ | 139 | \ |
140 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ | 140 | for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) { \ |
141 | if (table[i].value & table_key) { \ | 141 | if (table[i].value & table_key) { \ |
142 | len += sprintf(buf + len, "%s%s", \ | 142 | len += sprintf(buf + len, "%s%s", \ |
143 | prefix, table[i].name); \ | 143 | prefix, table[i].name); \ |
144 | prefix = ", "; \ | 144 | prefix = ", "; \ |
145 | } \ | 145 | } \ |
146 | } \ | 146 | } \ |
147 | len += sprintf(buf + len, "\n"); \ | 147 | len += sprintf(buf + len, "\n"); \ |
148 | return len; \ | 148 | return len; \ |
149 | } | 149 | } |
150 | 150 | ||
151 | 151 | ||
152 | /* Convert FC_COS bit values to ascii string name */ | 152 | /* Convert FC_COS bit values to ascii string name */ |
153 | static struct { | 153 | static struct { |
154 | u32 value; | 154 | u32 value; |
155 | char *name; | 155 | char *name; |
156 | } fc_cos_names[] = { | 156 | } fc_cos_names[] = { |
157 | { FC_COS_CLASS1, "Class 1" }, | 157 | { FC_COS_CLASS1, "Class 1" }, |
158 | { FC_COS_CLASS2, "Class 2" }, | 158 | { FC_COS_CLASS2, "Class 2" }, |
159 | { FC_COS_CLASS3, "Class 3" }, | 159 | { FC_COS_CLASS3, "Class 3" }, |
160 | { FC_COS_CLASS4, "Class 4" }, | 160 | { FC_COS_CLASS4, "Class 4" }, |
161 | { FC_COS_CLASS6, "Class 6" }, | 161 | { FC_COS_CLASS6, "Class 6" }, |
162 | }; | 162 | }; |
163 | fc_bitfield_name_search(cos, fc_cos_names) | 163 | fc_bitfield_name_search(cos, fc_cos_names) |
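Each set bit contributes its name to the output, comma separated and terminated with a newline. A hypothetical use (assuming the table above and a sufficiently large buf):

	ssize_t len;

	/* writes "Class 2, Class 3\n" into buf and returns its length */
	len = get_fc_cos_names(FC_COS_CLASS2 | FC_COS_CLASS3, buf);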
164 | 164 | ||
165 | 165 | ||
166 | /* Convert FC_PORTSPEED bit values to ascii string name */ | 166 | /* Convert FC_PORTSPEED bit values to ascii string name */ |
167 | static struct { | 167 | static struct { |
168 | u32 value; | 168 | u32 value; |
169 | char *name; | 169 | char *name; |
170 | } fc_port_speed_names[] = { | 170 | } fc_port_speed_names[] = { |
171 | { FC_PORTSPEED_1GBIT, "1 Gbit" }, | 171 | { FC_PORTSPEED_1GBIT, "1 Gbit" }, |
172 | { FC_PORTSPEED_2GBIT, "2 Gbit" }, | 172 | { FC_PORTSPEED_2GBIT, "2 Gbit" }, |
173 | { FC_PORTSPEED_4GBIT, "4 Gbit" }, | 173 | { FC_PORTSPEED_4GBIT, "4 Gbit" }, |
174 | { FC_PORTSPEED_10GBIT, "10 Gbit" }, | 174 | { FC_PORTSPEED_10GBIT, "10 Gbit" }, |
175 | { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, | 175 | { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, |
176 | }; | 176 | }; |
177 | fc_bitfield_name_search(port_speed, fc_port_speed_names) | 177 | fc_bitfield_name_search(port_speed, fc_port_speed_names) |
178 | 178 | ||
179 | 179 | ||
180 | static int | 180 | static int |
181 | show_fc_fc4s (char *buf, u8 *fc4_list) | 181 | show_fc_fc4s (char *buf, u8 *fc4_list) |
182 | { | 182 | { |
183 | int i, len=0; | 183 | int i, len=0; |
184 | 184 | ||
185 | for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++) | 185 | for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++) |
186 | len += sprintf(buf + len , "0x%02x ", *fc4_list); | 186 | len += sprintf(buf + len , "0x%02x ", *fc4_list); |
187 | len += sprintf(buf + len, "\n"); | 187 | len += sprintf(buf + len, "\n"); |
188 | return len; | 188 | return len; |
189 | } | 189 | } |
190 | 190 | ||
191 | 191 | ||
192 | /* Convert FC_RPORT_ROLE bit values to ascii string name */ | 192 | /* Convert FC_RPORT_ROLE bit values to ascii string name */ |
193 | static struct { | 193 | static struct { |
194 | u32 value; | 194 | u32 value; |
195 | char *name; | 195 | char *name; |
196 | } fc_remote_port_role_names[] = { | 196 | } fc_remote_port_role_names[] = { |
197 | { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" }, | 197 | { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" }, |
198 | { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, | 198 | { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, |
199 | { FC_RPORT_ROLE_IP_PORT, "IP Port" }, | 199 | { FC_RPORT_ROLE_IP_PORT, "IP Port" }, |
200 | }; | 200 | }; |
201 | fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) | 201 | fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Define roles that are specific to port_id. Values are relative to ROLE_MASK. | 204 | * Define roles that are specific to port_id. Values are relative to ROLE_MASK. |
205 | */ | 205 | */ |
206 | #define FC_WELLKNOWN_PORTID_MASK 0xfffff0 | 206 | #define FC_WELLKNOWN_PORTID_MASK 0xfffff0 |
207 | #define FC_WELLKNOWN_ROLE_MASK 0x00000f | 207 | #define FC_WELLKNOWN_ROLE_MASK 0x00000f |
208 | #define FC_FPORT_PORTID 0x00000e | 208 | #define FC_FPORT_PORTID 0x00000e |
209 | #define FC_FABCTLR_PORTID 0x00000d | 209 | #define FC_FABCTLR_PORTID 0x00000d |
210 | #define FC_DIRSRVR_PORTID 0x00000c | 210 | #define FC_DIRSRVR_PORTID 0x00000c |
211 | #define FC_TIMESRVR_PORTID 0x00000b | 211 | #define FC_TIMESRVR_PORTID 0x00000b |
212 | #define FC_MGMTSRVR_PORTID 0x00000a | 212 | #define FC_MGMTSRVR_PORTID 0x00000a |
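A short worked example of the masks (using the standard FC well-known address for the directory/name server; the decoding itself is done by show_fc_rport_roles() further down):

	/*
	 * port_id 0xFFFFFC:
	 *   0xFFFFFC & FC_WELLKNOWN_PORTID_MASK == 0xFFFFF0  -> well-known range
	 *   0xFFFFFC & FC_WELLKNOWN_ROLE_MASK   == 0x0C      == FC_DIRSRVR_PORTID
	 * so the roles attribute reports "Directory Server".
	 */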
213 | 213 | ||
214 | 214 | ||
215 | static void fc_timeout_blocked_rport(void *data); | 215 | static void fc_timeout_blocked_rport(void *data); |
216 | static void fc_scsi_scan_rport(void *data); | 216 | static void fc_scsi_scan_rport(void *data); |
217 | static void fc_rport_terminate(struct fc_rport *rport); | 217 | static void fc_rport_terminate(struct fc_rport *rport); |
218 | 218 | ||
219 | /* | 219 | /* |
220 | * Attribute counts per object type... | 220 | * Attribute counts per object type... |
221 | * Increase these values if you add attributes | 221 | * Increase these values if you add attributes |
222 | */ | 222 | */ |
223 | #define FC_STARGET_NUM_ATTRS 3 | 223 | #define FC_STARGET_NUM_ATTRS 3 |
224 | #define FC_RPORT_NUM_ATTRS 9 | 224 | #define FC_RPORT_NUM_ATTRS 9 |
225 | #define FC_HOST_NUM_ATTRS 15 | 225 | #define FC_HOST_NUM_ATTRS 15 |
226 | 226 | ||
227 | struct fc_internal { | 227 | struct fc_internal { |
228 | struct scsi_transport_template t; | 228 | struct scsi_transport_template t; |
229 | struct fc_function_template *f; | 229 | struct fc_function_template *f; |
230 | 230 | ||
231 | /* | 231 | /* |
232 | * For attributes : each object has : | 232 | * For attributes : each object has : |
233 | * An array of the actual attribute structures | 233 | * An array of the actual attribute structures |
234 | * A null-terminated array of pointers to those | 234 | * A null-terminated array of pointers to those |
235 | * structures - used for mid-layer interaction. | 235 | * structures - used for mid-layer interaction. |
236 | * | 236 | * |
237 | * The attribute containers for the starget and host are | 237 | * The attribute containers for the starget and host are |
238 | * part of the midlayer. As the remote port is specific to the | 238 | * part of the midlayer. As the remote port is specific to the |
239 | * fc transport, we must provide the attribute container. | 239 | * fc transport, we must provide the attribute container. |
240 | */ | 240 | */ |
241 | struct class_device_attribute private_starget_attrs[ | 241 | struct class_device_attribute private_starget_attrs[ |
242 | FC_STARGET_NUM_ATTRS]; | 242 | FC_STARGET_NUM_ATTRS]; |
243 | struct class_device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1]; | 243 | struct class_device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1]; |
244 | 244 | ||
245 | struct class_device_attribute private_host_attrs[FC_HOST_NUM_ATTRS]; | 245 | struct class_device_attribute private_host_attrs[FC_HOST_NUM_ATTRS]; |
246 | struct class_device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1]; | 246 | struct class_device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1]; |
247 | 247 | ||
248 | struct transport_container rport_attr_cont; | 248 | struct transport_container rport_attr_cont; |
249 | struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; | 249 | struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; |
250 | struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; | 250 | struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; |
251 | }; | 251 | }; |
252 | 252 | ||
253 | #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) | 253 | #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) |
254 | 254 | ||
255 | static int fc_target_setup(struct device *dev) | 255 | static int fc_target_setup(struct transport_container *tc, struct device *dev, |
256 | struct class_device *cdev) | ||
256 | { | 257 | { |
257 | struct scsi_target *starget = to_scsi_target(dev); | 258 | struct scsi_target *starget = to_scsi_target(dev); |
258 | struct fc_rport *rport = starget_to_rport(starget); | 259 | struct fc_rport *rport = starget_to_rport(starget); |
259 | 260 | ||
260 | /* | 261 | /* |
261 | * if parent is remote port, use values from remote port. | 262 | * if parent is remote port, use values from remote port. |
262 | * Otherwise, this host uses the fc_transport, but not the | 263 | * Otherwise, this host uses the fc_transport, but not the |
263 | * remote port interface. As such, initialize to known non-values. | 264 | * remote port interface. As such, initialize to known non-values. |
264 | */ | 265 | */ |
265 | if (rport) { | 266 | if (rport) { |
266 | fc_starget_node_name(starget) = rport->node_name; | 267 | fc_starget_node_name(starget) = rport->node_name; |
267 | fc_starget_port_name(starget) = rport->port_name; | 268 | fc_starget_port_name(starget) = rport->port_name; |
268 | fc_starget_port_id(starget) = rport->port_id; | 269 | fc_starget_port_id(starget) = rport->port_id; |
269 | } else { | 270 | } else { |
270 | fc_starget_node_name(starget) = -1; | 271 | fc_starget_node_name(starget) = -1; |
271 | fc_starget_port_name(starget) = -1; | 272 | fc_starget_port_name(starget) = -1; |
272 | fc_starget_port_id(starget) = -1; | 273 | fc_starget_port_id(starget) = -1; |
273 | } | 274 | } |
274 | 275 | ||
275 | return 0; | 276 | return 0; |
276 | } | 277 | } |
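Note the new prototype: with the reworked transport class abstraction the setup callback now receives the transport_container and the class_device in addition to the generic device. fc_target_setup() only needs dev, but a transport that keys its state off the class device can now reach it directly. A minimal hypothetical sketch under the new prototype (my_transport_setup is an illustrative name, not part of this patch):

	static int my_transport_setup(struct transport_container *tc,
				      struct device *dev,
				      struct class_device *cdev)
	{
		/* cdev is the class_device the container created for dev, so
		 * per-class state no longer has to be located via dev alone */
		printk(KERN_DEBUG "my_transport: configuring %s\n", cdev->class_id);
		return 0;
	}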
277 | 278 | ||
278 | static DECLARE_TRANSPORT_CLASS(fc_transport_class, | 279 | static DECLARE_TRANSPORT_CLASS(fc_transport_class, |
279 | "fc_transport", | 280 | "fc_transport", |
280 | fc_target_setup, | 281 | fc_target_setup, |
281 | NULL, | 282 | NULL, |
282 | NULL); | 283 | NULL); |
283 | 284 | ||
284 | static int fc_host_setup(struct device *dev) | 285 | static int fc_host_setup(struct transport_container *tc, struct device *dev, |
286 | struct class_device *cdev) | ||
285 | { | 287 | { |
286 | struct Scsi_Host *shost = dev_to_shost(dev); | 288 | struct Scsi_Host *shost = dev_to_shost(dev); |
287 | 289 | ||
288 | /* | 290 | /* |
289 | * Set default values easily detected by the midlayer as | 291 | * Set default values easily detected by the midlayer as |
290 | * failure cases. The scsi lldd is responsible for initializing | 292 | * failure cases. The scsi lldd is responsible for initializing |
291 | * all transport attributes to valid values per host. | 293 | * all transport attributes to valid values per host. |
292 | */ | 294 | */ |
293 | fc_host_node_name(shost) = -1; | 295 | fc_host_node_name(shost) = -1; |
294 | fc_host_port_name(shost) = -1; | 296 | fc_host_port_name(shost) = -1; |
295 | fc_host_supported_classes(shost) = FC_COS_UNSPECIFIED; | 297 | fc_host_supported_classes(shost) = FC_COS_UNSPECIFIED; |
296 | memset(fc_host_supported_fc4s(shost), 0, | 298 | memset(fc_host_supported_fc4s(shost), 0, |
297 | sizeof(fc_host_supported_fc4s(shost))); | 299 | sizeof(fc_host_supported_fc4s(shost))); |
298 | memset(fc_host_symbolic_name(shost), 0, | 300 | memset(fc_host_symbolic_name(shost), 0, |
299 | sizeof(fc_host_symbolic_name(shost))); | 301 | sizeof(fc_host_symbolic_name(shost))); |
300 | fc_host_supported_speeds(shost) = FC_PORTSPEED_UNKNOWN; | 302 | fc_host_supported_speeds(shost) = FC_PORTSPEED_UNKNOWN; |
301 | fc_host_maxframe_size(shost) = -1; | 303 | fc_host_maxframe_size(shost) = -1; |
302 | memset(fc_host_serial_number(shost), 0, | 304 | memset(fc_host_serial_number(shost), 0, |
303 | sizeof(fc_host_serial_number(shost))); | 305 | sizeof(fc_host_serial_number(shost))); |
304 | 306 | ||
305 | fc_host_port_id(shost) = -1; | 307 | fc_host_port_id(shost) = -1; |
306 | fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; | 308 | fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; |
307 | fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; | 309 | fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; |
308 | memset(fc_host_active_fc4s(shost), 0, | 310 | memset(fc_host_active_fc4s(shost), 0, |
309 | sizeof(fc_host_active_fc4s(shost))); | 311 | sizeof(fc_host_active_fc4s(shost))); |
310 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | 312 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; |
311 | fc_host_fabric_name(shost) = -1; | 313 | fc_host_fabric_name(shost) = -1; |
312 | 314 | ||
313 | fc_host_tgtid_bind_type(shost) = FC_TGTID_BIND_BY_WWPN; | 315 | fc_host_tgtid_bind_type(shost) = FC_TGTID_BIND_BY_WWPN; |
314 | 316 | ||
315 | INIT_LIST_HEAD(&fc_host_rports(shost)); | 317 | INIT_LIST_HEAD(&fc_host_rports(shost)); |
316 | INIT_LIST_HEAD(&fc_host_rport_bindings(shost)); | 318 | INIT_LIST_HEAD(&fc_host_rport_bindings(shost)); |
317 | fc_host_next_rport_number(shost) = 0; | 319 | fc_host_next_rport_number(shost) = 0; |
318 | fc_host_next_target_id(shost) = 0; | 320 | fc_host_next_target_id(shost) = 0; |
319 | 321 | ||
320 | return 0; | 322 | return 0; |
321 | } | 323 | } |
322 | 324 | ||
323 | static DECLARE_TRANSPORT_CLASS(fc_host_class, | 325 | static DECLARE_TRANSPORT_CLASS(fc_host_class, |
324 | "fc_host", | 326 | "fc_host", |
325 | fc_host_setup, | 327 | fc_host_setup, |
326 | NULL, | 328 | NULL, |
327 | NULL); | 329 | NULL); |
328 | 330 | ||
329 | /* | 331 | /* |
330 | * Setup and Remove actions for remote ports are handled | 332 | * Setup and Remove actions for remote ports are handled |
331 | * in the service functions below. | 333 | * in the service functions below. |
332 | */ | 334 | */ |
333 | static DECLARE_TRANSPORT_CLASS(fc_rport_class, | 335 | static DECLARE_TRANSPORT_CLASS(fc_rport_class, |
334 | "fc_remote_ports", | 336 | "fc_remote_ports", |
335 | NULL, | 337 | NULL, |
336 | NULL, | 338 | NULL, |
337 | NULL); | 339 | NULL); |
338 | 340 | ||
339 | /* | 341 | /* |
340 | * Module Parameters | 342 | * Module Parameters |
341 | */ | 343 | */ |
342 | 344 | ||
343 | /* | 345 | /* |
344 | * dev_loss_tmo: the default number of seconds that the FC transport | 346 | * dev_loss_tmo: the default number of seconds that the FC transport |
345 | * should insulate the loss of a remote port. | 347 | * should insulate the loss of a remote port. |
346 | * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. | 348 | * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. |
347 | */ | 349 | */ |
348 | static unsigned int fc_dev_loss_tmo = SCSI_DEVICE_BLOCK_MAX_TIMEOUT; | 350 | static unsigned int fc_dev_loss_tmo = SCSI_DEVICE_BLOCK_MAX_TIMEOUT; |
349 | 351 | ||
350 | module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR); | 352 | module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR); |
351 | MODULE_PARM_DESC(dev_loss_tmo, | 353 | MODULE_PARM_DESC(dev_loss_tmo, |
352 | "Maximum number of seconds that the FC transport should" | 354 | "Maximum number of seconds that the FC transport should" |
353 | " insulate the loss of a remote port. Once this value is" | 355 | " insulate the loss of a remote port. Once this value is" |
354 | " exceeded, the scsi target is removed. Value should be" | 356 | " exceeded, the scsi target is removed. Value should be" |
355 | " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); | 357 | " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); |
356 | 358 | ||
357 | 359 | ||
358 | static __init int fc_transport_init(void) | 360 | static __init int fc_transport_init(void) |
359 | { | 361 | { |
360 | int error = transport_class_register(&fc_host_class); | 362 | int error = transport_class_register(&fc_host_class); |
361 | if (error) | 363 | if (error) |
362 | return error; | 364 | return error; |
363 | error = transport_class_register(&fc_rport_class); | 365 | error = transport_class_register(&fc_rport_class); |
364 | if (error) | 366 | if (error) |
365 | return error; | 367 | return error; |
366 | return transport_class_register(&fc_transport_class); | 368 | return transport_class_register(&fc_transport_class); |
367 | } | 369 | } |
368 | 370 | ||
369 | static void __exit fc_transport_exit(void) | 371 | static void __exit fc_transport_exit(void) |
370 | { | 372 | { |
371 | transport_class_unregister(&fc_transport_class); | 373 | transport_class_unregister(&fc_transport_class); |
372 | transport_class_unregister(&fc_rport_class); | 374 | transport_class_unregister(&fc_rport_class); |
373 | transport_class_unregister(&fc_host_class); | 375 | transport_class_unregister(&fc_host_class); |
374 | } | 376 | } |
375 | 377 | ||
376 | /* | 378 | /* |
377 | * FC Remote Port Attribute Management | 379 | * FC Remote Port Attribute Management |
378 | */ | 380 | */ |
379 | 381 | ||
380 | #define fc_rport_show_function(field, format_string, sz, cast) \ | 382 | #define fc_rport_show_function(field, format_string, sz, cast) \ |
381 | static ssize_t \ | 383 | static ssize_t \ |
382 | show_fc_rport_##field (struct class_device *cdev, char *buf) \ | 384 | show_fc_rport_##field (struct class_device *cdev, char *buf) \ |
383 | { \ | 385 | { \ |
384 | struct fc_rport *rport = transport_class_to_rport(cdev); \ | 386 | struct fc_rport *rport = transport_class_to_rport(cdev); \ |
385 | struct Scsi_Host *shost = rport_to_shost(rport); \ | 387 | struct Scsi_Host *shost = rport_to_shost(rport); \ |
386 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 388 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
387 | if (i->f->get_rport_##field) \ | 389 | if (i->f->get_rport_##field) \ |
388 | i->f->get_rport_##field(rport); \ | 390 | i->f->get_rport_##field(rport); \ |
389 | return snprintf(buf, sz, format_string, cast rport->field); \ | 391 | return snprintf(buf, sz, format_string, cast rport->field); \ |
390 | } | 392 | } |
391 | 393 | ||
392 | #define fc_rport_store_function(field) \ | 394 | #define fc_rport_store_function(field) \ |
393 | static ssize_t \ | 395 | static ssize_t \ |
394 | store_fc_rport_##field(struct class_device *cdev, const char *buf, \ | 396 | store_fc_rport_##field(struct class_device *cdev, const char *buf, \ |
395 | size_t count) \ | 397 | size_t count) \ |
396 | { \ | 398 | { \ |
397 | int val; \ | 399 | int val; \ |
398 | struct fc_rport *rport = transport_class_to_rport(cdev); \ | 400 | struct fc_rport *rport = transport_class_to_rport(cdev); \ |
399 | struct Scsi_Host *shost = rport_to_shost(rport); \ | 401 | struct Scsi_Host *shost = rport_to_shost(rport); \ |
400 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 402 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
401 | val = simple_strtoul(buf, NULL, 0); \ | 403 | val = simple_strtoul(buf, NULL, 0); \ |
402 | i->f->set_rport_##field(rport, val); \ | 404 | i->f->set_rport_##field(rport, val); \ |
403 | return count; \ | 405 | return count; \ |
404 | } | 406 | } |
405 | 407 | ||
406 | #define fc_rport_rd_attr(field, format_string, sz) \ | 408 | #define fc_rport_rd_attr(field, format_string, sz) \ |
407 | fc_rport_show_function(field, format_string, sz, ) \ | 409 | fc_rport_show_function(field, format_string, sz, ) \ |
408 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ | 410 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ |
409 | show_fc_rport_##field, NULL) | 411 | show_fc_rport_##field, NULL) |
410 | 412 | ||
411 | #define fc_rport_rd_attr_cast(field, format_string, sz, cast) \ | 413 | #define fc_rport_rd_attr_cast(field, format_string, sz, cast) \ |
412 | fc_rport_show_function(field, format_string, sz, (cast)) \ | 414 | fc_rport_show_function(field, format_string, sz, (cast)) \ |
413 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ | 415 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ |
414 | show_fc_rport_##field, NULL) | 416 | show_fc_rport_##field, NULL) |
415 | 417 | ||
416 | #define fc_rport_rw_attr(field, format_string, sz) \ | 418 | #define fc_rport_rw_attr(field, format_string, sz) \ |
417 | fc_rport_show_function(field, format_string, sz, ) \ | 419 | fc_rport_show_function(field, format_string, sz, ) \ |
418 | fc_rport_store_function(field) \ | 420 | fc_rport_store_function(field) \ |
419 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \ | 421 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \ |
420 | show_fc_rport_##field, \ | 422 | show_fc_rport_##field, \ |
421 | store_fc_rport_##field) | 423 | store_fc_rport_##field) |
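Putting the pieces together: fc_rport_rw_attr(dev_loss_tmo, "%d\n", 20), used a little further down, expands into a show routine, a store routine and one read/write class device attribute. The show side expands to approximately the following (equivalent code, shown only for illustration); the store side calls simple_strtoul() on the buffer and passes the value to the driver's set_rport_dev_loss_tmo():

	static ssize_t
	show_fc_rport_dev_loss_tmo(struct class_device *cdev, char *buf)
	{
		struct fc_rport *rport = transport_class_to_rport(cdev);
		struct Scsi_Host *shost = rport_to_shost(rport);
		struct fc_internal *i = to_fc_internal(shost->transportt);

		if (i->f->get_rport_dev_loss_tmo)
			i->f->get_rport_dev_loss_tmo(rport);
		return snprintf(buf, 20, "%d\n", rport->dev_loss_tmo);
	}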
422 | 424 | ||
423 | 425 | ||
424 | #define fc_private_rport_show_function(field, format_string, sz, cast) \ | 426 | #define fc_private_rport_show_function(field, format_string, sz, cast) \ |
425 | static ssize_t \ | 427 | static ssize_t \ |
426 | show_fc_rport_##field (struct class_device *cdev, char *buf) \ | 428 | show_fc_rport_##field (struct class_device *cdev, char *buf) \ |
427 | { \ | 429 | { \ |
428 | struct fc_rport *rport = transport_class_to_rport(cdev); \ | 430 | struct fc_rport *rport = transport_class_to_rport(cdev); \ |
429 | return snprintf(buf, sz, format_string, cast rport->field); \ | 431 | return snprintf(buf, sz, format_string, cast rport->field); \ |
430 | } | 432 | } |
431 | 433 | ||
432 | #define fc_private_rport_rd_attr(field, format_string, sz) \ | 434 | #define fc_private_rport_rd_attr(field, format_string, sz) \ |
433 | fc_private_rport_show_function(field, format_string, sz, ) \ | 435 | fc_private_rport_show_function(field, format_string, sz, ) \ |
434 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ | 436 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ |
435 | show_fc_rport_##field, NULL) | 437 | show_fc_rport_##field, NULL) |
436 | 438 | ||
437 | #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \ | 439 | #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \ |
438 | fc_private_rport_show_function(field, format_string, sz, (cast)) \ | 440 | fc_private_rport_show_function(field, format_string, sz, (cast)) \ |
439 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ | 441 | static FC_CLASS_DEVICE_ATTR(rport, field, S_IRUGO, \ |
440 | show_fc_rport_##field, NULL) | 442 | show_fc_rport_##field, NULL) |
441 | 443 | ||
442 | 444 | ||
443 | #define fc_private_rport_rd_enum_attr(title, maxlen) \ | 445 | #define fc_private_rport_rd_enum_attr(title, maxlen) \ |
444 | static ssize_t \ | 446 | static ssize_t \ |
445 | show_fc_rport_##title (struct class_device *cdev, char *buf) \ | 447 | show_fc_rport_##title (struct class_device *cdev, char *buf) \ |
446 | { \ | 448 | { \ |
447 | struct fc_rport *rport = transport_class_to_rport(cdev); \ | 449 | struct fc_rport *rport = transport_class_to_rport(cdev); \ |
448 | const char *name; \ | 450 | const char *name; \ |
449 | name = get_fc_##title##_name(rport->title); \ | 451 | name = get_fc_##title##_name(rport->title); \ |
450 | if (!name) \ | 452 | if (!name) \ |
451 | return -EINVAL; \ | 453 | return -EINVAL; \ |
452 | return snprintf(buf, maxlen, "%s\n", name); \ | 454 | return snprintf(buf, maxlen, "%s\n", name); \ |
453 | } \ | 455 | } \ |
454 | static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \ | 456 | static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \ |
455 | show_fc_rport_##title, NULL) | 457 | show_fc_rport_##title, NULL) |
456 | 458 | ||
457 | 459 | ||
458 | #define SETUP_RPORT_ATTRIBUTE_RD(field) \ | 460 | #define SETUP_RPORT_ATTRIBUTE_RD(field) \ |
459 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ | 461 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ |
460 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ | 462 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ |
461 | i->private_rport_attrs[count].store = NULL; \ | 463 | i->private_rport_attrs[count].store = NULL; \ |
462 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ | 464 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ |
463 | if (i->f->show_rport_##field) \ | 465 | if (i->f->show_rport_##field) \ |
464 | count++ | 466 | count++ |
465 | 467 | ||
466 | #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \ | 468 | #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \ |
467 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ | 469 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ |
468 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ | 470 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ |
469 | i->private_rport_attrs[count].store = NULL; \ | 471 | i->private_rport_attrs[count].store = NULL; \ |
470 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ | 472 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ |
471 | count++ | 473 | count++ |
472 | 474 | ||
473 | #define SETUP_RPORT_ATTRIBUTE_RW(field) \ | 475 | #define SETUP_RPORT_ATTRIBUTE_RW(field) \ |
474 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ | 476 | i->private_rport_attrs[count] = class_device_attr_rport_##field; \ |
475 | if (!i->f->set_rport_##field) { \ | 477 | if (!i->f->set_rport_##field) { \ |
476 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ | 478 | i->private_rport_attrs[count].attr.mode = S_IRUGO; \ |
477 | i->private_rport_attrs[count].store = NULL; \ | 479 | i->private_rport_attrs[count].store = NULL; \ |
478 | } \ | 480 | } \ |
479 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ | 481 | i->rport_attrs[count] = &i->private_rport_attrs[count]; \ |
480 | if (i->f->show_rport_##field) \ | 482 | if (i->f->show_rport_##field) \ |
481 | count++ | 483 | count++ |
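These macros are invoked back to back from the transport attach code (later in the file, outside this hunk) with a running count: each copies an attribute into the private array and only advances count when the driver's function template provides the matching show/set hook. A hedged sketch of the consuming pattern, with an illustrative subset of attributes:

	/* sketch only -- the real sequence lives in fc_attach_transport() */
	count = 0;
	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
	/* ... remaining remote port attributes ... */
	i->rport_attrs[count] = NULL;	/* terminate the pointer array */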
482 | 484 | ||
483 | 485 | ||
484 | /* The FC Transport Remote Port Attributes: */ | 486 | /* The FC Transport Remote Port Attributes: */ |
485 | 487 | ||
486 | /* Fixed Remote Port Attributes */ | 488 | /* Fixed Remote Port Attributes */ |
487 | 489 | ||
488 | fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20); | 490 | fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20); |
489 | 491 | ||
490 | static ssize_t | 492 | static ssize_t |
491 | show_fc_rport_supported_classes (struct class_device *cdev, char *buf) | 493 | show_fc_rport_supported_classes (struct class_device *cdev, char *buf) |
492 | { | 494 | { |
493 | struct fc_rport *rport = transport_class_to_rport(cdev); | 495 | struct fc_rport *rport = transport_class_to_rport(cdev); |
494 | if (rport->supported_classes == FC_COS_UNSPECIFIED) | 496 | if (rport->supported_classes == FC_COS_UNSPECIFIED) |
495 | return snprintf(buf, 20, "unspecified\n"); | 497 | return snprintf(buf, 20, "unspecified\n"); |
496 | return get_fc_cos_names(rport->supported_classes, buf); | 498 | return get_fc_cos_names(rport->supported_classes, buf); |
497 | } | 499 | } |
498 | static FC_CLASS_DEVICE_ATTR(rport, supported_classes, S_IRUGO, | 500 | static FC_CLASS_DEVICE_ATTR(rport, supported_classes, S_IRUGO, |
499 | show_fc_rport_supported_classes, NULL); | 501 | show_fc_rport_supported_classes, NULL); |
500 | 502 | ||
501 | /* Dynamic Remote Port Attributes */ | 503 | /* Dynamic Remote Port Attributes */ |
502 | 504 | ||
503 | fc_rport_rw_attr(dev_loss_tmo, "%d\n", 20); | 505 | fc_rport_rw_attr(dev_loss_tmo, "%d\n", 20); |
504 | 506 | ||
505 | 507 | ||
506 | /* Private Remote Port Attributes */ | 508 | /* Private Remote Port Attributes */ |
507 | 509 | ||
508 | fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); | 510 | fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); |
509 | fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); | 511 | fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); |
510 | fc_private_rport_rd_attr(port_id, "0x%06x\n", 20); | 512 | fc_private_rport_rd_attr(port_id, "0x%06x\n", 20); |
511 | 513 | ||
512 | static ssize_t | 514 | static ssize_t |
513 | show_fc_rport_roles (struct class_device *cdev, char *buf) | 515 | show_fc_rport_roles (struct class_device *cdev, char *buf) |
514 | { | 516 | { |
515 | struct fc_rport *rport = transport_class_to_rport(cdev); | 517 | struct fc_rport *rport = transport_class_to_rport(cdev); |
516 | 518 | ||
517 | /* identify any roles that are port_id specific */ | 519 | /* identify any roles that are port_id specific */ |
518 | if ((rport->port_id != -1) && | 520 | if ((rport->port_id != -1) && |
519 | (rport->port_id & FC_WELLKNOWN_PORTID_MASK) == | 521 | (rport->port_id & FC_WELLKNOWN_PORTID_MASK) == |
520 | FC_WELLKNOWN_PORTID_MASK) { | 522 | FC_WELLKNOWN_PORTID_MASK) { |
521 | switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) { | 523 | switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) { |
522 | case FC_FPORT_PORTID: | 524 | case FC_FPORT_PORTID: |
523 | return snprintf(buf, 30, "Fabric Port\n"); | 525 | return snprintf(buf, 30, "Fabric Port\n"); |
524 | case FC_FABCTLR_PORTID: | 526 | case FC_FABCTLR_PORTID: |
525 | return snprintf(buf, 30, "Fabric Controller\n"); | 527 | return snprintf(buf, 30, "Fabric Controller\n"); |
526 | case FC_DIRSRVR_PORTID: | 528 | case FC_DIRSRVR_PORTID: |
527 | return snprintf(buf, 30, "Directory Server\n"); | 529 | return snprintf(buf, 30, "Directory Server\n"); |
528 | case FC_TIMESRVR_PORTID: | 530 | case FC_TIMESRVR_PORTID: |
529 | return snprintf(buf, 30, "Time Server\n"); | 531 | return snprintf(buf, 30, "Time Server\n"); |
530 | case FC_MGMTSRVR_PORTID: | 532 | case FC_MGMTSRVR_PORTID: |
531 | return snprintf(buf, 30, "Management Server\n"); | 533 | return snprintf(buf, 30, "Management Server\n"); |
532 | default: | 534 | default: |
533 | return snprintf(buf, 30, "Unknown Fabric Entity\n"); | 535 | return snprintf(buf, 30, "Unknown Fabric Entity\n"); |
534 | } | 536 | } |
535 | } else { | 537 | } else { |
536 | if (rport->roles == FC_RPORT_ROLE_UNKNOWN) | 538 | if (rport->roles == FC_RPORT_ROLE_UNKNOWN) |
537 | return snprintf(buf, 20, "unknown\n"); | 539 | return snprintf(buf, 20, "unknown\n"); |
538 | return get_fc_remote_port_roles_names(rport->roles, buf); | 540 | return get_fc_remote_port_roles_names(rport->roles, buf); |
539 | } | 541 | } |
540 | } | 542 | } |
541 | static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO, | 543 | static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO, |
542 | show_fc_rport_roles, NULL); | 544 | show_fc_rport_roles, NULL); |
543 | 545 | ||
544 | fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); | 546 | fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); |
545 | fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); | 547 | fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); |
546 | 548 | ||
547 | 549 | ||
548 | 550 | ||
549 | /* | 551 | /* |
550 | * FC SCSI Target Attribute Management | 552 | * FC SCSI Target Attribute Management |
551 | */ | 553 | */ |
552 | 554 | ||
553 | /* | 555 | /* |
554 | * Note: in the target show function we recognize when the remote | 556 | * Note: in the target show function we recognize when the remote |
555 | * port is in the hierarchy and do not allow the driver to get | 557 | * port is in the hierarchy and do not allow the driver to get |
556 | * involved in sysfs functions. The driver only gets involved if | 558 | * involved in sysfs functions. The driver only gets involved if |
557 | * it's the "old" style that doesn't use rports. | 559 | * it's the "old" style that doesn't use rports. |
558 | */ | 560 | */ |
559 | #define fc_starget_show_function(field, format_string, sz, cast) \ | 561 | #define fc_starget_show_function(field, format_string, sz, cast) \ |
560 | static ssize_t \ | 562 | static ssize_t \ |
561 | show_fc_starget_##field (struct class_device *cdev, char *buf) \ | 563 | show_fc_starget_##field (struct class_device *cdev, char *buf) \ |
562 | { \ | 564 | { \ |
563 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 565 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
564 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ | 566 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ |
565 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 567 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
566 | struct fc_rport *rport = starget_to_rport(starget); \ | 568 | struct fc_rport *rport = starget_to_rport(starget); \ |
567 | if (rport) \ | 569 | if (rport) \ |
568 | fc_starget_##field(starget) = rport->field; \ | 570 | fc_starget_##field(starget) = rport->field; \ |
569 | else if (i->f->get_starget_##field) \ | 571 | else if (i->f->get_starget_##field) \ |
570 | i->f->get_starget_##field(starget); \ | 572 | i->f->get_starget_##field(starget); \ |
571 | return snprintf(buf, sz, format_string, \ | 573 | return snprintf(buf, sz, format_string, \ |
572 | cast fc_starget_##field(starget)); \ | 574 | cast fc_starget_##field(starget)); \ |
573 | } | 575 | } |
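In other words, when a remote port sits above the target the cached rport value is copied into the target's transport data and the LLDD callback is bypassed; only the legacy, rport-less configuration falls through to the driver. For instance, fc_starget_rd_attr(port_id, "0x%06x\n", 20) below expands to approximately (illustration only):

	static ssize_t
	show_fc_starget_port_id(struct class_device *cdev, char *buf)
	{
		struct scsi_target *starget = transport_class_to_starget(cdev);
		struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
		struct fc_internal *i = to_fc_internal(shost->transportt);
		struct fc_rport *rport = starget_to_rport(starget);

		if (rport)
			fc_starget_port_id(starget) = rport->port_id;
		else if (i->f->get_starget_port_id)
			i->f->get_starget_port_id(starget);
		return snprintf(buf, 20, "0x%06x\n", fc_starget_port_id(starget));
	}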
574 | 576 | ||
575 | #define fc_starget_rd_attr(field, format_string, sz) \ | 577 | #define fc_starget_rd_attr(field, format_string, sz) \ |
576 | fc_starget_show_function(field, format_string, sz, ) \ | 578 | fc_starget_show_function(field, format_string, sz, ) \ |
577 | static FC_CLASS_DEVICE_ATTR(starget, field, S_IRUGO, \ | 579 | static FC_CLASS_DEVICE_ATTR(starget, field, S_IRUGO, \ |
578 | show_fc_starget_##field, NULL) | 580 | show_fc_starget_##field, NULL) |
579 | 581 | ||
580 | #define fc_starget_rd_attr_cast(field, format_string, sz, cast) \ | 582 | #define fc_starget_rd_attr_cast(field, format_string, sz, cast) \ |
581 | fc_starget_show_function(field, format_string, sz, (cast)) \ | 583 | fc_starget_show_function(field, format_string, sz, (cast)) \ |
582 | static FC_CLASS_DEVICE_ATTR(starget, field, S_IRUGO, \ | 584 | static FC_CLASS_DEVICE_ATTR(starget, field, S_IRUGO, \ |
583 | show_fc_starget_##field, NULL) | 585 | show_fc_starget_##field, NULL) |
584 | 586 | ||
585 | #define SETUP_STARGET_ATTRIBUTE_RD(field) \ | 587 | #define SETUP_STARGET_ATTRIBUTE_RD(field) \ |
586 | i->private_starget_attrs[count] = class_device_attr_starget_##field; \ | 588 | i->private_starget_attrs[count] = class_device_attr_starget_##field; \ |
587 | i->private_starget_attrs[count].attr.mode = S_IRUGO; \ | 589 | i->private_starget_attrs[count].attr.mode = S_IRUGO; \ |
588 | i->private_starget_attrs[count].store = NULL; \ | 590 | i->private_starget_attrs[count].store = NULL; \ |
589 | i->starget_attrs[count] = &i->private_starget_attrs[count]; \ | 591 | i->starget_attrs[count] = &i->private_starget_attrs[count]; \ |
590 | if (i->f->show_starget_##field) \ | 592 | if (i->f->show_starget_##field) \ |
591 | count++ | 593 | count++ |
592 | 594 | ||
593 | #define SETUP_STARGET_ATTRIBUTE_RW(field) \ | 595 | #define SETUP_STARGET_ATTRIBUTE_RW(field) \ |
594 | i->private_starget_attrs[count] = class_device_attr_starget_##field; \ | 596 | i->private_starget_attrs[count] = class_device_attr_starget_##field; \ |
595 | if (!i->f->set_starget_##field) { \ | 597 | if (!i->f->set_starget_##field) { \ |
596 | i->private_starget_attrs[count].attr.mode = S_IRUGO; \ | 598 | i->private_starget_attrs[count].attr.mode = S_IRUGO; \ |
597 | i->private_starget_attrs[count].store = NULL; \ | 599 | i->private_starget_attrs[count].store = NULL; \ |
598 | } \ | 600 | } \ |
599 | i->starget_attrs[count] = &i->private_starget_attrs[count]; \ | 601 | i->starget_attrs[count] = &i->private_starget_attrs[count]; \ |
600 | if (i->f->show_starget_##field) \ | 602 | if (i->f->show_starget_##field) \ |
601 | count++ | 603 | count++ |
602 | 604 | ||
603 | /* The FC Transport SCSI Target Attributes: */ | 605 | /* The FC Transport SCSI Target Attributes: */ |
604 | fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); | 606 | fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); |
605 | fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); | 607 | fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); |
606 | fc_starget_rd_attr(port_id, "0x%06x\n", 20); | 608 | fc_starget_rd_attr(port_id, "0x%06x\n", 20); |
607 | 609 | ||
608 | 610 | ||
609 | /* | 611 | /* |
610 | * Host Attribute Management | 612 | * Host Attribute Management |
611 | */ | 613 | */ |
612 | 614 | ||
613 | #define fc_host_show_function(field, format_string, sz, cast) \ | 615 | #define fc_host_show_function(field, format_string, sz, cast) \ |
614 | static ssize_t \ | 616 | static ssize_t \ |
615 | show_fc_host_##field (struct class_device *cdev, char *buf) \ | 617 | show_fc_host_##field (struct class_device *cdev, char *buf) \ |
616 | { \ | 618 | { \ |
617 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ | 619 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ |
618 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 620 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
619 | if (i->f->get_host_##field) \ | 621 | if (i->f->get_host_##field) \ |
620 | i->f->get_host_##field(shost); \ | 622 | i->f->get_host_##field(shost); \ |
621 | return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ | 623 | return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ |
622 | } | 624 | } |
623 | 625 | ||
624 | #define fc_host_store_function(field) \ | 626 | #define fc_host_store_function(field) \ |
625 | static ssize_t \ | 627 | static ssize_t \ |
626 | store_fc_host_##field(struct class_device *cdev, const char *buf, \ | 628 | store_fc_host_##field(struct class_device *cdev, const char *buf, \ |
627 | size_t count) \ | 629 | size_t count) \ |
628 | { \ | 630 | { \ |
629 | int val; \ | 631 | int val; \ |
630 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ | 632 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ |
631 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 633 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
632 | \ | 634 | \ |
633 | val = simple_strtoul(buf, NULL, 0); \ | 635 | val = simple_strtoul(buf, NULL, 0); \ |
634 | i->f->set_host_##field(shost, val); \ | 636 | i->f->set_host_##field(shost, val); \ |
635 | return count; \ | 637 | return count; \ |
636 | } | 638 | } |
637 | 639 | ||
638 | #define fc_host_rd_attr(field, format_string, sz) \ | 640 | #define fc_host_rd_attr(field, format_string, sz) \ |
639 | fc_host_show_function(field, format_string, sz, ) \ | 641 | fc_host_show_function(field, format_string, sz, ) \ |
640 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ | 642 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ |
641 | show_fc_host_##field, NULL) | 643 | show_fc_host_##field, NULL) |
642 | 644 | ||
643 | #define fc_host_rd_attr_cast(field, format_string, sz, cast) \ | 645 | #define fc_host_rd_attr_cast(field, format_string, sz, cast) \ |
644 | fc_host_show_function(field, format_string, sz, (cast)) \ | 646 | fc_host_show_function(field, format_string, sz, (cast)) \ |
645 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ | 647 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ |
646 | show_fc_host_##field, NULL) | 648 | show_fc_host_##field, NULL) |
647 | 649 | ||
648 | #define fc_host_rw_attr(field, format_string, sz) \ | 650 | #define fc_host_rw_attr(field, format_string, sz) \ |
649 | fc_host_show_function(field, format_string, sz, ) \ | 651 | fc_host_show_function(field, format_string, sz, ) \ |
650 | fc_host_store_function(field) \ | 652 | fc_host_store_function(field) \ |
651 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \ | 653 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \ |
652 | show_fc_host_##field, \ | 654 | show_fc_host_##field, \ |
653 | store_fc_host_##field) | 655 | store_fc_host_##field) |
654 | 656 | ||
655 | #define fc_host_rd_enum_attr(title, maxlen) \ | 657 | #define fc_host_rd_enum_attr(title, maxlen) \ |
656 | static ssize_t \ | 658 | static ssize_t \ |
657 | show_fc_host_##title (struct class_device *cdev, char *buf) \ | 659 | show_fc_host_##title (struct class_device *cdev, char *buf) \ |
658 | { \ | 660 | { \ |
659 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ | 661 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ |
660 | struct fc_internal *i = to_fc_internal(shost->transportt); \ | 662 | struct fc_internal *i = to_fc_internal(shost->transportt); \ |
661 | const char *name; \ | 663 | const char *name; \ |
662 | if (i->f->get_host_##title) \ | 664 | if (i->f->get_host_##title) \ |
663 | i->f->get_host_##title(shost); \ | 665 | i->f->get_host_##title(shost); \ |
664 | name = get_fc_##title##_name(fc_host_##title(shost)); \ | 666 | name = get_fc_##title##_name(fc_host_##title(shost)); \ |
665 | if (!name) \ | 667 | if (!name) \ |
666 | return -EINVAL; \ | 668 | return -EINVAL; \ |
667 | return snprintf(buf, maxlen, "%s\n", name); \ | 669 | return snprintf(buf, maxlen, "%s\n", name); \ |
668 | } \ | 670 | } \ |
669 | static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL) | 671 | static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL) |
670 | 672 | ||
671 | #define SETUP_HOST_ATTRIBUTE_RD(field) \ | 673 | #define SETUP_HOST_ATTRIBUTE_RD(field) \ |
672 | i->private_host_attrs[count] = class_device_attr_host_##field; \ | 674 | i->private_host_attrs[count] = class_device_attr_host_##field; \ |
673 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ | 675 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ |
674 | i->private_host_attrs[count].store = NULL; \ | 676 | i->private_host_attrs[count].store = NULL; \ |
675 | i->host_attrs[count] = &i->private_host_attrs[count]; \ | 677 | i->host_attrs[count] = &i->private_host_attrs[count]; \ |
676 | if (i->f->show_host_##field) \ | 678 | if (i->f->show_host_##field) \ |
677 | count++ | 679 | count++ |
678 | 680 | ||
679 | #define SETUP_HOST_ATTRIBUTE_RW(field) \ | 681 | #define SETUP_HOST_ATTRIBUTE_RW(field) \ |
680 | i->private_host_attrs[count] = class_device_attr_host_##field; \ | 682 | i->private_host_attrs[count] = class_device_attr_host_##field; \ |
681 | if (!i->f->set_host_##field) { \ | 683 | if (!i->f->set_host_##field) { \ |
682 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ | 684 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ |
683 | i->private_host_attrs[count].store = NULL; \ | 685 | i->private_host_attrs[count].store = NULL; \ |
684 | } \ | 686 | } \ |
685 | i->host_attrs[count] = &i->private_host_attrs[count]; \ | 687 | i->host_attrs[count] = &i->private_host_attrs[count]; \ |
686 | if (i->f->show_host_##field) \ | 688 | if (i->f->show_host_##field) \ |
687 | count++ | 689 | count++ |
688 | 690 | ||
689 | 691 | ||
690 | #define fc_private_host_show_function(field, format_string, sz, cast) \ | 692 | #define fc_private_host_show_function(field, format_string, sz, cast) \ |
691 | static ssize_t \ | 693 | static ssize_t \ |
692 | show_fc_host_##field (struct class_device *cdev, char *buf) \ | 694 | show_fc_host_##field (struct class_device *cdev, char *buf) \ |
693 | { \ | 695 | { \ |
694 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ | 696 | struct Scsi_Host *shost = transport_class_to_shost(cdev); \ |
695 | return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ | 697 | return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ |
696 | } | 698 | } |
697 | 699 | ||
698 | #define fc_private_host_rd_attr(field, format_string, sz) \ | 700 | #define fc_private_host_rd_attr(field, format_string, sz) \ |
699 | fc_private_host_show_function(field, format_string, sz, ) \ | 701 | fc_private_host_show_function(field, format_string, sz, ) \ |
700 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ | 702 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ |
701 | show_fc_host_##field, NULL) | 703 | show_fc_host_##field, NULL) |
702 | 704 | ||
703 | #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \ | 705 | #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \ |
704 | fc_private_host_show_function(field, format_string, sz, (cast)) \ | 706 | fc_private_host_show_function(field, format_string, sz, (cast)) \ |
705 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ | 707 | static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ |
706 | show_fc_host_##field, NULL) | 708 | show_fc_host_##field, NULL) |
707 | 709 | ||
708 | #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \ | 710 | #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \ |
709 | i->private_host_attrs[count] = class_device_attr_host_##field; \ | 711 | i->private_host_attrs[count] = class_device_attr_host_##field; \ |
710 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ | 712 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ |
711 | i->private_host_attrs[count].store = NULL; \ | 713 | i->private_host_attrs[count].store = NULL; \ |
712 | i->host_attrs[count] = &i->private_host_attrs[count]; \ | 714 | i->host_attrs[count] = &i->private_host_attrs[count]; \ |
713 | count++ | 715 | count++ |
714 | 716 | ||
715 | #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \ | 717 | #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \ |
716 | i->private_host_attrs[count] = class_device_attr_host_##field; \ | 718 | i->private_host_attrs[count] = class_device_attr_host_##field; \ |
717 | i->host_attrs[count] = &i->private_host_attrs[count]; \ | 719 | i->host_attrs[count] = &i->private_host_attrs[count]; \ |
718 | count++ | 720 | count++ |
719 | 721 | ||
720 | 722 | ||
721 | /* Fixed Host Attributes */ | 723 | /* Fixed Host Attributes */ |
722 | 724 | ||
723 | static ssize_t | 725 | static ssize_t |
724 | show_fc_host_supported_classes (struct class_device *cdev, char *buf) | 726 | show_fc_host_supported_classes (struct class_device *cdev, char *buf) |
725 | { | 727 | { |
726 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 728 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
727 | 729 | ||
728 | if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED) | 730 | if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED) |
729 | return snprintf(buf, 20, "unspecified\n"); | 731 | return snprintf(buf, 20, "unspecified\n"); |
730 | 732 | ||
731 | return get_fc_cos_names(fc_host_supported_classes(shost), buf); | 733 | return get_fc_cos_names(fc_host_supported_classes(shost), buf); |
732 | } | 734 | } |
733 | static FC_CLASS_DEVICE_ATTR(host, supported_classes, S_IRUGO, | 735 | static FC_CLASS_DEVICE_ATTR(host, supported_classes, S_IRUGO, |
734 | show_fc_host_supported_classes, NULL); | 736 | show_fc_host_supported_classes, NULL); |
735 | 737 | ||
736 | static ssize_t | 738 | static ssize_t |
737 | show_fc_host_supported_fc4s (struct class_device *cdev, char *buf) | 739 | show_fc_host_supported_fc4s (struct class_device *cdev, char *buf) |
738 | { | 740 | { |
739 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 741 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
740 | return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost)); | 742 | return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost)); |
741 | } | 743 | } |
742 | static FC_CLASS_DEVICE_ATTR(host, supported_fc4s, S_IRUGO, | 744 | static FC_CLASS_DEVICE_ATTR(host, supported_fc4s, S_IRUGO, |
743 | show_fc_host_supported_fc4s, NULL); | 745 | show_fc_host_supported_fc4s, NULL); |
744 | 746 | ||
745 | static ssize_t | 747 | static ssize_t |
746 | show_fc_host_supported_speeds (struct class_device *cdev, char *buf) | 748 | show_fc_host_supported_speeds (struct class_device *cdev, char *buf) |
747 | { | 749 | { |
748 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 750 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
749 | 751 | ||
750 | if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN) | 752 | if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN) |
751 | return snprintf(buf, 20, "unknown\n"); | 753 | return snprintf(buf, 20, "unknown\n"); |
752 | 754 | ||
753 | return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf); | 755 | return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf); |
754 | } | 756 | } |
755 | static FC_CLASS_DEVICE_ATTR(host, supported_speeds, S_IRUGO, | 757 | static FC_CLASS_DEVICE_ATTR(host, supported_speeds, S_IRUGO, |
756 | show_fc_host_supported_speeds, NULL); | 758 | show_fc_host_supported_speeds, NULL); |
757 | 759 | ||
758 | 760 | ||
759 | fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); | 761 | fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); |
760 | fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); | 762 | fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); |
761 | fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1)); | 763 | fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1)); |
762 | fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); | 764 | fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); |
763 | fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); | 765 | fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); |
764 | 766 | ||
765 | 767 | ||
766 | /* Dynamic Host Attributes */ | 768 | /* Dynamic Host Attributes */ |
767 | 769 | ||
768 | static ssize_t | 770 | static ssize_t |
769 | show_fc_host_active_fc4s (struct class_device *cdev, char *buf) | 771 | show_fc_host_active_fc4s (struct class_device *cdev, char *buf) |
770 | { | 772 | { |
771 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 773 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
772 | struct fc_internal *i = to_fc_internal(shost->transportt); | 774 | struct fc_internal *i = to_fc_internal(shost->transportt); |
773 | 775 | ||
774 | if (i->f->get_host_active_fc4s) | 776 | if (i->f->get_host_active_fc4s) |
775 | i->f->get_host_active_fc4s(shost); | 777 | i->f->get_host_active_fc4s(shost); |
776 | 778 | ||
777 | return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost)); | 779 | return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost)); |
778 | } | 780 | } |
779 | static FC_CLASS_DEVICE_ATTR(host, active_fc4s, S_IRUGO, | 781 | static FC_CLASS_DEVICE_ATTR(host, active_fc4s, S_IRUGO, |
780 | show_fc_host_active_fc4s, NULL); | 782 | show_fc_host_active_fc4s, NULL); |
781 | 783 | ||
782 | static ssize_t | 784 | static ssize_t |
783 | show_fc_host_speed (struct class_device *cdev, char *buf) | 785 | show_fc_host_speed (struct class_device *cdev, char *buf) |
784 | { | 786 | { |
785 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 787 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
786 | struct fc_internal *i = to_fc_internal(shost->transportt); | 788 | struct fc_internal *i = to_fc_internal(shost->transportt); |
787 | 789 | ||
788 | if (i->f->get_host_speed) | 790 | if (i->f->get_host_speed) |
789 | i->f->get_host_speed(shost); | 791 | i->f->get_host_speed(shost); |
790 | 792 | ||
791 | if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN) | 793 | if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN) |
792 | return snprintf(buf, 20, "unknown\n"); | 794 | return snprintf(buf, 20, "unknown\n"); |
793 | 795 | ||
794 | return get_fc_port_speed_names(fc_host_speed(shost), buf); | 796 | return get_fc_port_speed_names(fc_host_speed(shost), buf); |
795 | } | 797 | } |
796 | static FC_CLASS_DEVICE_ATTR(host, speed, S_IRUGO, | 798 | static FC_CLASS_DEVICE_ATTR(host, speed, S_IRUGO, |
797 | show_fc_host_speed, NULL); | 799 | show_fc_host_speed, NULL); |
798 | 800 | ||
799 | 801 | ||
800 | fc_host_rd_attr(port_id, "0x%06x\n", 20); | 802 | fc_host_rd_attr(port_id, "0x%06x\n", 20); |
801 | fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); | 803 | fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); |
802 | fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); | 804 | fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); |
803 | fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); | 805 | fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); |
804 | 806 | ||
805 | 807 | ||
806 | /* Private Host Attributes */ | 808 | /* Private Host Attributes */ |
807 | 809 | ||
808 | static ssize_t | 810 | static ssize_t |
809 | show_fc_private_host_tgtid_bind_type(struct class_device *cdev, char *buf) | 811 | show_fc_private_host_tgtid_bind_type(struct class_device *cdev, char *buf) |
810 | { | 812 | { |
811 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 813 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
812 | const char *name; | 814 | const char *name; |
813 | 815 | ||
814 | name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost)); | 816 | name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost)); |
815 | if (!name) | 817 | if (!name) |
816 | return -EINVAL; | 818 | return -EINVAL; |
817 | return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); | 819 | return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); |
818 | } | 820 | } |
819 | 821 | ||
820 | static ssize_t | 822 | static ssize_t |
821 | store_fc_private_host_tgtid_bind_type(struct class_device *cdev, | 823 | store_fc_private_host_tgtid_bind_type(struct class_device *cdev, |
822 | const char *buf, size_t count) | 824 | const char *buf, size_t count) |
823 | { | 825 | { |
824 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 826 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
825 | struct fc_rport *rport, *next_rport; | 827 | struct fc_rport *rport, *next_rport; |
826 | enum fc_tgtid_binding_type val; | 828 | enum fc_tgtid_binding_type val; |
827 | unsigned long flags; | 829 | unsigned long flags; |
828 | 830 | ||
829 | if (get_fc_tgtid_bind_type_match(buf, &val)) | 831 | if (get_fc_tgtid_bind_type_match(buf, &val)) |
830 | return -EINVAL; | 832 | return -EINVAL; |
831 | 833 | ||
832 | /* if changing bind type, purge all unused consistent bindings */ | 834 | /* if changing bind type, purge all unused consistent bindings */ |
833 | if (val != fc_host_tgtid_bind_type(shost)) { | 835 | if (val != fc_host_tgtid_bind_type(shost)) { |
834 | spin_lock_irqsave(shost->host_lock, flags); | 836 | spin_lock_irqsave(shost->host_lock, flags); |
835 | list_for_each_entry_safe(rport, next_rport, | 837 | list_for_each_entry_safe(rport, next_rport, |
836 | &fc_host_rport_bindings(shost), peers) | 838 | &fc_host_rport_bindings(shost), peers) |
837 | fc_rport_terminate(rport); | 839 | fc_rport_terminate(rport); |
838 | spin_unlock_irqrestore(shost->host_lock, flags); | 840 | spin_unlock_irqrestore(shost->host_lock, flags); |
839 | } | 841 | } |
840 | 842 | ||
841 | fc_host_tgtid_bind_type(shost) = val; | 843 | fc_host_tgtid_bind_type(shost) = val; |
842 | return count; | 844 | return count; |
843 | } | 845 | } |
844 | 846 | ||
845 | static FC_CLASS_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR, | 847 | static FC_CLASS_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR, |
846 | show_fc_private_host_tgtid_bind_type, | 848 | show_fc_private_host_tgtid_bind_type, |
847 | store_fc_private_host_tgtid_bind_type); | 849 | store_fc_private_host_tgtid_bind_type); |
848 | 850 | ||
849 | /* | 851 | /* |
850 | * Host Statistics Management | 852 | * Host Statistics Management |
851 | */ | 853 | */ |
852 | 854 | ||
853 | /* Show a given attribute in the statistics group */ | 855 | /* Show a given attribute in the statistics group */ |
854 | static ssize_t | 856 | static ssize_t |
855 | fc_stat_show(const struct class_device *cdev, char *buf, unsigned long offset) | 857 | fc_stat_show(const struct class_device *cdev, char *buf, unsigned long offset) |
856 | { | 858 | { |
857 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 859 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
858 | struct fc_internal *i = to_fc_internal(shost->transportt); | 860 | struct fc_internal *i = to_fc_internal(shost->transportt); |
859 | struct fc_host_statistics *stats; | 861 | struct fc_host_statistics *stats; |
860 | ssize_t ret = -ENOENT; | 862 | ssize_t ret = -ENOENT; |
861 | 863 | ||
862 | if (offset > sizeof(struct fc_host_statistics) || | 864 | if (offset > sizeof(struct fc_host_statistics) || |
863 | offset % sizeof(u64) != 0) | 865 | offset % sizeof(u64) != 0) |
864 | WARN_ON(1); | 866 | WARN_ON(1); |
865 | 867 | ||
866 | if (i->f->get_fc_host_stats) { | 868 | if (i->f->get_fc_host_stats) { |
867 | stats = (i->f->get_fc_host_stats)(shost); | 869 | stats = (i->f->get_fc_host_stats)(shost); |
868 | if (stats) | 870 | if (stats) |
869 | ret = snprintf(buf, 20, "0x%llx\n", | 871 | ret = snprintf(buf, 20, "0x%llx\n", |
870 | (unsigned long long)*(u64 *)(((u8 *) stats) + offset)); | 872 | (unsigned long long)*(u64 *)(((u8 *) stats) + offset)); |
871 | } | 873 | } |
872 | return ret; | 874 | return ret; |
873 | } | 875 | } |
874 | 876 | ||
875 | 877 | ||
876 | /* generate a read-only statistics attribute */ | 878 | /* generate a read-only statistics attribute */ |
877 | #define fc_host_statistic(name) \ | 879 | #define fc_host_statistic(name) \ |
878 | static ssize_t show_fcstat_##name(struct class_device *cd, char *buf) \ | 880 | static ssize_t show_fcstat_##name(struct class_device *cd, char *buf) \ |
879 | { \ | 881 | { \ |
880 | return fc_stat_show(cd, buf, \ | 882 | return fc_stat_show(cd, buf, \ |
881 | offsetof(struct fc_host_statistics, name)); \ | 883 | offsetof(struct fc_host_statistics, name)); \ |
882 | } \ | 884 | } \ |
883 | static FC_CLASS_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL) | 885 | static FC_CLASS_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL) |
884 | 886 | ||
885 | fc_host_statistic(seconds_since_last_reset); | 887 | fc_host_statistic(seconds_since_last_reset); |
886 | fc_host_statistic(tx_frames); | 888 | fc_host_statistic(tx_frames); |
887 | fc_host_statistic(tx_words); | 889 | fc_host_statistic(tx_words); |
888 | fc_host_statistic(rx_frames); | 890 | fc_host_statistic(rx_frames); |
889 | fc_host_statistic(rx_words); | 891 | fc_host_statistic(rx_words); |
890 | fc_host_statistic(lip_count); | 892 | fc_host_statistic(lip_count); |
891 | fc_host_statistic(nos_count); | 893 | fc_host_statistic(nos_count); |
892 | fc_host_statistic(error_frames); | 894 | fc_host_statistic(error_frames); |
893 | fc_host_statistic(dumped_frames); | 895 | fc_host_statistic(dumped_frames); |
894 | fc_host_statistic(link_failure_count); | 896 | fc_host_statistic(link_failure_count); |
895 | fc_host_statistic(loss_of_sync_count); | 897 | fc_host_statistic(loss_of_sync_count); |
896 | fc_host_statistic(loss_of_signal_count); | 898 | fc_host_statistic(loss_of_signal_count); |
897 | fc_host_statistic(prim_seq_protocol_err_count); | 899 | fc_host_statistic(prim_seq_protocol_err_count); |
898 | fc_host_statistic(invalid_tx_word_count); | 900 | fc_host_statistic(invalid_tx_word_count); |
899 | fc_host_statistic(invalid_crc_count); | 901 | fc_host_statistic(invalid_crc_count); |
900 | fc_host_statistic(fcp_input_requests); | 902 | fc_host_statistic(fcp_input_requests); |
901 | fc_host_statistic(fcp_output_requests); | 903 | fc_host_statistic(fcp_output_requests); |
902 | fc_host_statistic(fcp_control_requests); | 904 | fc_host_statistic(fcp_control_requests); |
903 | fc_host_statistic(fcp_input_megabytes); | 905 | fc_host_statistic(fcp_input_megabytes); |
904 | fc_host_statistic(fcp_output_megabytes); | 906 | fc_host_statistic(fcp_output_megabytes); |
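/*
 * Editor's illustrative sketch (not part of the patch): each of the
 * fc_host_statistic() invocations above expands to a show routine that
 * passes fc_stat_show() the offset of the named u64 field within
 * struct fc_host_statistics, plus a read-only class device attribute.
 * fc_host_statistic(tx_frames), for example, expands to roughly:
 */
static ssize_t show_fcstat_tx_frames(struct class_device *cd, char *buf)
{
	/* format the counter found at offsetof(..., tx_frames) in the
	 * statistics block returned by the LLDD's get_fc_host_stats() */
	return fc_stat_show(cd, buf,
			    offsetof(struct fc_host_statistics, tx_frames));
}
static FC_CLASS_DEVICE_ATTR(host, tx_frames, S_IRUGO,
			    show_fcstat_tx_frames, NULL);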
905 | 907 | ||
906 | static ssize_t | 908 | static ssize_t |
907 | fc_reset_statistics(struct class_device *cdev, const char *buf, | 909 | fc_reset_statistics(struct class_device *cdev, const char *buf, |
908 | size_t count) | 910 | size_t count) |
909 | { | 911 | { |
910 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 912 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
911 | struct fc_internal *i = to_fc_internal(shost->transportt); | 913 | struct fc_internal *i = to_fc_internal(shost->transportt); |
912 | 914 | ||
913 | /* ignore any data value written to the attribute */ | 915 | /* ignore any data value written to the attribute */ |
914 | if (i->f->reset_fc_host_stats) { | 916 | if (i->f->reset_fc_host_stats) { |
915 | i->f->reset_fc_host_stats(shost); | 917 | i->f->reset_fc_host_stats(shost); |
916 | return count; | 918 | return count; |
917 | } | 919 | } |
918 | 920 | ||
919 | return -ENOENT; | 921 | return -ENOENT; |
920 | } | 922 | } |
921 | static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, | 923 | static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, |
922 | fc_reset_statistics); | 924 | fc_reset_statistics); |
923 | 925 | ||
924 | 926 | ||
925 | static struct attribute *fc_statistics_attrs[] = { | 927 | static struct attribute *fc_statistics_attrs[] = { |
926 | &class_device_attr_host_seconds_since_last_reset.attr, | 928 | &class_device_attr_host_seconds_since_last_reset.attr, |
927 | &class_device_attr_host_tx_frames.attr, | 929 | &class_device_attr_host_tx_frames.attr, |
928 | &class_device_attr_host_tx_words.attr, | 930 | &class_device_attr_host_tx_words.attr, |
929 | &class_device_attr_host_rx_frames.attr, | 931 | &class_device_attr_host_rx_frames.attr, |
930 | &class_device_attr_host_rx_words.attr, | 932 | &class_device_attr_host_rx_words.attr, |
931 | &class_device_attr_host_lip_count.attr, | 933 | &class_device_attr_host_lip_count.attr, |
932 | &class_device_attr_host_nos_count.attr, | 934 | &class_device_attr_host_nos_count.attr, |
933 | &class_device_attr_host_error_frames.attr, | 935 | &class_device_attr_host_error_frames.attr, |
934 | &class_device_attr_host_dumped_frames.attr, | 936 | &class_device_attr_host_dumped_frames.attr, |
935 | &class_device_attr_host_link_failure_count.attr, | 937 | &class_device_attr_host_link_failure_count.attr, |
936 | &class_device_attr_host_loss_of_sync_count.attr, | 938 | &class_device_attr_host_loss_of_sync_count.attr, |
937 | &class_device_attr_host_loss_of_signal_count.attr, | 939 | &class_device_attr_host_loss_of_signal_count.attr, |
938 | &class_device_attr_host_prim_seq_protocol_err_count.attr, | 940 | &class_device_attr_host_prim_seq_protocol_err_count.attr, |
939 | &class_device_attr_host_invalid_tx_word_count.attr, | 941 | &class_device_attr_host_invalid_tx_word_count.attr, |
940 | &class_device_attr_host_invalid_crc_count.attr, | 942 | &class_device_attr_host_invalid_crc_count.attr, |
941 | &class_device_attr_host_fcp_input_requests.attr, | 943 | &class_device_attr_host_fcp_input_requests.attr, |
942 | &class_device_attr_host_fcp_output_requests.attr, | 944 | &class_device_attr_host_fcp_output_requests.attr, |
943 | &class_device_attr_host_fcp_control_requests.attr, | 945 | &class_device_attr_host_fcp_control_requests.attr, |
944 | &class_device_attr_host_fcp_input_megabytes.attr, | 946 | &class_device_attr_host_fcp_input_megabytes.attr, |
945 | &class_device_attr_host_fcp_output_megabytes.attr, | 947 | &class_device_attr_host_fcp_output_megabytes.attr, |
946 | &class_device_attr_host_reset_statistics.attr, | 948 | &class_device_attr_host_reset_statistics.attr, |
947 | NULL | 949 | NULL |
948 | }; | 950 | }; |
949 | 951 | ||
950 | static struct attribute_group fc_statistics_group = { | 952 | static struct attribute_group fc_statistics_group = { |
951 | .name = "statistics", | 953 | .name = "statistics", |
952 | .attrs = fc_statistics_attrs, | 954 | .attrs = fc_statistics_attrs, |
953 | }; | 955 | }; |
954 | 956 | ||
955 | static int fc_host_match(struct attribute_container *cont, | 957 | static int fc_host_match(struct attribute_container *cont, |
956 | struct device *dev) | 958 | struct device *dev) |
957 | { | 959 | { |
958 | struct Scsi_Host *shost; | 960 | struct Scsi_Host *shost; |
959 | struct fc_internal *i; | 961 | struct fc_internal *i; |
960 | 962 | ||
961 | if (!scsi_is_host_device(dev)) | 963 | if (!scsi_is_host_device(dev)) |
962 | return 0; | 964 | return 0; |
963 | 965 | ||
964 | shost = dev_to_shost(dev); | 966 | shost = dev_to_shost(dev); |
965 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 967 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
966 | != &fc_host_class.class) | 968 | != &fc_host_class.class) |
967 | return 0; | 969 | return 0; |
968 | 970 | ||
969 | i = to_fc_internal(shost->transportt); | 971 | i = to_fc_internal(shost->transportt); |
970 | 972 | ||
971 | return &i->t.host_attrs.ac == cont; | 973 | return &i->t.host_attrs.ac == cont; |
972 | } | 974 | } |
973 | 975 | ||
974 | static int fc_target_match(struct attribute_container *cont, | 976 | static int fc_target_match(struct attribute_container *cont, |
975 | struct device *dev) | 977 | struct device *dev) |
976 | { | 978 | { |
977 | struct Scsi_Host *shost; | 979 | struct Scsi_Host *shost; |
978 | struct fc_internal *i; | 980 | struct fc_internal *i; |
979 | 981 | ||
980 | if (!scsi_is_target_device(dev)) | 982 | if (!scsi_is_target_device(dev)) |
981 | return 0; | 983 | return 0; |
982 | 984 | ||
983 | shost = dev_to_shost(dev->parent); | 985 | shost = dev_to_shost(dev->parent); |
984 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 986 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
985 | != &fc_host_class.class) | 987 | != &fc_host_class.class) |
986 | return 0; | 988 | return 0; |
987 | 989 | ||
988 | i = to_fc_internal(shost->transportt); | 990 | i = to_fc_internal(shost->transportt); |
989 | 991 | ||
990 | return &i->t.target_attrs.ac == cont; | 992 | return &i->t.target_attrs.ac == cont; |
991 | } | 993 | } |
992 | 994 | ||
993 | static void fc_rport_dev_release(struct device *dev) | 995 | static void fc_rport_dev_release(struct device *dev) |
994 | { | 996 | { |
995 | struct fc_rport *rport = dev_to_rport(dev); | 997 | struct fc_rport *rport = dev_to_rport(dev); |
996 | put_device(dev->parent); | 998 | put_device(dev->parent); |
997 | kfree(rport); | 999 | kfree(rport); |
998 | } | 1000 | } |
999 | 1001 | ||
1000 | int scsi_is_fc_rport(const struct device *dev) | 1002 | int scsi_is_fc_rport(const struct device *dev) |
1001 | { | 1003 | { |
1002 | return dev->release == fc_rport_dev_release; | 1004 | return dev->release == fc_rport_dev_release; |
1003 | } | 1005 | } |
1004 | EXPORT_SYMBOL(scsi_is_fc_rport); | 1006 | EXPORT_SYMBOL(scsi_is_fc_rport); |
1005 | 1007 | ||
1006 | static int fc_rport_match(struct attribute_container *cont, | 1008 | static int fc_rport_match(struct attribute_container *cont, |
1007 | struct device *dev) | 1009 | struct device *dev) |
1008 | { | 1010 | { |
1009 | struct Scsi_Host *shost; | 1011 | struct Scsi_Host *shost; |
1010 | struct fc_internal *i; | 1012 | struct fc_internal *i; |
1011 | 1013 | ||
1012 | if (!scsi_is_fc_rport(dev)) | 1014 | if (!scsi_is_fc_rport(dev)) |
1013 | return 0; | 1015 | return 0; |
1014 | 1016 | ||
1015 | shost = dev_to_shost(dev->parent); | 1017 | shost = dev_to_shost(dev->parent); |
1016 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 1018 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
1017 | != &fc_host_class.class) | 1019 | != &fc_host_class.class) |
1018 | return 0; | 1020 | return 0; |
1019 | 1021 | ||
1020 | i = to_fc_internal(shost->transportt); | 1022 | i = to_fc_internal(shost->transportt); |
1021 | 1023 | ||
1022 | return &i->rport_attr_cont.ac == cont; | 1024 | return &i->rport_attr_cont.ac == cont; |
1023 | } | 1025 | } |
1024 | 1026 | ||
1025 | struct scsi_transport_template * | 1027 | struct scsi_transport_template * |
1026 | fc_attach_transport(struct fc_function_template *ft) | 1028 | fc_attach_transport(struct fc_function_template *ft) |
1027 | { | 1029 | { |
1028 | struct fc_internal *i = kmalloc(sizeof(struct fc_internal), | 1030 | struct fc_internal *i = kmalloc(sizeof(struct fc_internal), |
1029 | GFP_KERNEL); | 1031 | GFP_KERNEL); |
1030 | int count; | 1032 | int count; |
1031 | 1033 | ||
1032 | if (unlikely(!i)) | 1034 | if (unlikely(!i)) |
1033 | return NULL; | 1035 | return NULL; |
1034 | 1036 | ||
1035 | memset(i, 0, sizeof(struct fc_internal)); | 1037 | memset(i, 0, sizeof(struct fc_internal)); |
1036 | 1038 | ||
1037 | i->t.target_attrs.ac.attrs = &i->starget_attrs[0]; | 1039 | i->t.target_attrs.ac.attrs = &i->starget_attrs[0]; |
1038 | i->t.target_attrs.ac.class = &fc_transport_class.class; | 1040 | i->t.target_attrs.ac.class = &fc_transport_class.class; |
1039 | i->t.target_attrs.ac.match = fc_target_match; | 1041 | i->t.target_attrs.ac.match = fc_target_match; |
1040 | i->t.target_size = sizeof(struct fc_starget_attrs); | 1042 | i->t.target_size = sizeof(struct fc_starget_attrs); |
1041 | transport_container_register(&i->t.target_attrs); | 1043 | transport_container_register(&i->t.target_attrs); |
1042 | 1044 | ||
1043 | i->t.host_attrs.ac.attrs = &i->host_attrs[0]; | 1045 | i->t.host_attrs.ac.attrs = &i->host_attrs[0]; |
1044 | i->t.host_attrs.ac.class = &fc_host_class.class; | 1046 | i->t.host_attrs.ac.class = &fc_host_class.class; |
1045 | i->t.host_attrs.ac.match = fc_host_match; | 1047 | i->t.host_attrs.ac.match = fc_host_match; |
1046 | i->t.host_size = sizeof(struct fc_host_attrs); | 1048 | i->t.host_size = sizeof(struct fc_host_attrs); |
1047 | if (ft->get_fc_host_stats) | 1049 | if (ft->get_fc_host_stats) |
1048 | i->t.host_attrs.statistics = &fc_statistics_group; | 1050 | i->t.host_attrs.statistics = &fc_statistics_group; |
1049 | transport_container_register(&i->t.host_attrs); | 1051 | transport_container_register(&i->t.host_attrs); |
1050 | 1052 | ||
1051 | i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; | 1053 | i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; |
1052 | i->rport_attr_cont.ac.class = &fc_rport_class.class; | 1054 | i->rport_attr_cont.ac.class = &fc_rport_class.class; |
1053 | i->rport_attr_cont.ac.match = fc_rport_match; | 1055 | i->rport_attr_cont.ac.match = fc_rport_match; |
1054 | transport_container_register(&i->rport_attr_cont); | 1056 | transport_container_register(&i->rport_attr_cont); |
1055 | 1057 | ||
1056 | i->f = ft; | 1058 | i->f = ft; |
1057 | 1059 | ||
1058 | /* Transport uses the shost workq for scsi scanning */ | 1060 | /* Transport uses the shost workq for scsi scanning */ |
1059 | i->t.create_work_queue = 1; | 1061 | i->t.create_work_queue = 1; |
1060 | 1062 | ||
1061 | /* | 1063 | /* |
1062 | * Setup SCSI Target Attributes. | 1064 | * Setup SCSI Target Attributes. |
1063 | */ | 1065 | */ |
1064 | count = 0; | 1066 | count = 0; |
1065 | SETUP_STARGET_ATTRIBUTE_RD(node_name); | 1067 | SETUP_STARGET_ATTRIBUTE_RD(node_name); |
1066 | SETUP_STARGET_ATTRIBUTE_RD(port_name); | 1068 | SETUP_STARGET_ATTRIBUTE_RD(port_name); |
1067 | SETUP_STARGET_ATTRIBUTE_RD(port_id); | 1069 | SETUP_STARGET_ATTRIBUTE_RD(port_id); |
1068 | 1070 | ||
1069 | BUG_ON(count > FC_STARGET_NUM_ATTRS); | 1071 | BUG_ON(count > FC_STARGET_NUM_ATTRS); |
1070 | 1072 | ||
1071 | i->starget_attrs[count] = NULL; | 1073 | i->starget_attrs[count] = NULL; |
1072 | 1074 | ||
1073 | 1075 | ||
1074 | /* | 1076 | /* |
1075 | * Setup SCSI Host Attributes. | 1077 | * Setup SCSI Host Attributes. |
1076 | */ | 1078 | */ |
1077 | count=0; | 1079 | count=0; |
1078 | SETUP_HOST_ATTRIBUTE_RD(node_name); | 1080 | SETUP_HOST_ATTRIBUTE_RD(node_name); |
1079 | SETUP_HOST_ATTRIBUTE_RD(port_name); | 1081 | SETUP_HOST_ATTRIBUTE_RD(port_name); |
1080 | SETUP_HOST_ATTRIBUTE_RD(supported_classes); | 1082 | SETUP_HOST_ATTRIBUTE_RD(supported_classes); |
1081 | SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); | 1083 | SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); |
1082 | SETUP_HOST_ATTRIBUTE_RD(symbolic_name); | 1084 | SETUP_HOST_ATTRIBUTE_RD(symbolic_name); |
1083 | SETUP_HOST_ATTRIBUTE_RD(supported_speeds); | 1085 | SETUP_HOST_ATTRIBUTE_RD(supported_speeds); |
1084 | SETUP_HOST_ATTRIBUTE_RD(maxframe_size); | 1086 | SETUP_HOST_ATTRIBUTE_RD(maxframe_size); |
1085 | SETUP_HOST_ATTRIBUTE_RD(serial_number); | 1087 | SETUP_HOST_ATTRIBUTE_RD(serial_number); |
1086 | 1088 | ||
1087 | SETUP_HOST_ATTRIBUTE_RD(port_id); | 1089 | SETUP_HOST_ATTRIBUTE_RD(port_id); |
1088 | SETUP_HOST_ATTRIBUTE_RD(port_type); | 1090 | SETUP_HOST_ATTRIBUTE_RD(port_type); |
1089 | SETUP_HOST_ATTRIBUTE_RD(port_state); | 1091 | SETUP_HOST_ATTRIBUTE_RD(port_state); |
1090 | SETUP_HOST_ATTRIBUTE_RD(active_fc4s); | 1092 | SETUP_HOST_ATTRIBUTE_RD(active_fc4s); |
1091 | SETUP_HOST_ATTRIBUTE_RD(speed); | 1093 | SETUP_HOST_ATTRIBUTE_RD(speed); |
1092 | SETUP_HOST_ATTRIBUTE_RD(fabric_name); | 1094 | SETUP_HOST_ATTRIBUTE_RD(fabric_name); |
1093 | 1095 | ||
1094 | /* Transport-managed attributes */ | 1096 | /* Transport-managed attributes */ |
1095 | SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); | 1097 | SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); |
1096 | 1098 | ||
1097 | BUG_ON(count > FC_HOST_NUM_ATTRS); | 1099 | BUG_ON(count > FC_HOST_NUM_ATTRS); |
1098 | 1100 | ||
1099 | i->host_attrs[count] = NULL; | 1101 | i->host_attrs[count] = NULL; |
1100 | 1102 | ||
1101 | /* | 1103 | /* |
1102 | * Setup Remote Port Attributes. | 1104 | * Setup Remote Port Attributes. |
1103 | */ | 1105 | */ |
1104 | count=0; | 1106 | count=0; |
1105 | SETUP_RPORT_ATTRIBUTE_RD(maxframe_size); | 1107 | SETUP_RPORT_ATTRIBUTE_RD(maxframe_size); |
1106 | SETUP_RPORT_ATTRIBUTE_RD(supported_classes); | 1108 | SETUP_RPORT_ATTRIBUTE_RD(supported_classes); |
1107 | SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo); | 1109 | SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo); |
1108 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name); | 1110 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name); |
1109 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name); | 1111 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name); |
1110 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id); | 1112 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id); |
1111 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); | 1113 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); |
1112 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); | 1114 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); |
1113 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); | 1115 | SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); |
1114 | 1116 | ||
1115 | BUG_ON(count > FC_RPORT_NUM_ATTRS); | 1117 | BUG_ON(count > FC_RPORT_NUM_ATTRS); |
1116 | 1118 | ||
1117 | i->rport_attrs[count] = NULL; | 1119 | i->rport_attrs[count] = NULL; |
1118 | 1120 | ||
1119 | return &i->t; | 1121 | return &i->t; |
1120 | } | 1122 | } |
1121 | EXPORT_SYMBOL(fc_attach_transport); | 1123 | EXPORT_SYMBOL(fc_attach_transport); |
1122 | 1124 | ||
1123 | void fc_release_transport(struct scsi_transport_template *t) | 1125 | void fc_release_transport(struct scsi_transport_template *t) |
1124 | { | 1126 | { |
1125 | struct fc_internal *i = to_fc_internal(t); | 1127 | struct fc_internal *i = to_fc_internal(t); |
1126 | 1128 | ||
1127 | transport_container_unregister(&i->t.target_attrs); | 1129 | transport_container_unregister(&i->t.target_attrs); |
1128 | transport_container_unregister(&i->t.host_attrs); | 1130 | transport_container_unregister(&i->t.host_attrs); |
1129 | transport_container_unregister(&i->rport_attr_cont); | 1131 | transport_container_unregister(&i->rport_attr_cont); |
1130 | 1132 | ||
1131 | kfree(i); | 1133 | kfree(i); |
1132 | } | 1134 | } |
1133 | EXPORT_SYMBOL(fc_release_transport); | 1135 | EXPORT_SYMBOL(fc_release_transport); |
1134 | 1136 | ||
1135 | 1137 | ||
1136 | /** | 1138 | /** |
1137 | * fc_remove_host - called to terminate any fc_transport-related elements | 1139 | * fc_remove_host - called to terminate any fc_transport-related elements |
1138 | * for a scsi host. | 1140 | * for a scsi host. |
1139 | * @shost: scsi host whose fc transport elements are to be removed. | 1141 | * @shost: scsi host whose fc transport elements are to be removed. |
1140 | * | 1142 | * |
1141 | * This routine is expected to be called immediately preceding | 1143 | * This routine is expected to be called immediately preceding |
1142 | * a driver's call to scsi_remove_host(). | 1144 | * a driver's call to scsi_remove_host(). |
1143 | * | 1145 | * |
1144 | * WARNING: A driver utilizing the fc_transport, which fails to call | 1146 | * WARNING: A driver utilizing the fc_transport, which fails to call |
1145 | * this routine prior to scsi_remove_host(), will leave dangling | 1147 | * this routine prior to scsi_remove_host(), will leave dangling |
1146 | * objects in /sys/class/fc_remote_ports. Access to any of these | 1148 | * objects in /sys/class/fc_remote_ports. Access to any of these |
1147 | * objects can result in a system crash !!! | 1149 | * objects can result in a system crash !!! |
1148 | * | 1150 | * |
1149 | * Notes: | 1151 | * Notes: |
1150 | * This routine assumes no locks are held on entry. | 1152 | * This routine assumes no locks are held on entry. |
1151 | **/ | 1153 | **/ |
1152 | void | 1154 | void |
1153 | fc_remove_host(struct Scsi_Host *shost) | 1155 | fc_remove_host(struct Scsi_Host *shost) |
1154 | { | 1156 | { |
1155 | struct fc_rport *rport, *next_rport; | 1157 | struct fc_rport *rport, *next_rport; |
1156 | 1158 | ||
1157 | /* Remove any remote ports */ | 1159 | /* Remove any remote ports */ |
1158 | list_for_each_entry_safe(rport, next_rport, | 1160 | list_for_each_entry_safe(rport, next_rport, |
1159 | &fc_host_rports(shost), peers) | 1161 | &fc_host_rports(shost), peers) |
1160 | fc_rport_terminate(rport); | 1162 | fc_rport_terminate(rport); |
1161 | list_for_each_entry_safe(rport, next_rport, | 1163 | list_for_each_entry_safe(rport, next_rport, |
1162 | &fc_host_rport_bindings(shost), peers) | 1164 | &fc_host_rport_bindings(shost), peers) |
1163 | fc_rport_terminate(rport); | 1165 | fc_rport_terminate(rport); |
1164 | } | 1166 | } |
1165 | EXPORT_SYMBOL(fc_remove_host); | 1167 | EXPORT_SYMBOL(fc_remove_host); |
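/*
 * Editor's illustrative sketch (not part of the patch): the remove path of
 * a hypothetical LLDD, showing the ordering the warning above requires --
 * fc_remove_host() immediately before scsi_remove_host(). The function and
 * variable names are made up.
 */
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static void example_lldd_remove(struct Scsi_Host *shost)
{
	/* tear down all fc_remote_port objects and their sysfs entries */
	fc_remove_host(shost);
	/* now it is safe to detach the scsi host itself */
	scsi_remove_host(shost);
	scsi_host_put(shost);
}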
1166 | 1168 | ||
1167 | /** | 1169 | /** |
1168 | * fc_rport_create - allocates and creates a remote FC port. | 1170 | * fc_rport_create - allocates and creates a remote FC port. |
1169 | * @shost: scsi host the remote port is connected to. | 1171 | * @shost: scsi host the remote port is connected to. |
1170 | * @channel: Channel on shost port connected to. | 1172 | * @channel: Channel on shost port connected to. |
1171 | * @ids: The world wide names, fc address, and FC4 port | 1173 | * @ids: The world wide names, fc address, and FC4 port |
1172 | * roles for the remote port. | 1174 | * roles for the remote port. |
1173 | * | 1175 | * |
1174 | * Allocates and creates the remote port structure, including the | 1176 | * Allocates and creates the remote port structure, including the |
1175 | * class and sysfs creation. | 1177 | * class and sysfs creation. |
1176 | * | 1178 | * |
1177 | * Notes: | 1179 | * Notes: |
1178 | * This routine assumes no locks are held on entry. | 1180 | * This routine assumes no locks are held on entry. |
1179 | **/ | 1181 | **/ |
1180 | struct fc_rport * | 1182 | struct fc_rport * |
1181 | fc_rport_create(struct Scsi_Host *shost, int channel, | 1183 | fc_rport_create(struct Scsi_Host *shost, int channel, |
1182 | struct fc_rport_identifiers *ids) | 1184 | struct fc_rport_identifiers *ids) |
1183 | { | 1185 | { |
1184 | struct fc_host_attrs *fc_host = | 1186 | struct fc_host_attrs *fc_host = |
1185 | (struct fc_host_attrs *)shost->shost_data; | 1187 | (struct fc_host_attrs *)shost->shost_data; |
1186 | struct fc_internal *fci = to_fc_internal(shost->transportt); | 1188 | struct fc_internal *fci = to_fc_internal(shost->transportt); |
1187 | struct fc_rport *rport; | 1189 | struct fc_rport *rport; |
1188 | struct device *dev; | 1190 | struct device *dev; |
1189 | unsigned long flags; | 1191 | unsigned long flags; |
1190 | int error; | 1192 | int error; |
1191 | size_t size; | 1193 | size_t size; |
1192 | 1194 | ||
1193 | size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); | 1195 | size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); |
1194 | rport = kmalloc(size, GFP_KERNEL); | 1196 | rport = kmalloc(size, GFP_KERNEL); |
1195 | if (unlikely(!rport)) { | 1197 | if (unlikely(!rport)) { |
1196 | printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); | 1198 | printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); |
1197 | return NULL; | 1199 | return NULL; |
1198 | } | 1200 | } |
1199 | memset(rport, 0, size); | 1201 | memset(rport, 0, size); |
1200 | 1202 | ||
1201 | rport->maxframe_size = -1; | 1203 | rport->maxframe_size = -1; |
1202 | rport->supported_classes = FC_COS_UNSPECIFIED; | 1204 | rport->supported_classes = FC_COS_UNSPECIFIED; |
1203 | rport->dev_loss_tmo = fc_dev_loss_tmo; | 1205 | rport->dev_loss_tmo = fc_dev_loss_tmo; |
1204 | memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); | 1206 | memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); |
1205 | memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); | 1207 | memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); |
1206 | rport->port_id = ids->port_id; | 1208 | rport->port_id = ids->port_id; |
1207 | rport->roles = ids->roles; | 1209 | rport->roles = ids->roles; |
1208 | rport->port_state = FC_PORTSTATE_ONLINE; | 1210 | rport->port_state = FC_PORTSTATE_ONLINE; |
1209 | if (fci->f->dd_fcrport_size) | 1211 | if (fci->f->dd_fcrport_size) |
1210 | rport->dd_data = &rport[1]; | 1212 | rport->dd_data = &rport[1]; |
1211 | rport->channel = channel; | 1213 | rport->channel = channel; |
1212 | 1214 | ||
1213 | INIT_WORK(&rport->dev_loss_work, fc_timeout_blocked_rport, rport); | 1215 | INIT_WORK(&rport->dev_loss_work, fc_timeout_blocked_rport, rport); |
1214 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); | 1216 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); |
1215 | 1217 | ||
1216 | spin_lock_irqsave(shost->host_lock, flags); | 1218 | spin_lock_irqsave(shost->host_lock, flags); |
1217 | 1219 | ||
1218 | rport->number = fc_host->next_rport_number++; | 1220 | rport->number = fc_host->next_rport_number++; |
1219 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) | 1221 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) |
1220 | rport->scsi_target_id = fc_host->next_target_id++; | 1222 | rport->scsi_target_id = fc_host->next_target_id++; |
1221 | else | 1223 | else |
1222 | rport->scsi_target_id = -1; | 1224 | rport->scsi_target_id = -1; |
1223 | list_add_tail(&rport->peers, &fc_host_rports(shost)); | 1225 | list_add_tail(&rport->peers, &fc_host_rports(shost)); |
1224 | get_device(&shost->shost_gendev); | 1226 | get_device(&shost->shost_gendev); |
1225 | 1227 | ||
1226 | spin_unlock_irqrestore(shost->host_lock, flags); | 1228 | spin_unlock_irqrestore(shost->host_lock, flags); |
1227 | 1229 | ||
1228 | dev = &rport->dev; | 1230 | dev = &rport->dev; |
1229 | device_initialize(dev); | 1231 | device_initialize(dev); |
1230 | dev->parent = get_device(&shost->shost_gendev); | 1232 | dev->parent = get_device(&shost->shost_gendev); |
1231 | dev->release = fc_rport_dev_release; | 1233 | dev->release = fc_rport_dev_release; |
1232 | sprintf(dev->bus_id, "rport-%d:%d-%d", | 1234 | sprintf(dev->bus_id, "rport-%d:%d-%d", |
1233 | shost->host_no, channel, rport->number); | 1235 | shost->host_no, channel, rport->number); |
1234 | transport_setup_device(dev); | 1236 | transport_setup_device(dev); |
1235 | 1237 | ||
1236 | error = device_add(dev); | 1238 | error = device_add(dev); |
1237 | if (error) { | 1239 | if (error) { |
1238 | printk(KERN_ERR "FC Remote Port device_add failed\n"); | 1240 | printk(KERN_ERR "FC Remote Port device_add failed\n"); |
1239 | goto delete_rport; | 1241 | goto delete_rport; |
1240 | } | 1242 | } |
1241 | transport_add_device(dev); | 1243 | transport_add_device(dev); |
1242 | transport_configure_device(dev); | 1244 | transport_configure_device(dev); |
1243 | 1245 | ||
1244 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) | 1246 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) |
1245 | /* initiate a scan of the target */ | 1247 | /* initiate a scan of the target */ |
1246 | scsi_queue_work(shost, &rport->scan_work); | 1248 | scsi_queue_work(shost, &rport->scan_work); |
1247 | 1249 | ||
1248 | return rport; | 1250 | return rport; |
1249 | 1251 | ||
1250 | delete_rport: | 1252 | delete_rport: |
1251 | transport_destroy_device(dev); | 1253 | transport_destroy_device(dev); |
1252 | put_device(dev->parent); | 1254 | put_device(dev->parent); |
1253 | spin_lock_irqsave(shost->host_lock, flags); | 1255 | spin_lock_irqsave(shost->host_lock, flags); |
1254 | list_del(&rport->peers); | 1256 | list_del(&rport->peers); |
1255 | put_device(&shost->shost_gendev); | 1257 | put_device(&shost->shost_gendev); |
1256 | spin_unlock_irqrestore(shost->host_lock, flags); | 1258 | spin_unlock_irqrestore(shost->host_lock, flags); |
1257 | put_device(dev->parent); | 1259 | put_device(dev->parent); |
1258 | kfree(rport); | 1260 | kfree(rport); |
1259 | return NULL; | 1261 | return NULL; |
1260 | } | 1262 | } |
1261 | 1263 | ||
1262 | /** | 1264 | /** |
1263 | * fc_remote_port_add - notifies the fc transport of the existence | 1265 | * fc_remote_port_add - notifies the fc transport of the existence |
1264 | * of a remote FC port. | 1266 | * of a remote FC port. |
1265 | * @shost: scsi host the remote port is connected to. | 1267 | * @shost: scsi host the remote port is connected to. |
1266 | * @channel: Channel on shost port connected to. | 1268 | * @channel: Channel on shost port connected to. |
1267 | * @ids: The world wide names, fc address, and FC4 port | 1269 | * @ids: The world wide names, fc address, and FC4 port |
1268 | * roles for the remote port. | 1270 | * roles for the remote port. |
1269 | * | 1271 | * |
1270 | * The LLDD calls this routine to notify the transport of the existence | 1272 | * The LLDD calls this routine to notify the transport of the existence |
1271 | * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn) | 1273 | * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn) |
1272 | * of the port, its FC address (port_id), and the FC4 roles that are | 1274 | * of the port, its FC address (port_id), and the FC4 roles that are |
1273 | * active for the port. | 1275 | * active for the port. |
1274 | * | 1276 | * |
1275 | * For ports that are FCP targets (aka scsi targets), the FC transport | 1277 | * For ports that are FCP targets (aka scsi targets), the FC transport |
1276 | * maintains consistent target id bindings on behalf of the LLDD. | 1278 | * maintains consistent target id bindings on behalf of the LLDD. |
1277 | * A consistent target id binding is an assignment of a target id to | 1279 | * A consistent target id binding is an assignment of a target id to |
1278 | * a remote port identifier, which persists while the scsi host is | 1280 | * a remote port identifier, which persists while the scsi host is |
1279 | * attached. The remote port can disappear, then later reappear, and | 1281 | * attached. The remote port can disappear, then later reappear, and |
1280 | * its target id assignment remains the same. This allows for shifts | 1282 | * its target id assignment remains the same. This allows for shifts |
1281 | * in FC addressing (if binding by wwpn or wwnn) with no apparent | 1283 | * in FC addressing (if binding by wwpn or wwnn) with no apparent |
1282 | * changes to the scsi subsystem which is based on scsi host number and | 1284 | * changes to the scsi subsystem which is based on scsi host number and |
1283 | * target id values. Bindings are only valid during the attachment of | 1285 | * target id values. Bindings are only valid during the attachment of |
1284 | * the scsi host. If the host detaches, then later re-attaches, target | 1286 | * the scsi host. If the host detaches, then later re-attaches, target |
1285 | * id bindings may change. | 1287 | * id bindings may change. |
1286 | * | 1288 | * |
1287 | * This routine is responsible for returning a remote port structure. | 1289 | * This routine is responsible for returning a remote port structure. |
1288 | * The routine will search the list of remote ports it maintains | 1290 | * The routine will search the list of remote ports it maintains |
1289 | * internally on behalf of consistent target id mappings. If found, the | 1291 | * internally on behalf of consistent target id mappings. If found, the |
1290 | * remote port structure will be reused. Otherwise, a new remote port | 1292 | * remote port structure will be reused. Otherwise, a new remote port |
1291 | * structure will be allocated. | 1293 | * structure will be allocated. |
1292 | * | 1294 | * |
1293 | * Whenever a remote port is allocated, a new fc_remote_port class | 1295 | * Whenever a remote port is allocated, a new fc_remote_port class |
1294 | * device is created. | 1296 | * device is created. |
1295 | * | 1297 | * |
1296 | * Should not be called from interrupt context. | 1298 | * Should not be called from interrupt context. |
1297 | * | 1299 | * |
1298 | * Notes: | 1300 | * Notes: |
1299 | * This routine assumes no locks are held on entry. | 1301 | * This routine assumes no locks are held on entry. |
1300 | **/ | 1302 | **/ |
1301 | struct fc_rport * | 1303 | struct fc_rport * |
1302 | fc_remote_port_add(struct Scsi_Host *shost, int channel, | 1304 | fc_remote_port_add(struct Scsi_Host *shost, int channel, |
1303 | struct fc_rport_identifiers *ids) | 1305 | struct fc_rport_identifiers *ids) |
1304 | { | 1306 | { |
1305 | struct fc_rport *rport; | 1307 | struct fc_rport *rport; |
1306 | unsigned long flags; | 1308 | unsigned long flags; |
1307 | int match = 0; | 1309 | int match = 0; |
1308 | 1310 | ||
1309 | if (likely((ids->roles & FC_RPORT_ROLE_FCP_TARGET) && | 1311 | if (likely((ids->roles & FC_RPORT_ROLE_FCP_TARGET) && |
1310 | (fc_host_tgtid_bind_type(shost) != FC_TGTID_BIND_NONE))) { | 1312 | (fc_host_tgtid_bind_type(shost) != FC_TGTID_BIND_NONE))) { |
1311 | 1313 | ||
1312 | /* search for a matching consistent binding */ | 1314 | /* search for a matching consistent binding */ |
1313 | 1315 | ||
1314 | spin_lock_irqsave(shost->host_lock, flags); | 1316 | spin_lock_irqsave(shost->host_lock, flags); |
1315 | 1317 | ||
1316 | list_for_each_entry(rport, &fc_host_rport_bindings(shost), | 1318 | list_for_each_entry(rport, &fc_host_rport_bindings(shost), |
1317 | peers) { | 1319 | peers) { |
1318 | if (rport->channel != channel) | 1320 | if (rport->channel != channel) |
1319 | continue; | 1321 | continue; |
1320 | 1322 | ||
1321 | switch (fc_host_tgtid_bind_type(shost)) { | 1323 | switch (fc_host_tgtid_bind_type(shost)) { |
1322 | case FC_TGTID_BIND_BY_WWPN: | 1324 | case FC_TGTID_BIND_BY_WWPN: |
1323 | if (rport->port_name == ids->port_name) | 1325 | if (rport->port_name == ids->port_name) |
1324 | match = 1; | 1326 | match = 1; |
1325 | break; | 1327 | break; |
1326 | case FC_TGTID_BIND_BY_WWNN: | 1328 | case FC_TGTID_BIND_BY_WWNN: |
1327 | if (rport->node_name == ids->node_name) | 1329 | if (rport->node_name == ids->node_name) |
1328 | match = 1; | 1330 | match = 1; |
1329 | break; | 1331 | break; |
1330 | case FC_TGTID_BIND_BY_ID: | 1332 | case FC_TGTID_BIND_BY_ID: |
1331 | if (rport->port_id == ids->port_id) | 1333 | if (rport->port_id == ids->port_id) |
1332 | match = 1; | 1334 | match = 1; |
1333 | break; | 1335 | break; |
1334 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ | 1336 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ |
1335 | break; | 1337 | break; |
1336 | } | 1338 | } |
1337 | 1339 | ||
1338 | if (match) { | 1340 | if (match) { |
1339 | list_move_tail(&rport->peers, | 1341 | list_move_tail(&rport->peers, |
1340 | &fc_host_rports(shost)); | 1342 | &fc_host_rports(shost)); |
1341 | break; | 1343 | break; |
1342 | } | 1344 | } |
1343 | } | 1345 | } |
1344 | 1346 | ||
1345 | spin_unlock_irqrestore(shost->host_lock, flags); | 1347 | spin_unlock_irqrestore(shost->host_lock, flags); |
1346 | 1348 | ||
1347 | if (match) { | 1349 | if (match) { |
1348 | memcpy(&rport->node_name, &ids->node_name, | 1350 | memcpy(&rport->node_name, &ids->node_name, |
1349 | sizeof(rport->node_name)); | 1351 | sizeof(rport->node_name)); |
1350 | memcpy(&rport->port_name, &ids->port_name, | 1352 | memcpy(&rport->port_name, &ids->port_name, |
1351 | sizeof(rport->port_name)); | 1353 | sizeof(rport->port_name)); |
1352 | rport->port_id = ids->port_id; | 1354 | rport->port_id = ids->port_id; |
1353 | rport->roles = ids->roles; | 1355 | rport->roles = ids->roles; |
1354 | rport->port_state = FC_PORTSTATE_ONLINE; | 1356 | rport->port_state = FC_PORTSTATE_ONLINE; |
1355 | 1357 | ||
1356 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) | 1358 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) |
1357 | /* initiate a scan of the target */ | 1359 | /* initiate a scan of the target */ |
1358 | scsi_queue_work(shost, &rport->scan_work); | 1360 | scsi_queue_work(shost, &rport->scan_work); |
1359 | 1361 | ||
1360 | return rport; | 1362 | return rport; |
1361 | } | 1363 | } |
1362 | } | 1364 | } |
1363 | 1365 | ||
1364 | /* No consistent binding found - create new remote port entry */ | 1366 | /* No consistent binding found - create new remote port entry */ |
1365 | rport = fc_rport_create(shost, channel, ids); | 1367 | rport = fc_rport_create(shost, channel, ids); |
1366 | 1368 | ||
1367 | return rport; | 1369 | return rport; |
1368 | } | 1370 | } |
1369 | EXPORT_SYMBOL(fc_remote_port_add); | 1371 | EXPORT_SYMBOL(fc_remote_port_add); |
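/*
 * Editor's illustrative sketch (not part of the patch): when a hypothetical
 * LLDD discovers a port on the fabric it fills in the identifiers and lets
 * the transport either reuse a matching consistent binding or allocate a
 * new remote port. All values below are made up; assumes
 * <scsi/scsi_transport_fc.h>.
 */
static struct fc_rport *example_lldd_report_port(struct Scsi_Host *shost)
{
	struct fc_rport_identifiers ids;

	ids.node_name = 0x20000000c9000001ULL;	/* WWNN */
	ids.port_name = 0x10000000c9000001ULL;	/* WWPN */
	ids.port_id = 0x0a0200;			/* FC address */
	ids.roles = FC_RPORT_ROLE_FCP_TARGET;

	/* a target role triggers a scsi scan of the new/rebound rport */
	return fc_remote_port_add(shost, 0, &ids);
}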
1370 | 1372 | ||
1371 | /* | 1373 | /* |
1372 | * fc_rport_tgt_remove - Removes the scsi target on the remote port | 1374 | * fc_rport_tgt_remove - Removes the scsi target on the remote port |
1373 | * @rport: The remote port to be operated on | 1375 | * @rport: The remote port to be operated on |
1374 | */ | 1376 | */ |
1375 | static void | 1377 | static void |
1376 | fc_rport_tgt_remove(struct fc_rport *rport) | 1378 | fc_rport_tgt_remove(struct fc_rport *rport) |
1377 | { | 1379 | { |
1378 | struct Scsi_Host *shost = rport_to_shost(rport); | 1380 | struct Scsi_Host *shost = rport_to_shost(rport); |
1379 | 1381 | ||
1380 | scsi_target_unblock(&rport->dev); | 1382 | scsi_target_unblock(&rport->dev); |
1381 | 1383 | ||
1382 | /* Stop anything on the workq */ | 1384 | /* Stop anything on the workq */ |
1383 | if (!cancel_delayed_work(&rport->dev_loss_work)) | 1385 | if (!cancel_delayed_work(&rport->dev_loss_work)) |
1384 | flush_scheduled_work(); | 1386 | flush_scheduled_work(); |
1385 | scsi_flush_work(shost); | 1387 | scsi_flush_work(shost); |
1386 | 1388 | ||
1387 | scsi_remove_target(&rport->dev); | 1389 | scsi_remove_target(&rport->dev); |
1388 | } | 1390 | } |
1389 | 1391 | ||
1390 | /* | 1392 | /* |
1391 | * fc_rport_terminate - this routine tears down and deallocates a remote port. | 1393 | * fc_rport_terminate - this routine tears down and deallocates a remote port. |
1392 | * @rport: The remote port to be terminated | 1394 | * @rport: The remote port to be terminated |
1393 | * | 1395 | * |
1394 | * Notes: | 1396 | * Notes: |
1395 | * This routine assumes no locks are held on entry. | 1397 | * This routine assumes no locks are held on entry. |
1396 | */ | 1398 | */ |
1397 | static void | 1399 | static void |
1398 | fc_rport_terminate(struct fc_rport *rport) | 1400 | fc_rport_terminate(struct fc_rport *rport) |
1399 | { | 1401 | { |
1400 | struct Scsi_Host *shost = rport_to_shost(rport); | 1402 | struct Scsi_Host *shost = rport_to_shost(rport); |
1401 | struct device *dev = &rport->dev; | 1403 | struct device *dev = &rport->dev; |
1402 | unsigned long flags; | 1404 | unsigned long flags; |
1403 | 1405 | ||
1404 | fc_rport_tgt_remove(rport); | 1406 | fc_rport_tgt_remove(rport); |
1405 | 1407 | ||
1406 | transport_remove_device(dev); | 1408 | transport_remove_device(dev); |
1407 | device_del(dev); | 1409 | device_del(dev); |
1408 | transport_destroy_device(dev); | 1410 | transport_destroy_device(dev); |
1409 | spin_lock_irqsave(shost->host_lock, flags); | 1411 | spin_lock_irqsave(shost->host_lock, flags); |
1410 | list_del(&rport->peers); | 1412 | list_del(&rport->peers); |
1411 | spin_unlock_irqrestore(shost->host_lock, flags); | 1413 | spin_unlock_irqrestore(shost->host_lock, flags); |
1412 | put_device(&shost->shost_gendev); | 1414 | put_device(&shost->shost_gendev); |
1413 | } | 1415 | } |
1414 | 1416 | ||
1415 | /** | 1417 | /** |
1416 | * fc_remote_port_delete - notifies the fc transport that a remote | 1418 | * fc_remote_port_delete - notifies the fc transport that a remote |
1417 | * port is no longer in existence. | 1419 | * port is no longer in existence. |
1418 | * @rport: The remote port that no longer exists | 1420 | * @rport: The remote port that no longer exists |
1419 | * | 1421 | * |
1420 | * The LLDD calls this routine to notify the transport that a remote | 1422 | * The LLDD calls this routine to notify the transport that a remote |
1421 | * port is no longer part of the topology. Note: Although a port | 1423 | * port is no longer part of the topology. Note: Although a port |
1422 | * may no longer be part of the topology, it may persist in the remote | 1424 | * may no longer be part of the topology, it may persist in the remote |
1423 | * ports displayed by the fc_host. This is done so that target id | 1425 | * ports displayed by the fc_host. This is done so that target id |
1424 | * mappings (managed via the remote port structures), are always visible | 1426 | * mappings (managed via the remote port structures), are always visible |
1425 | * as long as the mapping is valid, regardless of port state, | 1427 | * as long as the mapping is valid, regardless of port state, |
1426 | * | 1428 | * |
1427 | * If the remote port is not an FCP Target, it will be fully torn down | 1429 | * If the remote port is not an FCP Target, it will be fully torn down |
1428 | * and deallocated, including the fc_remote_port class device. | 1430 | * and deallocated, including the fc_remote_port class device. |
1429 | * | 1431 | * |
1430 | * If the remote port is an FCP Target, the port structure will be | 1432 | * If the remote port is an FCP Target, the port structure will be |
1431 | * marked as Not Present, but will remain as long as there is a valid | 1433 | * marked as Not Present, but will remain as long as there is a valid |
1432 | * SCSI target id mapping associated with the port structure. Validity | 1434 | * SCSI target id mapping associated with the port structure. Validity |
1433 | * is determined by the binding type. If binding by wwpn, then the port | 1435 | * is determined by the binding type. If binding by wwpn, then the port |
1434 | * structure is always valid and will not be deallocated until the host | 1436 | * structure is always valid and will not be deallocated until the host |
1435 | * is removed. If binding by wwnn, then the port structure is valid | 1437 | * is removed. If binding by wwnn, then the port structure is valid |
1436 | * until another port with the same node name is found in the topology. | 1438 | * until another port with the same node name is found in the topology. |
1437 | * If binding by port id (fc address), then the port structure is valid | 1439 | * If binding by port id (fc address), then the port structure is valid |
1438 | * until another port with the same address is identified. | 1440 | * until another port with the same address is identified. |
1439 | * | 1441 | * |
1440 | * Called from interrupt or normal process context. | 1442 | * Called from interrupt or normal process context. |
1441 | * | 1443 | * |
1442 | * Notes: | 1444 | * Notes: |
1443 | * This routine assumes no locks are held on entry. | 1445 | * This routine assumes no locks are held on entry. |
1444 | **/ | 1446 | **/ |
1445 | void | 1447 | void |
1446 | fc_remote_port_delete(struct fc_rport *rport) | 1448 | fc_remote_port_delete(struct fc_rport *rport) |
1447 | { | 1449 | { |
1448 | struct Scsi_Host *shost = rport_to_shost(rport); | 1450 | struct Scsi_Host *shost = rport_to_shost(rport); |
1449 | unsigned long flags; | 1451 | unsigned long flags; |
1450 | 1452 | ||
1451 | /* If no scsi target id mapping or consistent binding type, delete it */ | 1453 | /* If no scsi target id mapping or consistent binding type, delete it */ |
1452 | if ((rport->scsi_target_id == -1) || | 1454 | if ((rport->scsi_target_id == -1) || |
1453 | (fc_host_tgtid_bind_type(shost) == FC_TGTID_BIND_NONE)) { | 1455 | (fc_host_tgtid_bind_type(shost) == FC_TGTID_BIND_NONE)) { |
1454 | fc_rport_terminate(rport); | 1456 | fc_rport_terminate(rport); |
1455 | return; | 1457 | return; |
1456 | } | 1458 | } |
1457 | 1459 | ||
1458 | fc_rport_tgt_remove(rport); | 1460 | fc_rport_tgt_remove(rport); |
1459 | 1461 | ||
1460 | spin_lock_irqsave(shost->host_lock, flags); | 1462 | spin_lock_irqsave(shost->host_lock, flags); |
1461 | list_move_tail(&rport->peers, &fc_host_rport_bindings(shost)); | 1463 | list_move_tail(&rport->peers, &fc_host_rport_bindings(shost)); |
1462 | spin_unlock_irqrestore(shost->host_lock, flags); | 1464 | spin_unlock_irqrestore(shost->host_lock, flags); |
1463 | 1465 | ||
1464 | /* | 1466 | /* |
1465 | * Note: We do not remove or clear the hostdata area. This allows | 1467 | * Note: We do not remove or clear the hostdata area. This allows |
1466 | * host-specific target data to persist along with the | 1468 | * host-specific target data to persist along with the |
1467 | * scsi_target_id. It's up to the host to manage its hostdata area. | 1469 | * scsi_target_id. It's up to the host to manage its hostdata area. |
1468 | */ | 1470 | */ |
1469 | 1471 | ||
1470 | /* | 1472 | /* |
1471 | * Reinitialize port attributes that may change if the port comes back. | 1473 | * Reinitialize port attributes that may change if the port comes back. |
1472 | */ | 1474 | */ |
1473 | rport->maxframe_size = -1; | 1475 | rport->maxframe_size = -1; |
1474 | rport->supported_classes = FC_COS_UNSPECIFIED; | 1476 | rport->supported_classes = FC_COS_UNSPECIFIED; |
1475 | rport->roles = FC_RPORT_ROLE_UNKNOWN; | 1477 | rport->roles = FC_RPORT_ROLE_UNKNOWN; |
1476 | rport->port_state = FC_PORTSTATE_NOTPRESENT; | 1478 | rport->port_state = FC_PORTSTATE_NOTPRESENT; |
1477 | 1479 | ||
1478 | /* remove the identifiers that aren't used in the consistent binding */ | 1480 | /* remove the identifiers that aren't used in the consistent binding */ |
1479 | switch (fc_host_tgtid_bind_type(shost)) { | 1481 | switch (fc_host_tgtid_bind_type(shost)) { |
1480 | case FC_TGTID_BIND_BY_WWPN: | 1482 | case FC_TGTID_BIND_BY_WWPN: |
1481 | rport->node_name = -1; | 1483 | rport->node_name = -1; |
1482 | rport->port_id = -1; | 1484 | rport->port_id = -1; |
1483 | break; | 1485 | break; |
1484 | case FC_TGTID_BIND_BY_WWNN: | 1486 | case FC_TGTID_BIND_BY_WWNN: |
1485 | rport->port_name = -1; | 1487 | rport->port_name = -1; |
1486 | rport->port_id = -1; | 1488 | rport->port_id = -1; |
1487 | break; | 1489 | break; |
1488 | case FC_TGTID_BIND_BY_ID: | 1490 | case FC_TGTID_BIND_BY_ID: |
1489 | rport->node_name = -1; | 1491 | rport->node_name = -1; |
1490 | rport->port_name = -1; | 1492 | rport->port_name = -1; |
1491 | break; | 1493 | break; |
1492 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ | 1494 | case FC_TGTID_BIND_NONE: /* to keep compiler happy */ |
1493 | break; | 1495 | break; |
1494 | } | 1496 | } |
1495 | } | 1497 | } |
1496 | EXPORT_SYMBOL(fc_remote_port_delete); | 1498 | EXPORT_SYMBOL(fc_remote_port_delete); |
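/*
 * Editor's illustrative sketch (not part of the patch): when the fabric
 * reports that a port has gone away (e.g. via an RSCN), a hypothetical LLDD
 * simply hands the rport back; the transport decides, based on the host's
 * tgtid_bind_type, whether to free it or park it on the bindings list so
 * the scsi target id can be reused if the port returns.
 */
static void example_lldd_port_gone(struct fc_rport *rport)
{
	fc_remote_port_delete(rport);
}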
1497 | 1499 | ||
1498 | /** | 1500 | /** |
1499 | * fc_remote_port_rolechg - notifies the fc transport that the roles | 1501 | * fc_remote_port_rolechg - notifies the fc transport that the roles |
1500 | * on a remote port may have changed. | 1502 | * on a remote port may have changed. |
1501 | * @rport: The remote port that changed. | 1503 | * @rport: The remote port that changed. |
1502 | * | 1504 | * |
1503 | * The LLDD calls this routine to notify the transport that the roles | 1505 | * The LLDD calls this routine to notify the transport that the roles |
1504 | * on a remote port may have changed. The largest effect of this is | 1506 | * on a remote port may have changed. The largest effect of this is |
1505 | * if a port now becomes a FCP Target, it must be allocated a | 1507 | * if a port now becomes a FCP Target, it must be allocated a |
1506 | * scsi target id. If the port is no longer a FCP target, any | 1508 | * scsi target id. If the port is no longer a FCP target, any |
1507 | * scsi target id value assigned to it will persist in case the | 1509 | * scsi target id value assigned to it will persist in case the |
1508 | * role changes back to include FCP Target. No changes in the scsi | 1510 | * role changes back to include FCP Target. No changes in the scsi |
1509 | * midlayer will be invoked if the role changes (in the expectation | 1511 | * midlayer will be invoked if the role changes (in the expectation |
1510 | * that the role will be resumed. If it doesn't, normal error processing | 1512 | * that the role will be resumed. If it doesn't, normal error processing |
1511 | * will take place). | 1513 | * will take place). |
1512 | * | 1514 | * |
1513 | * Should not be called from interrupt context. | 1515 | * Should not be called from interrupt context. |
1514 | * | 1516 | * |
1515 | * Notes: | 1517 | * Notes: |
1516 | * This routine assumes no locks are held on entry. | 1518 | * This routine assumes no locks are held on entry. |
1517 | **/ | 1519 | **/ |
1518 | void | 1520 | void |
1519 | fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) | 1521 | fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) |
1520 | { | 1522 | { |
1521 | struct Scsi_Host *shost = rport_to_shost(rport); | 1523 | struct Scsi_Host *shost = rport_to_shost(rport); |
1522 | struct fc_host_attrs *fc_host = | 1524 | struct fc_host_attrs *fc_host = |
1523 | (struct fc_host_attrs *)shost->shost_data; | 1525 | (struct fc_host_attrs *)shost->shost_data; |
1524 | unsigned long flags; | 1526 | unsigned long flags; |
1525 | int create = 0; | 1527 | int create = 0; |
1526 | 1528 | ||
1527 | rport->roles = roles; | 1529 | rport->roles = roles; |
1528 | 1530 | ||
1529 | spin_lock_irqsave(shost->host_lock, flags); | 1531 | spin_lock_irqsave(shost->host_lock, flags); |
1530 | if ((rport->scsi_target_id == -1) && | 1532 | if ((rport->scsi_target_id == -1) && |
1531 | (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { | 1533 | (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { |
1532 | rport->scsi_target_id = fc_host->next_target_id++; | 1534 | rport->scsi_target_id = fc_host->next_target_id++; |
1533 | create = 1; | 1535 | create = 1; |
1534 | } | 1536 | } |
1535 | spin_unlock_irqrestore(shost->host_lock, flags); | 1537 | spin_unlock_irqrestore(shost->host_lock, flags); |
1536 | 1538 | ||
1537 | if (create) | 1539 | if (create) |
1538 | /* initiate a scan of the target */ | 1540 | /* initiate a scan of the target */ |
1539 | scsi_queue_work(shost, &rport->scan_work); | 1541 | scsi_queue_work(shost, &rport->scan_work); |
1540 | } | 1542 | } |
1541 | EXPORT_SYMBOL(fc_remote_port_rolechg); | 1543 | EXPORT_SYMBOL(fc_remote_port_rolechg); |
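/*
 * Editor's illustrative sketch (not part of the patch): if an already known
 * port later logs in as an FCP target, a hypothetical LLDD reports the new
 * role mask; the transport then assigns a scsi target id (if none exists)
 * and queues a scan.
 */
static void example_lldd_port_now_target(struct fc_rport *rport)
{
	fc_remote_port_rolechg(rport, rport->roles | FC_RPORT_ROLE_FCP_TARGET);
}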
1542 | 1544 | ||
1543 | /** | 1545 | /** |
1544 | * fc_timeout_blocked_rport - Timeout handler for blocked remote port | 1546 | * fc_timeout_blocked_rport - Timeout handler for blocked remote port |
1545 | * that fails to return in the allotted time. | 1547 | * that fails to return in the allotted time. |
1546 | * @data: scsi target that failed to reappear in the allotted time. | 1548 | * @data: scsi target that failed to reappear in the allotted time. |
1547 | **/ | 1549 | **/ |
1548 | static void | 1550 | static void |
1549 | fc_timeout_blocked_rport(void *data) | 1551 | fc_timeout_blocked_rport(void *data) |
1550 | { | 1552 | { |
1551 | struct fc_rport *rport = (struct fc_rport *)data; | 1553 | struct fc_rport *rport = (struct fc_rport *)data; |
1552 | 1554 | ||
1553 | rport->port_state = FC_PORTSTATE_OFFLINE; | 1555 | rport->port_state = FC_PORTSTATE_OFFLINE; |
1554 | 1556 | ||
1555 | dev_printk(KERN_ERR, &rport->dev, | 1557 | dev_printk(KERN_ERR, &rport->dev, |
1556 | "blocked FC remote port time out: removing target\n"); | 1558 | "blocked FC remote port time out: removing target\n"); |
1557 | 1559 | ||
1558 | /* | 1560 | /* |
1559 | * As this only occurs if the remote port (scsi target) | 1561 | * As this only occurs if the remote port (scsi target) |
1560 | * went away and didn't come back - we'll remove | 1562 | * went away and didn't come back - we'll remove |
1561 | * all attached scsi devices. | 1563 | * all attached scsi devices. |
1562 | */ | 1564 | */ |
1563 | scsi_target_unblock(&rport->dev); | 1565 | scsi_target_unblock(&rport->dev); |
1564 | scsi_remove_target(&rport->dev); | 1566 | scsi_remove_target(&rport->dev); |
1565 | } | 1567 | } |
1566 | 1568 | ||
1567 | /** | 1569 | /** |
1568 | * fc_remote_port_block - temporarily block any scsi traffic to a remote port. | 1570 | * fc_remote_port_block - temporarily block any scsi traffic to a remote port. |
1569 | * @rport: remote port to be blocked. | 1571 | * @rport: remote port to be blocked. |
1570 | * | 1572 | * |
1571 | * scsi lldd's with a FC transport call this routine to temporarily stop | 1573 | * scsi lldd's with a FC transport call this routine to temporarily stop |
1572 | * all scsi traffic to a remote port. If the port is not a SCSI target, | 1574 | * all scsi traffic to a remote port. If the port is not a SCSI target, |
1573 | * no action is taken. If the port is a SCSI target, all attached devices | 1575 | * no action is taken. If the port is a SCSI target, all attached devices |
1574 | * are placed into an SDEV_BLOCK state and a timer is started. The timer | 1576 | * are placed into an SDEV_BLOCK state and a timer is started. The timer |
1575 | * represents the maximum amount of time the port may be blocked. If the | 1577 | * represents the maximum amount of time the port may be blocked. If the |
1576 | * timer expires, the port is considered non-existent and the attached | 1578 | * timer expires, the port is considered non-existent and the attached |
1577 | * scsi devices will be removed. | 1579 | * scsi devices will be removed. |
1578 | * | 1580 | * |
1579 | * Called from interrupt or normal process context. | 1581 | * Called from interrupt or normal process context. |
1580 | * | 1582 | * |
1581 | * Returns zero if successful or error if not | 1583 | * Returns zero if successful or error if not |
1582 | * | 1584 | * |
1583 | * Notes: | 1585 | * Notes: |
1584 | * This routine assumes no locks are held on entry. | 1586 | * This routine assumes no locks are held on entry. |
1585 | * | 1587 | * |
1586 | * The timeout and timer types are extracted from the fc transport | 1588 | * The timeout and timer types are extracted from the fc transport |
1587 | * attributes from the caller's rport pointer. | 1589 | * attributes from the caller's rport pointer. |
1588 | **/ | 1590 | **/ |
1589 | int | 1591 | int |
1590 | fc_remote_port_block(struct fc_rport *rport) | 1592 | fc_remote_port_block(struct fc_rport *rport) |
1591 | { | 1593 | { |
1592 | int timeout = rport->dev_loss_tmo; | 1594 | int timeout = rport->dev_loss_tmo; |
1593 | struct work_struct *work = &rport->dev_loss_work; | 1595 | struct work_struct *work = &rport->dev_loss_work; |
1594 | 1596 | ||
1595 | if (timeout < 0 || timeout > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) | 1597 | if (timeout < 0 || timeout > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) |
1596 | return -EINVAL; | 1598 | return -EINVAL; |
1597 | 1599 | ||
1598 | scsi_target_block(&rport->dev); | 1600 | scsi_target_block(&rport->dev); |
1599 | 1601 | ||
1600 | /* cap the length the devices can be blocked */ | 1602 | /* cap the length the devices can be blocked */ |
1601 | schedule_delayed_work(work, timeout * HZ); | 1603 | schedule_delayed_work(work, timeout * HZ); |
1602 | 1604 | ||
1603 | rport->port_state = FC_PORTSTATE_BLOCKED; | 1605 | rport->port_state = FC_PORTSTATE_BLOCKED; |
1604 | return 0; | 1606 | return 0; |
1605 | } | 1607 | } |
1606 | EXPORT_SYMBOL(fc_remote_port_block); | 1608 | EXPORT_SYMBOL(fc_remote_port_block); |
1607 | 1609 | ||
1608 | /** | 1610 | /** |
1609 | * fc_remote_port_unblock - restart any blocked scsi traffic to a remote port. | 1611 | * fc_remote_port_unblock - restart any blocked scsi traffic to a remote port. |
1610 | * @rport: remote port to be unblocked. | 1612 | * @rport: remote port to be unblocked. |
1611 | * | 1613 | * |
1612 | * scsi lldd's with an FC transport call this routine to restart IO to all | 1614 | * scsi lldd's with an FC transport call this routine to restart IO to all |
1613 | * devices associated with the caller's scsi target following an fc_remote_port_block | 1615 | * devices associated with the caller's scsi target following an fc_remote_port_block |
1614 | * request. Called from interrupt or normal process context. | 1616 | * request. Called from interrupt or normal process context. |
1615 | * | 1617 | * |
1616 | * Notes: | 1618 | * Notes: |
1617 | * This routine assumes no locks are held on entry. | 1619 | * This routine assumes no locks are held on entry. |
1618 | **/ | 1620 | **/ |
1619 | void | 1621 | void |
1620 | fc_remote_port_unblock(struct fc_rport *rport) | 1622 | fc_remote_port_unblock(struct fc_rport *rport) |
1621 | { | 1623 | { |
1622 | struct work_struct *work = &rport->dev_loss_work; | 1624 | struct work_struct *work = &rport->dev_loss_work; |
1623 | struct Scsi_Host *shost = rport_to_shost(rport); | 1625 | struct Scsi_Host *shost = rport_to_shost(rport); |
1624 | 1626 | ||
1625 | /* | 1627 | /* |
1626 | * Stop the target timer first. Take no action on the del_timer | 1628 | * Stop the target timer first. Take no action on the del_timer |
1627 | * failure as the state machine state change will validate the | 1629 | * failure as the state machine state change will validate the |
1628 | * transaction. | 1630 | * transaction. |
1629 | */ | 1631 | */ |
1630 | if (!cancel_delayed_work(work)) | 1632 | if (!cancel_delayed_work(work)) |
1631 | flush_scheduled_work(); | 1633 | flush_scheduled_work(); |
1632 | 1634 | ||
1633 | if (rport->port_state == FC_PORTSTATE_OFFLINE) | 1635 | if (rport->port_state == FC_PORTSTATE_OFFLINE) |
1634 | /* | 1636 | /* |
1635 | * initiate a scan of the target as the target has | 1637 | * initiate a scan of the target as the target has |
1636 | * been torn down. | 1638 | * been torn down. |
1637 | */ | 1639 | */ |
1638 | scsi_queue_work(shost, &rport->scan_work); | 1640 | scsi_queue_work(shost, &rport->scan_work); |
1639 | else | 1641 | else |
1640 | scsi_target_unblock(&rport->dev); | 1642 | scsi_target_unblock(&rport->dev); |
1641 | 1643 | ||
1642 | rport->port_state = FC_PORTSTATE_ONLINE; | 1644 | rport->port_state = FC_PORTSTATE_ONLINE; |
1643 | } | 1645 | } |
1644 | EXPORT_SYMBOL(fc_remote_port_unblock); | 1646 | EXPORT_SYMBOL(fc_remote_port_unblock); |
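[Editorial note: a minimal sketch of how the block/unblock pair above is meant to be used from a driver's link-event handling. The example_* handlers are invented; error handling is reduced to the -EINVAL case documented for fc_remote_port_block().]

        /* Hypothetical link-event handlers pairing block and unblock. */
        static void example_lldd_link_down(struct fc_rport *rport)
        {
                /* Fails only if rport->dev_loss_tmo is out of range. */
                if (fc_remote_port_block(rport))
                        printk(KERN_WARNING
                               "example: bad dev_loss_tmo, not blocking\n");
        }

        static void example_lldd_link_up(struct fc_rport *rport)
        {
                /* Restart I/O, or rescan if the dev_loss timer already fired. */
                fc_remote_port_unblock(rport);
        }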
1645 | 1647 | ||
1646 | /** | 1648 | /** |
1647 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. | 1649 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. |
1648 | * @data: remote port to be scanned. | 1650 | * @data: remote port to be scanned. |
1649 | **/ | 1651 | **/ |
1650 | static void | 1652 | static void |
1651 | fc_scsi_scan_rport(void *data) | 1653 | fc_scsi_scan_rport(void *data) |
1652 | { | 1654 | { |
1653 | struct fc_rport *rport = (struct fc_rport *)data; | 1655 | struct fc_rport *rport = (struct fc_rport *)data; |
1654 | 1656 | ||
1655 | scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id, | 1657 | scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id, |
1656 | SCAN_WILD_CARD, 1); | 1658 | SCAN_WILD_CARD, 1); |
1657 | } | 1659 | } |
1658 | 1660 | ||
1659 | 1661 | ||
1660 | MODULE_AUTHOR("Martin Hicks"); | 1662 | MODULE_AUTHOR("Martin Hicks"); |
1661 | MODULE_DESCRIPTION("FC Transport Attributes"); | 1663 | MODULE_DESCRIPTION("FC Transport Attributes"); |
1662 | MODULE_LICENSE("GPL"); | 1664 | MODULE_LICENSE("GPL"); |
1663 | 1665 | ||
1664 | module_init(fc_transport_init); | 1666 | module_init(fc_transport_init); |
1665 | module_exit(fc_transport_exit); | 1667 | module_exit(fc_transport_exit); |
1666 | 1668 |
drivers/scsi/scsi_transport_spi.c
1 | /* | 1 | /* |
2 | * Parallel SCSI (SPI) transport specific attributes exported to sysfs. | 2 | * Parallel SCSI (SPI) transport specific attributes exported to sysfs. |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. | 4 | * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. |
5 | * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> | 5 | * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License, or |
10 | * (at your option) any later version. | 10 | * (at your option) any later version. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | #include <linux/ctype.h> | 21 | #include <linux/ctype.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/blkdev.h> | 25 | #include <linux/blkdev.h> |
26 | #include <asm/semaphore.h> | 26 | #include <asm/semaphore.h> |
27 | #include <scsi/scsi.h> | 27 | #include <scsi/scsi.h> |
28 | #include "scsi_priv.h" | 28 | #include "scsi_priv.h" |
29 | #include <scsi/scsi_device.h> | 29 | #include <scsi/scsi_device.h> |
30 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
31 | #include <scsi/scsi_request.h> | 31 | #include <scsi/scsi_request.h> |
32 | #include <scsi/scsi_eh.h> | 32 | #include <scsi/scsi_eh.h> |
33 | #include <scsi/scsi_transport.h> | 33 | #include <scsi/scsi_transport.h> |
34 | #include <scsi/scsi_transport_spi.h> | 34 | #include <scsi/scsi_transport_spi.h> |
35 | 35 | ||
36 | #define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a) | 36 | #define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a) |
37 | 37 | ||
38 | #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ | 38 | #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ |
39 | #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always | 39 | #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always |
40 | * on" attributes */ | 40 | * on" attributes */ |
41 | #define SPI_HOST_ATTRS 1 | 41 | #define SPI_HOST_ATTRS 1 |
42 | 42 | ||
43 | #define SPI_MAX_ECHO_BUFFER_SIZE 4096 | 43 | #define SPI_MAX_ECHO_BUFFER_SIZE 4096 |
44 | 44 | ||
45 | #define DV_LOOPS 3 | 45 | #define DV_LOOPS 3 |
46 | #define DV_TIMEOUT (10*HZ) | 46 | #define DV_TIMEOUT (10*HZ) |
47 | #define DV_RETRIES 3 /* should only need at most | 47 | #define DV_RETRIES 3 /* should only need at most |
48 | * two cc/ua clears */ | 48 | * two cc/ua clears */ |
49 | 49 | ||
50 | /* Private data accessors (keep these out of the header file) */ | 50 | /* Private data accessors (keep these out of the header file) */ |
51 | #define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending) | 51 | #define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending) |
52 | #define spi_dv_sem(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_sem) | 52 | #define spi_dv_sem(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_sem) |
53 | 53 | ||
54 | struct spi_internal { | 54 | struct spi_internal { |
55 | struct scsi_transport_template t; | 55 | struct scsi_transport_template t; |
56 | struct spi_function_template *f; | 56 | struct spi_function_template *f; |
57 | /* The actual attributes */ | 57 | /* The actual attributes */ |
58 | struct class_device_attribute private_attrs[SPI_NUM_ATTRS]; | 58 | struct class_device_attribute private_attrs[SPI_NUM_ATTRS]; |
59 | /* The array of null terminated pointers to attributes | 59 | /* The array of null terminated pointers to attributes |
60 | * needed by scsi_sysfs.c */ | 60 | * needed by scsi_sysfs.c */ |
61 | struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1]; | 61 | struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1]; |
62 | struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS]; | 62 | struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS]; |
63 | struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1]; | 63 | struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1]; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) | 66 | #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) |
67 | 67 | ||
68 | static const int ppr_to_ps[] = { | 68 | static const int ppr_to_ps[] = { |
69 | /* The PPR values 0-6 are reserved, fill them in when | 69 | /* The PPR values 0-6 are reserved, fill them in when |
70 | * the committee defines them */ | 70 | * the committee defines them */ |
71 | -1, /* 0x00 */ | 71 | -1, /* 0x00 */ |
72 | -1, /* 0x01 */ | 72 | -1, /* 0x01 */ |
73 | -1, /* 0x02 */ | 73 | -1, /* 0x02 */ |
74 | -1, /* 0x03 */ | 74 | -1, /* 0x03 */ |
75 | -1, /* 0x04 */ | 75 | -1, /* 0x04 */ |
76 | -1, /* 0x05 */ | 76 | -1, /* 0x05 */ |
77 | -1, /* 0x06 */ | 77 | -1, /* 0x06 */ |
78 | 3125, /* 0x07 */ | 78 | 3125, /* 0x07 */ |
79 | 6250, /* 0x08 */ | 79 | 6250, /* 0x08 */ |
80 | 12500, /* 0x09 */ | 80 | 12500, /* 0x09 */ |
81 | 25000, /* 0x0a */ | 81 | 25000, /* 0x0a */ |
82 | 30300, /* 0x0b */ | 82 | 30300, /* 0x0b */ |
83 | 50000, /* 0x0c */ | 83 | 50000, /* 0x0c */ |
84 | }; | 84 | }; |
85 | /* The last PPR value covered by the table; above it the period in ns | 85 | /* The last PPR value covered by the table; above it the period in ns |
86 | * is the factor multiplied by 4 */ | 86 | * is the factor multiplied by 4 */ |
87 | #define SPI_STATIC_PPR 0x0c | 87 | #define SPI_STATIC_PPR 0x0c |
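[Editorial note: an illustrative helper, not in the original source, mirroring how ppr_to_ps[] and SPI_STATIC_PPR are combined further down in show_spi_transport_period_helper().]

        /* Illustrative only: PPR period factor -> transfer period in picoseconds. */
        static int example_ppr_factor_to_ps(int factor)
        {
                if (factor < 0 || factor > 0xff)
                        return -1;                      /* out of range */
                if (factor <= SPI_STATIC_PPR)
                        return ppr_to_ps[factor];       /* e.g. 0x09 -> 12500 ps;
                                                         * 0x00-0x06 stay -1 (reserved) */
                return factor * 4000;                   /* e.g. 0x0d -> 52000 ps */
        }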
88 | 88 | ||
89 | static int sprint_frac(char *dest, int value, int denom) | 89 | static int sprint_frac(char *dest, int value, int denom) |
90 | { | 90 | { |
91 | int frac = value % denom; | 91 | int frac = value % denom; |
92 | int result = sprintf(dest, "%d", value / denom); | 92 | int result = sprintf(dest, "%d", value / denom); |
93 | 93 | ||
94 | if (frac == 0) | 94 | if (frac == 0) |
95 | return result; | 95 | return result; |
96 | dest[result++] = '.'; | 96 | dest[result++] = '.'; |
97 | 97 | ||
98 | do { | 98 | do { |
99 | denom /= 10; | 99 | denom /= 10; |
100 | sprintf(dest + result, "%d", frac / denom); | 100 | sprintf(dest + result, "%d", frac / denom); |
101 | result++; | 101 | result++; |
102 | frac %= denom; | 102 | frac %= denom; |
103 | } while (frac); | 103 | } while (frac); |
104 | 104 | ||
105 | dest[result++] = '\0'; | 105 | dest[result++] = '\0'; |
106 | return result; | 106 | return result; |
107 | } | 107 | } |
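[Editorial note: a few hand-worked inputs for sprint_frac() above, to show that trailing zero digits are dropped once frac reaches zero.]

        /*
         * Worked examples (checked by hand, illustrative only):
         *   sprint_frac(buf,  3125, 1000) -> "3.125"
         *   sprint_frac(buf, 12500, 1000) -> "12.5"
         *   sprint_frac(buf,  6250, 1000) -> "6.25"
         */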
108 | 108 | ||
109 | /* Modification of scsi_wait_req that will clear UNIT ATTENTION conditions | 109 | /* Modification of scsi_wait_req that will clear UNIT ATTENTION conditions |
110 | * resulting from (likely) bus and device resets */ | 110 | * resulting from (likely) bus and device resets */ |
111 | static void spi_wait_req(struct scsi_request *sreq, const void *cmd, | 111 | static void spi_wait_req(struct scsi_request *sreq, const void *cmd, |
112 | void *buffer, unsigned bufflen) | 112 | void *buffer, unsigned bufflen) |
113 | { | 113 | { |
114 | int i; | 114 | int i; |
115 | 115 | ||
116 | for(i = 0; i < DV_RETRIES; i++) { | 116 | for(i = 0; i < DV_RETRIES; i++) { |
117 | sreq->sr_request->flags |= REQ_FAILFAST; | 117 | sreq->sr_request->flags |= REQ_FAILFAST; |
118 | 118 | ||
119 | scsi_wait_req(sreq, cmd, buffer, bufflen, | 119 | scsi_wait_req(sreq, cmd, buffer, bufflen, |
120 | DV_TIMEOUT, /* retries */ 1); | 120 | DV_TIMEOUT, /* retries */ 1); |
121 | if (sreq->sr_result & DRIVER_SENSE) { | 121 | if (sreq->sr_result & DRIVER_SENSE) { |
122 | struct scsi_sense_hdr sshdr; | 122 | struct scsi_sense_hdr sshdr; |
123 | 123 | ||
124 | if (scsi_request_normalize_sense(sreq, &sshdr) | 124 | if (scsi_request_normalize_sense(sreq, &sshdr) |
125 | && sshdr.sense_key == UNIT_ATTENTION) | 125 | && sshdr.sense_key == UNIT_ATTENTION) |
126 | continue; | 126 | continue; |
127 | } | 127 | } |
128 | break; | 128 | break; |
129 | } | 129 | } |
130 | } | 130 | } |
131 | 131 | ||
132 | static struct { | 132 | static struct { |
133 | enum spi_signal_type value; | 133 | enum spi_signal_type value; |
134 | char *name; | 134 | char *name; |
135 | } signal_types[] = { | 135 | } signal_types[] = { |
136 | { SPI_SIGNAL_UNKNOWN, "unknown" }, | 136 | { SPI_SIGNAL_UNKNOWN, "unknown" }, |
137 | { SPI_SIGNAL_SE, "SE" }, | 137 | { SPI_SIGNAL_SE, "SE" }, |
138 | { SPI_SIGNAL_LVD, "LVD" }, | 138 | { SPI_SIGNAL_LVD, "LVD" }, |
139 | { SPI_SIGNAL_HVD, "HVD" }, | 139 | { SPI_SIGNAL_HVD, "HVD" }, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | static inline const char *spi_signal_to_string(enum spi_signal_type type) | 142 | static inline const char *spi_signal_to_string(enum spi_signal_type type) |
143 | { | 143 | { |
144 | int i; | 144 | int i; |
145 | 145 | ||
146 | for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) { | 146 | for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) { |
147 | if (type == signal_types[i].value) | 147 | if (type == signal_types[i].value) |
148 | return signal_types[i].name; | 148 | return signal_types[i].name; |
149 | } | 149 | } |
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
152 | static inline enum spi_signal_type spi_signal_to_value(const char *name) | 152 | static inline enum spi_signal_type spi_signal_to_value(const char *name) |
153 | { | 153 | { |
154 | int i, len; | 154 | int i, len; |
155 | 155 | ||
156 | for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) { | 156 | for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) { |
157 | len = strlen(signal_types[i].name); | 157 | len = strlen(signal_types[i].name); |
158 | if (strncmp(name, signal_types[i].name, len) == 0 && | 158 | if (strncmp(name, signal_types[i].name, len) == 0 && |
159 | (name[len] == '\n' || name[len] == '\0')) | 159 | (name[len] == '\n' || name[len] == '\0')) |
160 | return signal_types[i].value; | 160 | return signal_types[i].value; |
161 | } | 161 | } |
162 | return SPI_SIGNAL_UNKNOWN; | 162 | return SPI_SIGNAL_UNKNOWN; |
163 | } | 163 | } |
164 | 164 | ||
165 | static int spi_host_setup(struct device *dev) | 165 | static int spi_host_setup(struct transport_container *tc, struct device *dev, |
166 | struct class_device *cdev) | ||
166 | { | 167 | { |
167 | struct Scsi_Host *shost = dev_to_shost(dev); | 168 | struct Scsi_Host *shost = dev_to_shost(dev); |
168 | 169 | ||
169 | spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; | 170 | spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; |
170 | 171 | ||
171 | return 0; | 172 | return 0; |
172 | } | 173 | } |
173 | 174 | ||
174 | static DECLARE_TRANSPORT_CLASS(spi_host_class, | 175 | static DECLARE_TRANSPORT_CLASS(spi_host_class, |
175 | "spi_host", | 176 | "spi_host", |
176 | spi_host_setup, | 177 | spi_host_setup, |
177 | NULL, | 178 | NULL, |
178 | NULL); | 179 | NULL); |
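[Editorial note: the widened setup signature above is the point of this commit; the callback now receives the transport_container and the class_device as well as the device, and spi_device_configure()/spi_setup_transport_attrs() below take the same three-argument form. A minimal sketch of what this enables for a transport class outside SCSI; all example_* names are hypothetical.]

        /* Hypothetical non-SCSI transport class using the new callback form. */
        static int example_setup(struct transport_container *tc,
                                 struct device *dev,
                                 struct class_device *cdev)
        {
                /* cdev is the class_device carrying this class's sysfs
                 * attributes, so per-device data can be hung off it directly. */
                return 0;
        }

        static DECLARE_TRANSPORT_CLASS(example_transport_class, "example",
                                       example_setup, NULL, NULL);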
179 | 180 | ||
180 | static int spi_host_match(struct attribute_container *cont, | 181 | static int spi_host_match(struct attribute_container *cont, |
181 | struct device *dev) | 182 | struct device *dev) |
182 | { | 183 | { |
183 | struct Scsi_Host *shost; | 184 | struct Scsi_Host *shost; |
184 | struct spi_internal *i; | 185 | struct spi_internal *i; |
185 | 186 | ||
186 | if (!scsi_is_host_device(dev)) | 187 | if (!scsi_is_host_device(dev)) |
187 | return 0; | 188 | return 0; |
188 | 189 | ||
189 | shost = dev_to_shost(dev); | 190 | shost = dev_to_shost(dev); |
190 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 191 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
191 | != &spi_host_class.class) | 192 | != &spi_host_class.class) |
192 | return 0; | 193 | return 0; |
193 | 194 | ||
194 | i = to_spi_internal(shost->transportt); | 195 | i = to_spi_internal(shost->transportt); |
195 | 196 | ||
196 | return &i->t.host_attrs.ac == cont; | 197 | return &i->t.host_attrs.ac == cont; |
197 | } | 198 | } |
198 | 199 | ||
199 | static int spi_device_configure(struct device *dev) | 200 | static int spi_device_configure(struct transport_container *tc, |
201 | struct device *dev, | ||
202 | struct class_device *cdev) | ||
200 | { | 203 | { |
201 | struct scsi_device *sdev = to_scsi_device(dev); | 204 | struct scsi_device *sdev = to_scsi_device(dev); |
202 | struct scsi_target *starget = sdev->sdev_target; | 205 | struct scsi_target *starget = sdev->sdev_target; |
203 | 206 | ||
204 | /* Populate the target capability fields with the values | 207 | /* Populate the target capability fields with the values |
205 | * gleaned from the device inquiry */ | 208 | * gleaned from the device inquiry */ |
206 | 209 | ||
207 | spi_support_sync(starget) = scsi_device_sync(sdev); | 210 | spi_support_sync(starget) = scsi_device_sync(sdev); |
208 | spi_support_wide(starget) = scsi_device_wide(sdev); | 211 | spi_support_wide(starget) = scsi_device_wide(sdev); |
209 | spi_support_dt(starget) = scsi_device_dt(sdev); | 212 | spi_support_dt(starget) = scsi_device_dt(sdev); |
210 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); | 213 | spi_support_dt_only(starget) = scsi_device_dt_only(sdev); |
211 | spi_support_ius(starget) = scsi_device_ius(sdev); | 214 | spi_support_ius(starget) = scsi_device_ius(sdev); |
212 | spi_support_qas(starget) = scsi_device_qas(sdev); | 215 | spi_support_qas(starget) = scsi_device_qas(sdev); |
213 | 216 | ||
214 | return 0; | 217 | return 0; |
215 | } | 218 | } |
216 | 219 | ||
217 | static int spi_setup_transport_attrs(struct device *dev) | 220 | static int spi_setup_transport_attrs(struct transport_container *tc, |
221 | struct device *dev, | ||
222 | struct class_device *cdev) | ||
218 | { | 223 | { |
219 | struct scsi_target *starget = to_scsi_target(dev); | 224 | struct scsi_target *starget = to_scsi_target(dev); |
220 | 225 | ||
221 | spi_period(starget) = -1; /* illegal value */ | 226 | spi_period(starget) = -1; /* illegal value */ |
222 | spi_min_period(starget) = 0; | 227 | spi_min_period(starget) = 0; |
223 | spi_offset(starget) = 0; /* async */ | 228 | spi_offset(starget) = 0; /* async */ |
224 | spi_max_offset(starget) = 255; | 229 | spi_max_offset(starget) = 255; |
225 | spi_width(starget) = 0; /* narrow */ | 230 | spi_width(starget) = 0; /* narrow */ |
226 | spi_max_width(starget) = 1; | 231 | spi_max_width(starget) = 1; |
227 | spi_iu(starget) = 0; /* no IU */ | 232 | spi_iu(starget) = 0; /* no IU */ |
228 | spi_dt(starget) = 0; /* ST */ | 233 | spi_dt(starget) = 0; /* ST */ |
229 | spi_qas(starget) = 0; | 234 | spi_qas(starget) = 0; |
230 | spi_wr_flow(starget) = 0; | 235 | spi_wr_flow(starget) = 0; |
231 | spi_rd_strm(starget) = 0; | 236 | spi_rd_strm(starget) = 0; |
232 | spi_rti(starget) = 0; | 237 | spi_rti(starget) = 0; |
233 | spi_pcomp_en(starget) = 0; | 238 | spi_pcomp_en(starget) = 0; |
234 | spi_hold_mcs(starget) = 0; | 239 | spi_hold_mcs(starget) = 0; |
235 | spi_dv_pending(starget) = 0; | 240 | spi_dv_pending(starget) = 0; |
236 | spi_initial_dv(starget) = 0; | 241 | spi_initial_dv(starget) = 0; |
237 | init_MUTEX(&spi_dv_sem(starget)); | 242 | init_MUTEX(&spi_dv_sem(starget)); |
238 | 243 | ||
239 | return 0; | 244 | return 0; |
240 | } | 245 | } |
241 | 246 | ||
242 | #define spi_transport_show_simple(field, format_string) \ | 247 | #define spi_transport_show_simple(field, format_string) \ |
243 | \ | 248 | \ |
244 | static ssize_t \ | 249 | static ssize_t \ |
245 | show_spi_transport_##field(struct class_device *cdev, char *buf) \ | 250 | show_spi_transport_##field(struct class_device *cdev, char *buf) \ |
246 | { \ | 251 | { \ |
247 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 252 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
248 | struct spi_transport_attrs *tp; \ | 253 | struct spi_transport_attrs *tp; \ |
249 | \ | 254 | \ |
250 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ | 255 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ |
251 | return snprintf(buf, 20, format_string, tp->field); \ | 256 | return snprintf(buf, 20, format_string, tp->field); \ |
252 | } | 257 | } |
253 | 258 | ||
254 | #define spi_transport_store_simple(field, format_string) \ | 259 | #define spi_transport_store_simple(field, format_string) \ |
255 | \ | 260 | \ |
256 | static ssize_t \ | 261 | static ssize_t \ |
257 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ | 262 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ |
258 | size_t count) \ | 263 | size_t count) \ |
259 | { \ | 264 | { \ |
260 | int val; \ | 265 | int val; \ |
261 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 266 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
262 | struct spi_transport_attrs *tp; \ | 267 | struct spi_transport_attrs *tp; \ |
263 | \ | 268 | \ |
264 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ | 269 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ |
265 | val = simple_strtoul(buf, NULL, 0); \ | 270 | val = simple_strtoul(buf, NULL, 0); \ |
266 | tp->field = val; \ | 271 | tp->field = val; \ |
267 | return count; \ | 272 | return count; \ |
268 | } | 273 | } |
269 | 274 | ||
270 | #define spi_transport_show_function(field, format_string) \ | 275 | #define spi_transport_show_function(field, format_string) \ |
271 | \ | 276 | \ |
272 | static ssize_t \ | 277 | static ssize_t \ |
273 | show_spi_transport_##field(struct class_device *cdev, char *buf) \ | 278 | show_spi_transport_##field(struct class_device *cdev, char *buf) \ |
274 | { \ | 279 | { \ |
275 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 280 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
276 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ | 281 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ |
277 | struct spi_transport_attrs *tp; \ | 282 | struct spi_transport_attrs *tp; \ |
278 | struct spi_internal *i = to_spi_internal(shost->transportt); \ | 283 | struct spi_internal *i = to_spi_internal(shost->transportt); \ |
279 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ | 284 | tp = (struct spi_transport_attrs *)&starget->starget_data; \ |
280 | if (i->f->get_##field) \ | 285 | if (i->f->get_##field) \ |
281 | i->f->get_##field(starget); \ | 286 | i->f->get_##field(starget); \ |
282 | return snprintf(buf, 20, format_string, tp->field); \ | 287 | return snprintf(buf, 20, format_string, tp->field); \ |
283 | } | 288 | } |
284 | 289 | ||
285 | #define spi_transport_store_function(field, format_string) \ | 290 | #define spi_transport_store_function(field, format_string) \ |
286 | static ssize_t \ | 291 | static ssize_t \ |
287 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ | 292 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ |
288 | size_t count) \ | 293 | size_t count) \ |
289 | { \ | 294 | { \ |
290 | int val; \ | 295 | int val; \ |
291 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 296 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
292 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ | 297 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ |
293 | struct spi_internal *i = to_spi_internal(shost->transportt); \ | 298 | struct spi_internal *i = to_spi_internal(shost->transportt); \ |
294 | \ | 299 | \ |
295 | val = simple_strtoul(buf, NULL, 0); \ | 300 | val = simple_strtoul(buf, NULL, 0); \ |
296 | i->f->set_##field(starget, val); \ | 301 | i->f->set_##field(starget, val); \ |
297 | return count; \ | 302 | return count; \ |
298 | } | 303 | } |
299 | 304 | ||
300 | #define spi_transport_store_max(field, format_string) \ | 305 | #define spi_transport_store_max(field, format_string) \ |
301 | static ssize_t \ | 306 | static ssize_t \ |
302 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ | 307 | store_spi_transport_##field(struct class_device *cdev, const char *buf, \ |
303 | size_t count) \ | 308 | size_t count) \ |
304 | { \ | 309 | { \ |
305 | int val; \ | 310 | int val; \ |
306 | struct scsi_target *starget = transport_class_to_starget(cdev); \ | 311 | struct scsi_target *starget = transport_class_to_starget(cdev); \ |
307 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ | 312 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ |
308 | struct spi_internal *i = to_spi_internal(shost->transportt); \ | 313 | struct spi_internal *i = to_spi_internal(shost->transportt); \ |
309 | struct spi_transport_attrs *tp \ | 314 | struct spi_transport_attrs *tp \ |
310 | = (struct spi_transport_attrs *)&starget->starget_data; \ | 315 | = (struct spi_transport_attrs *)&starget->starget_data; \ |
311 | \ | 316 | \ |
312 | val = simple_strtoul(buf, NULL, 0); \ | 317 | val = simple_strtoul(buf, NULL, 0); \ |
313 | if (val > tp->max_##field) \ | 318 | if (val > tp->max_##field) \ |
314 | val = tp->max_##field; \ | 319 | val = tp->max_##field; \ |
315 | i->f->set_##field(starget, val); \ | 320 | i->f->set_##field(starget, val); \ |
316 | return count; \ | 321 | return count; \ |
317 | } | 322 | } |
318 | 323 | ||
319 | #define spi_transport_rd_attr(field, format_string) \ | 324 | #define spi_transport_rd_attr(field, format_string) \ |
320 | spi_transport_show_function(field, format_string) \ | 325 | spi_transport_show_function(field, format_string) \ |
321 | spi_transport_store_function(field, format_string) \ | 326 | spi_transport_store_function(field, format_string) \ |
322 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ | 327 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ |
323 | show_spi_transport_##field, \ | 328 | show_spi_transport_##field, \ |
324 | store_spi_transport_##field); | 329 | store_spi_transport_##field); |
325 | 330 | ||
326 | #define spi_transport_simple_attr(field, format_string) \ | 331 | #define spi_transport_simple_attr(field, format_string) \ |
327 | spi_transport_show_simple(field, format_string) \ | 332 | spi_transport_show_simple(field, format_string) \ |
328 | spi_transport_store_simple(field, format_string) \ | 333 | spi_transport_store_simple(field, format_string) \ |
329 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ | 334 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ |
330 | show_spi_transport_##field, \ | 335 | show_spi_transport_##field, \ |
331 | store_spi_transport_##field); | 336 | store_spi_transport_##field); |
332 | 337 | ||
333 | #define spi_transport_max_attr(field, format_string) \ | 338 | #define spi_transport_max_attr(field, format_string) \ |
334 | spi_transport_show_function(field, format_string) \ | 339 | spi_transport_show_function(field, format_string) \ |
335 | spi_transport_store_max(field, format_string) \ | 340 | spi_transport_store_max(field, format_string) \ |
336 | spi_transport_simple_attr(max_##field, format_string) \ | 341 | spi_transport_simple_attr(max_##field, format_string) \ |
337 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ | 342 | static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \ |
338 | show_spi_transport_##field, \ | 343 | show_spi_transport_##field, \ |
339 | store_spi_transport_##field); | 344 | store_spi_transport_##field); |
340 | 345 | ||
341 | /* The Parallel SCSI Transport Attributes: */ | 346 | /* The Parallel SCSI Transport Attributes: */ |
342 | spi_transport_max_attr(offset, "%d\n"); | 347 | spi_transport_max_attr(offset, "%d\n"); |
343 | spi_transport_max_attr(width, "%d\n"); | 348 | spi_transport_max_attr(width, "%d\n"); |
344 | spi_transport_rd_attr(iu, "%d\n"); | 349 | spi_transport_rd_attr(iu, "%d\n"); |
345 | spi_transport_rd_attr(dt, "%d\n"); | 350 | spi_transport_rd_attr(dt, "%d\n"); |
346 | spi_transport_rd_attr(qas, "%d\n"); | 351 | spi_transport_rd_attr(qas, "%d\n"); |
347 | spi_transport_rd_attr(wr_flow, "%d\n"); | 352 | spi_transport_rd_attr(wr_flow, "%d\n"); |
348 | spi_transport_rd_attr(rd_strm, "%d\n"); | 353 | spi_transport_rd_attr(rd_strm, "%d\n"); |
349 | spi_transport_rd_attr(rti, "%d\n"); | 354 | spi_transport_rd_attr(rti, "%d\n"); |
350 | spi_transport_rd_attr(pcomp_en, "%d\n"); | 355 | spi_transport_rd_attr(pcomp_en, "%d\n"); |
351 | spi_transport_rd_attr(hold_mcs, "%d\n"); | 356 | spi_transport_rd_attr(hold_mcs, "%d\n"); |
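[Editorial note: each line above stamps out a show routine, a store routine and a class_device attribute via the macros defined earlier. Roughly (a sketch, not literal preprocessor output), spi_transport_rd_attr(iu, "%d\n") yields:]

        /* Sketch of what one spi_transport_rd_attr() invocation produces. */
        static ssize_t show_spi_transport_iu(struct class_device *cdev, char *buf);
        static ssize_t store_spi_transport_iu(struct class_device *cdev,
                                              const char *buf, size_t count);
        static CLASS_DEVICE_ATTR(iu, S_IRUGO | S_IWUSR,
                                 show_spi_transport_iu, store_spi_transport_iu);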
352 | 357 | ||
353 | /* we only care about the first child device so we return 1 */ | 358 | /* we only care about the first child device so we return 1 */ |
354 | static int child_iter(struct device *dev, void *data) | 359 | static int child_iter(struct device *dev, void *data) |
355 | { | 360 | { |
356 | struct scsi_device *sdev = to_scsi_device(dev); | 361 | struct scsi_device *sdev = to_scsi_device(dev); |
357 | 362 | ||
358 | spi_dv_device(sdev); | 363 | spi_dv_device(sdev); |
359 | return 1; | 364 | return 1; |
360 | } | 365 | } |
361 | 366 | ||
362 | static ssize_t | 367 | static ssize_t |
363 | store_spi_revalidate(struct class_device *cdev, const char *buf, size_t count) | 368 | store_spi_revalidate(struct class_device *cdev, const char *buf, size_t count) |
364 | { | 369 | { |
365 | struct scsi_target *starget = transport_class_to_starget(cdev); | 370 | struct scsi_target *starget = transport_class_to_starget(cdev); |
366 | 371 | ||
367 | device_for_each_child(&starget->dev, NULL, child_iter); | 372 | device_for_each_child(&starget->dev, NULL, child_iter); |
368 | return count; | 373 | return count; |
369 | } | 374 | } |
370 | static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); | 375 | static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); |
371 | 376 | ||
372 | /* Translate the period into ns according to the current spec | 377 | /* Translate the period into ns according to the current spec |
373 | * for SDTR/PPR messages */ | 378 | * for SDTR/PPR messages */ |
374 | static ssize_t | 379 | static ssize_t |
375 | show_spi_transport_period_helper(struct class_device *cdev, char *buf, | 380 | show_spi_transport_period_helper(struct class_device *cdev, char *buf, |
376 | int period) | 381 | int period) |
377 | { | 382 | { |
378 | int len, picosec; | 383 | int len, picosec; |
379 | 384 | ||
380 | if (period < 0 || period > 0xff) { | 385 | if (period < 0 || period > 0xff) { |
381 | picosec = -1; | 386 | picosec = -1; |
382 | } else if (period <= SPI_STATIC_PPR) { | 387 | } else if (period <= SPI_STATIC_PPR) { |
383 | picosec = ppr_to_ps[period]; | 388 | picosec = ppr_to_ps[period]; |
384 | } else { | 389 | } else { |
385 | picosec = period * 4000; | 390 | picosec = period * 4000; |
386 | } | 391 | } |
387 | 392 | ||
388 | if (picosec == -1) { | 393 | if (picosec == -1) { |
389 | len = sprintf(buf, "reserved"); | 394 | len = sprintf(buf, "reserved"); |
390 | } else { | 395 | } else { |
391 | len = sprint_frac(buf, picosec, 1000); | 396 | len = sprint_frac(buf, picosec, 1000); |
392 | } | 397 | } |
393 | 398 | ||
394 | buf[len++] = '\n'; | 399 | buf[len++] = '\n'; |
395 | buf[len] = '\0'; | 400 | buf[len] = '\0'; |
396 | return len; | 401 | return len; |
397 | } | 402 | } |
398 | 403 | ||
399 | static ssize_t | 404 | static ssize_t |
400 | store_spi_transport_period_helper(struct class_device *cdev, const char *buf, | 405 | store_spi_transport_period_helper(struct class_device *cdev, const char *buf, |
401 | size_t count, int *periodp) | 406 | size_t count, int *periodp) |
402 | { | 407 | { |
403 | int j, picosec, period = -1; | 408 | int j, picosec, period = -1; |
404 | char *endp; | 409 | char *endp; |
405 | 410 | ||
406 | picosec = simple_strtoul(buf, &endp, 10) * 1000; | 411 | picosec = simple_strtoul(buf, &endp, 10) * 1000; |
407 | if (*endp == '.') { | 412 | if (*endp == '.') { |
408 | int mult = 100; | 413 | int mult = 100; |
409 | do { | 414 | do { |
410 | endp++; | 415 | endp++; |
411 | if (!isdigit(*endp)) | 416 | if (!isdigit(*endp)) |
412 | break; | 417 | break; |
413 | picosec += (*endp - '0') * mult; | 418 | picosec += (*endp - '0') * mult; |
414 | mult /= 10; | 419 | mult /= 10; |
415 | } while (mult > 0); | 420 | } while (mult > 0); |
416 | } | 421 | } |
417 | 422 | ||
418 | for (j = 0; j <= SPI_STATIC_PPR; j++) { | 423 | for (j = 0; j <= SPI_STATIC_PPR; j++) { |
419 | if (ppr_to_ps[j] < picosec) | 424 | if (ppr_to_ps[j] < picosec) |
420 | continue; | 425 | continue; |
421 | period = j; | 426 | period = j; |
422 | break; | 427 | break; |
423 | } | 428 | } |
424 | 429 | ||
425 | if (period == -1) | 430 | if (period == -1) |
426 | period = picosec / 4000; | 431 | period = picosec / 4000; |
427 | 432 | ||
428 | if (period > 0xff) | 433 | if (period > 0xff) |
429 | period = 0xff; | 434 | period = 0xff; |
430 | 435 | ||
431 | *periodp = period; | 436 | *periodp = period; |
432 | 437 | ||
433 | return count; | 438 | return count; |
434 | } | 439 | } |
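[Editorial note: some hand-worked inputs for the parser above, to make the rounding behaviour concrete; illustrative, not exhaustive.]

        /*
         * "3.125" -> 3125 ps  -> ppr_to_ps[0x07] matches exactly   -> factor 0x07
         * "10"    -> 10000 ps -> rounded up to ppr_to_ps[0x09]     -> factor 0x09
         * "80"    -> 80000 ps -> beyond the table, 80000 / 4000    -> factor 0x14
         */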
435 | 440 | ||
436 | static ssize_t | 441 | static ssize_t |
437 | show_spi_transport_period(struct class_device *cdev, char *buf) | 442 | show_spi_transport_period(struct class_device *cdev, char *buf) |
438 | { | 443 | { |
439 | struct scsi_target *starget = transport_class_to_starget(cdev); | 444 | struct scsi_target *starget = transport_class_to_starget(cdev); |
440 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 445 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
441 | struct spi_internal *i = to_spi_internal(shost->transportt); | 446 | struct spi_internal *i = to_spi_internal(shost->transportt); |
442 | struct spi_transport_attrs *tp = | 447 | struct spi_transport_attrs *tp = |
443 | (struct spi_transport_attrs *)&starget->starget_data; | 448 | (struct spi_transport_attrs *)&starget->starget_data; |
444 | 449 | ||
445 | if (i->f->get_period) | 450 | if (i->f->get_period) |
446 | i->f->get_period(starget); | 451 | i->f->get_period(starget); |
447 | 452 | ||
448 | return show_spi_transport_period_helper(cdev, buf, tp->period); | 453 | return show_spi_transport_period_helper(cdev, buf, tp->period); |
449 | } | 454 | } |
450 | 455 | ||
451 | static ssize_t | 456 | static ssize_t |
452 | store_spi_transport_period(struct class_device *cdev, const char *buf, | 457 | store_spi_transport_period(struct class_device *cdev, const char *buf, |
453 | size_t count) | 458 | size_t count) |
454 | { | 459 | { |
455 | struct scsi_target *starget = transport_class_to_starget(cdev); | 460 | struct scsi_target *starget = transport_class_to_starget(cdev); |
456 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 461 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
457 | struct spi_internal *i = to_spi_internal(shost->transportt); | 462 | struct spi_internal *i = to_spi_internal(shost->transportt); |
458 | struct spi_transport_attrs *tp = | 463 | struct spi_transport_attrs *tp = |
459 | (struct spi_transport_attrs *)&starget->starget_data; | 464 | (struct spi_transport_attrs *)&starget->starget_data; |
460 | int period, retval; | 465 | int period, retval; |
461 | 466 | ||
462 | retval = store_spi_transport_period_helper(cdev, buf, count, &period); | 467 | retval = store_spi_transport_period_helper(cdev, buf, count, &period); |
463 | 468 | ||
464 | if (period < tp->min_period) | 469 | if (period < tp->min_period) |
465 | period = tp->min_period; | 470 | period = tp->min_period; |
466 | 471 | ||
467 | i->f->set_period(starget, period); | 472 | i->f->set_period(starget, period); |
468 | 473 | ||
469 | return retval; | 474 | return retval; |
470 | } | 475 | } |
471 | 476 | ||
472 | static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR, | 477 | static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR, |
473 | show_spi_transport_period, | 478 | show_spi_transport_period, |
474 | store_spi_transport_period); | 479 | store_spi_transport_period); |
475 | 480 | ||
476 | static ssize_t | 481 | static ssize_t |
477 | show_spi_transport_min_period(struct class_device *cdev, char *buf) | 482 | show_spi_transport_min_period(struct class_device *cdev, char *buf) |
478 | { | 483 | { |
479 | struct scsi_target *starget = transport_class_to_starget(cdev); | 484 | struct scsi_target *starget = transport_class_to_starget(cdev); |
480 | struct spi_transport_attrs *tp = | 485 | struct spi_transport_attrs *tp = |
481 | (struct spi_transport_attrs *)&starget->starget_data; | 486 | (struct spi_transport_attrs *)&starget->starget_data; |
482 | 487 | ||
483 | return show_spi_transport_period_helper(cdev, buf, tp->min_period); | 488 | return show_spi_transport_period_helper(cdev, buf, tp->min_period); |
484 | } | 489 | } |
485 | 490 | ||
486 | static ssize_t | 491 | static ssize_t |
487 | store_spi_transport_min_period(struct class_device *cdev, const char *buf, | 492 | store_spi_transport_min_period(struct class_device *cdev, const char *buf, |
488 | size_t count) | 493 | size_t count) |
489 | { | 494 | { |
490 | struct scsi_target *starget = transport_class_to_starget(cdev); | 495 | struct scsi_target *starget = transport_class_to_starget(cdev); |
491 | struct spi_transport_attrs *tp = | 496 | struct spi_transport_attrs *tp = |
492 | (struct spi_transport_attrs *)&starget->starget_data; | 497 | (struct spi_transport_attrs *)&starget->starget_data; |
493 | 498 | ||
494 | return store_spi_transport_period_helper(cdev, buf, count, | 499 | return store_spi_transport_period_helper(cdev, buf, count, |
495 | &tp->min_period); | 500 | &tp->min_period); |
496 | } | 501 | } |
497 | 502 | ||
498 | 503 | ||
499 | static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR, | 504 | static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR, |
500 | show_spi_transport_min_period, | 505 | show_spi_transport_min_period, |
501 | store_spi_transport_min_period); | 506 | store_spi_transport_min_period); |
502 | 507 | ||
503 | 508 | ||
504 | static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf) | 509 | static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf) |
505 | { | 510 | { |
506 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 511 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
507 | struct spi_internal *i = to_spi_internal(shost->transportt); | 512 | struct spi_internal *i = to_spi_internal(shost->transportt); |
508 | 513 | ||
509 | if (i->f->get_signalling) | 514 | if (i->f->get_signalling) |
510 | i->f->get_signalling(shost); | 515 | i->f->get_signalling(shost); |
511 | 516 | ||
512 | return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); | 517 | return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); |
513 | } | 518 | } |
514 | static ssize_t store_spi_host_signalling(struct class_device *cdev, | 519 | static ssize_t store_spi_host_signalling(struct class_device *cdev, |
515 | const char *buf, size_t count) | 520 | const char *buf, size_t count) |
516 | { | 521 | { |
517 | struct Scsi_Host *shost = transport_class_to_shost(cdev); | 522 | struct Scsi_Host *shost = transport_class_to_shost(cdev); |
518 | struct spi_internal *i = to_spi_internal(shost->transportt); | 523 | struct spi_internal *i = to_spi_internal(shost->transportt); |
519 | enum spi_signal_type type = spi_signal_to_value(buf); | 524 | enum spi_signal_type type = spi_signal_to_value(buf); |
520 | 525 | ||
521 | if (type != SPI_SIGNAL_UNKNOWN) | 526 | if (type != SPI_SIGNAL_UNKNOWN) |
522 | i->f->set_signalling(shost, type); | 527 | i->f->set_signalling(shost, type); |
523 | 528 | ||
524 | return count; | 529 | return count; |
525 | } | 530 | } |
526 | static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR, | 531 | static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR, |
527 | show_spi_host_signalling, | 532 | show_spi_host_signalling, |
528 | store_spi_host_signalling); | 533 | store_spi_host_signalling); |
529 | 534 | ||
530 | #define DV_SET(x, y) \ | 535 | #define DV_SET(x, y) \ |
531 | if(i->f->set_##x) \ | 536 | if(i->f->set_##x) \ |
532 | i->f->set_##x(sdev->sdev_target, y) | 537 | i->f->set_##x(sdev->sdev_target, y) |
533 | 538 | ||
534 | enum spi_compare_returns { | 539 | enum spi_compare_returns { |
535 | SPI_COMPARE_SUCCESS, | 540 | SPI_COMPARE_SUCCESS, |
536 | SPI_COMPARE_FAILURE, | 541 | SPI_COMPARE_FAILURE, |
537 | SPI_COMPARE_SKIP_TEST, | 542 | SPI_COMPARE_SKIP_TEST, |
538 | }; | 543 | }; |
539 | 544 | ||
540 | 545 | ||
541 | /* This is for read/write Domain Validation: If the device supports | 546 | /* This is for read/write Domain Validation: If the device supports |
542 | * an echo buffer, we do read/write tests to it */ | 547 | * an echo buffer, we do read/write tests to it */ |
543 | static enum spi_compare_returns | 548 | static enum spi_compare_returns |
544 | spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer, | 549 | spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer, |
545 | u8 *ptr, const int retries) | 550 | u8 *ptr, const int retries) |
546 | { | 551 | { |
547 | struct scsi_device *sdev = sreq->sr_device; | 552 | struct scsi_device *sdev = sreq->sr_device; |
548 | int len = ptr - buffer; | 553 | int len = ptr - buffer; |
549 | int j, k, r; | 554 | int j, k, r; |
550 | unsigned int pattern = 0x0000ffff; | 555 | unsigned int pattern = 0x0000ffff; |
551 | 556 | ||
552 | const char spi_write_buffer[] = { | 557 | const char spi_write_buffer[] = { |
553 | WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 | 558 | WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 |
554 | }; | 559 | }; |
555 | const char spi_read_buffer[] = { | 560 | const char spi_read_buffer[] = { |
556 | READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 | 561 | READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 |
557 | }; | 562 | }; |
558 | 563 | ||
559 | /* set up the pattern buffer. Doesn't matter if we spill | 564 | /* set up the pattern buffer. Doesn't matter if we spill |
560 | * slightly beyond since that's where the read buffer is */ | 565 | * slightly beyond since that's where the read buffer is */ |
561 | for (j = 0; j < len; ) { | 566 | for (j = 0; j < len; ) { |
562 | 567 | ||
563 | /* fill the buffer with counting (test a) */ | 568 | /* fill the buffer with counting (test a) */ |
564 | for ( ; j < min(len, 32); j++) | 569 | for ( ; j < min(len, 32); j++) |
565 | buffer[j] = j; | 570 | buffer[j] = j; |
566 | k = j; | 571 | k = j; |
567 | /* fill the buffer with alternating words of 0x0 and | 572 | /* fill the buffer with alternating words of 0x0 and |
568 | * 0xffff (test b) */ | 573 | * 0xffff (test b) */ |
569 | for ( ; j < min(len, k + 32); j += 2) { | 574 | for ( ; j < min(len, k + 32); j += 2) { |
570 | u16 *word = (u16 *)&buffer[j]; | 575 | u16 *word = (u16 *)&buffer[j]; |
571 | 576 | ||
572 | *word = (j & 0x02) ? 0x0000 : 0xffff; | 577 | *word = (j & 0x02) ? 0x0000 : 0xffff; |
573 | } | 578 | } |
574 | k = j; | 579 | k = j; |
575 | /* fill with crosstalk (alternating 0x5555 0xaaaa) | 580 | /* fill with crosstalk (alternating 0x5555 0xaaaa) |
576 | * (test c) */ | 581 | * (test c) */ |
577 | for ( ; j < min(len, k + 32); j += 2) { | 582 | for ( ; j < min(len, k + 32); j += 2) { |
578 | u16 *word = (u16 *)&buffer[j]; | 583 | u16 *word = (u16 *)&buffer[j]; |
579 | 584 | ||
580 | *word = (j & 0x02) ? 0x5555 : 0xaaaa; | 585 | *word = (j & 0x02) ? 0x5555 : 0xaaaa; |
581 | } | 586 | } |
582 | k = j; | 587 | k = j; |
583 | /* fill with shifting bits (test d) */ | 588 | /* fill with shifting bits (test d) */ |
584 | for ( ; j < min(len, k + 32); j += 4) { | 589 | for ( ; j < min(len, k + 32); j += 4) { |
585 | u32 *word = (unsigned int *)&buffer[j]; | 590 | u32 *word = (unsigned int *)&buffer[j]; |
586 | u32 roll = (pattern & 0x80000000) ? 1 : 0; | 591 | u32 roll = (pattern & 0x80000000) ? 1 : 0; |
587 | 592 | ||
588 | *word = pattern; | 593 | *word = pattern; |
589 | pattern = (pattern << 1) | roll; | 594 | pattern = (pattern << 1) | roll; |
590 | } | 595 | } |
591 | /* don't bother with random data (test e) */ | 596 | /* don't bother with random data (test e) */ |
592 | } | 597 | } |
593 | 598 | ||
594 | for (r = 0; r < retries; r++) { | 599 | for (r = 0; r < retries; r++) { |
595 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ | 600 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ |
596 | sreq->sr_data_direction = DMA_TO_DEVICE; | 601 | sreq->sr_data_direction = DMA_TO_DEVICE; |
597 | spi_wait_req(sreq, spi_write_buffer, buffer, len); | 602 | spi_wait_req(sreq, spi_write_buffer, buffer, len); |
598 | if(sreq->sr_result || !scsi_device_online(sdev)) { | 603 | if(sreq->sr_result || !scsi_device_online(sdev)) { |
599 | struct scsi_sense_hdr sshdr; | 604 | struct scsi_sense_hdr sshdr; |
600 | 605 | ||
601 | scsi_device_set_state(sdev, SDEV_QUIESCE); | 606 | scsi_device_set_state(sdev, SDEV_QUIESCE); |
602 | if (scsi_request_normalize_sense(sreq, &sshdr) | 607 | if (scsi_request_normalize_sense(sreq, &sshdr) |
603 | && sshdr.sense_key == ILLEGAL_REQUEST | 608 | && sshdr.sense_key == ILLEGAL_REQUEST |
604 | /* INVALID FIELD IN CDB */ | 609 | /* INVALID FIELD IN CDB */ |
605 | && sshdr.asc == 0x24 && sshdr.ascq == 0x00) | 610 | && sshdr.asc == 0x24 && sshdr.ascq == 0x00) |
606 | /* This would mean that the drive lied | 611 | /* This would mean that the drive lied |
607 | * to us about supporting an echo | 612 | * to us about supporting an echo |
608 | * buffer (unfortunately some Western | 613 | * buffer (unfortunately some Western |
609 | * Digital drives do precisely this) | 614 | * Digital drives do precisely this) |
610 | */ | 615 | */ |
611 | return SPI_COMPARE_SKIP_TEST; | 616 | return SPI_COMPARE_SKIP_TEST; |
612 | 617 | ||
613 | 618 | ||
614 | SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result); | 619 | SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result); |
615 | return SPI_COMPARE_FAILURE; | 620 | return SPI_COMPARE_FAILURE; |
616 | } | 621 | } |
617 | 622 | ||
618 | memset(ptr, 0, len); | 623 | memset(ptr, 0, len); |
619 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ | 624 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ |
620 | sreq->sr_data_direction = DMA_FROM_DEVICE; | 625 | sreq->sr_data_direction = DMA_FROM_DEVICE; |
621 | spi_wait_req(sreq, spi_read_buffer, ptr, len); | 626 | spi_wait_req(sreq, spi_read_buffer, ptr, len); |
622 | scsi_device_set_state(sdev, SDEV_QUIESCE); | 627 | scsi_device_set_state(sdev, SDEV_QUIESCE); |
623 | 628 | ||
624 | if (memcmp(buffer, ptr, len) != 0) | 629 | if (memcmp(buffer, ptr, len) != 0) |
625 | return SPI_COMPARE_FAILURE; | 630 | return SPI_COMPARE_FAILURE; |
626 | } | 631 | } |
627 | return SPI_COMPARE_SUCCESS; | 632 | return SPI_COMPARE_SUCCESS; |
628 | } | 633 | } |
629 | 634 | ||
630 | /* This is for the simplest form of Domain Validation: a read test | 635 | /* This is for the simplest form of Domain Validation: a read test |
631 | * on the inquiry data from the device */ | 636 | * on the inquiry data from the device */ |
632 | static enum spi_compare_returns | 637 | static enum spi_compare_returns |
633 | spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer, | 638 | spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer, |
634 | u8 *ptr, const int retries) | 639 | u8 *ptr, const int retries) |
635 | { | 640 | { |
636 | int r; | 641 | int r; |
637 | const int len = sreq->sr_device->inquiry_len; | 642 | const int len = sreq->sr_device->inquiry_len; |
638 | struct scsi_device *sdev = sreq->sr_device; | 643 | struct scsi_device *sdev = sreq->sr_device; |
639 | const char spi_inquiry[] = { | 644 | const char spi_inquiry[] = { |
640 | INQUIRY, 0, 0, 0, len, 0 | 645 | INQUIRY, 0, 0, 0, len, 0 |
641 | }; | 646 | }; |
642 | 647 | ||
643 | for (r = 0; r < retries; r++) { | 648 | for (r = 0; r < retries; r++) { |
644 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ | 649 | sreq->sr_cmd_len = 0; /* wait_req to fill in */ |
645 | sreq->sr_data_direction = DMA_FROM_DEVICE; | 650 | sreq->sr_data_direction = DMA_FROM_DEVICE; |
646 | 651 | ||
647 | memset(ptr, 0, len); | 652 | memset(ptr, 0, len); |
648 | 653 | ||
649 | spi_wait_req(sreq, spi_inquiry, ptr, len); | 654 | spi_wait_req(sreq, spi_inquiry, ptr, len); |
650 | 655 | ||
651 | if(sreq->sr_result || !scsi_device_online(sdev)) { | 656 | if(sreq->sr_result || !scsi_device_online(sdev)) { |
652 | scsi_device_set_state(sdev, SDEV_QUIESCE); | 657 | scsi_device_set_state(sdev, SDEV_QUIESCE); |
653 | return SPI_COMPARE_FAILURE; | 658 | return SPI_COMPARE_FAILURE; |
654 | } | 659 | } |
655 | 660 | ||
656 | /* If we don't have the inquiry data already, the | 661 | /* If we don't have the inquiry data already, the |
657 | * first read gets it */ | 662 | * first read gets it */ |
658 | if (ptr == buffer) { | 663 | if (ptr == buffer) { |
659 | ptr += len; | 664 | ptr += len; |
660 | --r; | 665 | --r; |
661 | continue; | 666 | continue; |
662 | } | 667 | } |
663 | 668 | ||
664 | if (memcmp(buffer, ptr, len) != 0) | 669 | if (memcmp(buffer, ptr, len) != 0) |
665 | /* failure */ | 670 | /* failure */ |
666 | return SPI_COMPARE_FAILURE; | 671 | return SPI_COMPARE_FAILURE; |
667 | } | 672 | } |
668 | return SPI_COMPARE_SUCCESS; | 673 | return SPI_COMPARE_SUCCESS; |
669 | } | 674 | } |
670 | 675 | ||
671 | static enum spi_compare_returns | 676 | static enum spi_compare_returns |
672 | spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr, | 677 | spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr, |
673 | enum spi_compare_returns | 678 | enum spi_compare_returns |
674 | (*compare_fn)(struct scsi_request *, u8 *, u8 *, int)) | 679 | (*compare_fn)(struct scsi_request *, u8 *, u8 *, int)) |
675 | { | 680 | { |
676 | struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); | 681 | struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); |
677 | struct scsi_device *sdev = sreq->sr_device; | 682 | struct scsi_device *sdev = sreq->sr_device; |
678 | struct scsi_target *starget = sdev->sdev_target; | 683 | struct scsi_target *starget = sdev->sdev_target; |
679 | int period = 0, prevperiod = 0; | 684 | int period = 0, prevperiod = 0; |
680 | enum spi_compare_returns retval; | 685 | enum spi_compare_returns retval; |
681 | 686 | ||
682 | 687 | ||
683 | for (;;) { | 688 | for (;;) { |
684 | int newperiod; | 689 | int newperiod; |
685 | retval = compare_fn(sreq, buffer, ptr, DV_LOOPS); | 690 | retval = compare_fn(sreq, buffer, ptr, DV_LOOPS); |
686 | 691 | ||
687 | if (retval == SPI_COMPARE_SUCCESS | 692 | if (retval == SPI_COMPARE_SUCCESS |
688 | || retval == SPI_COMPARE_SKIP_TEST) | 693 | || retval == SPI_COMPARE_SKIP_TEST) |
689 | break; | 694 | break; |
690 | 695 | ||
691 | /* OK, retrain, fallback */ | 696 | /* OK, retrain, fallback */ |
692 | if (i->f->get_iu) | 697 | if (i->f->get_iu) |
693 | i->f->get_iu(starget); | 698 | i->f->get_iu(starget); |
694 | if (i->f->get_qas) | 699 | if (i->f->get_qas) |
695 | i->f->get_qas(starget); | 700 | i->f->get_qas(starget); |
696 | if (i->f->get_period) | 701 | if (i->f->get_period) |
697 | i->f->get_period(sdev->sdev_target); | 702 | i->f->get_period(sdev->sdev_target); |
698 | 703 | ||
699 | /* Here's the fallback sequence; first try turning off | 704 | /* Here's the fallback sequence; first try turning off |
700 | * IU, then QAS (if we can control them), then finally | 705 | * IU, then QAS (if we can control them), then finally |
701 | * fall down the periods */ | 706 | * fall down the periods */ |
702 | if (i->f->set_iu && spi_iu(starget)) { | 707 | if (i->f->set_iu && spi_iu(starget)) { |
703 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Information Units\n"); | 708 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Information Units\n"); |
704 | DV_SET(iu, 0); | 709 | DV_SET(iu, 0); |
705 | } else if (i->f->set_qas && spi_qas(starget)) { | 710 | } else if (i->f->set_qas && spi_qas(starget)) { |
706 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Quick Arbitration and Selection\n"); | 711 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Quick Arbitration and Selection\n"); |
707 | DV_SET(qas, 0); | 712 | DV_SET(qas, 0); |
708 | } else { | 713 | } else { |
709 | newperiod = spi_period(starget); | 714 | newperiod = spi_period(starget); |
710 | period = newperiod > period ? newperiod : period; | 715 | period = newperiod > period ? newperiod : period; |
711 | if (period < 0x0d) | 716 | if (period < 0x0d) |
712 | period++; | 717 | period++; |
713 | else | 718 | else |
714 | period += period >> 1; | 719 | period += period >> 1; |
715 | 720 | ||
716 | if (unlikely(period > 0xff || period == prevperiod)) { | 721 | if (unlikely(period > 0xff || period == prevperiod)) { |
717 | /* Total failure; set to async and return */ | 722 | /* Total failure; set to async and return */ |
718 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n"); | 723 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n"); |
719 | DV_SET(offset, 0); | 724 | DV_SET(offset, 0); |
720 | return SPI_COMPARE_FAILURE; | 725 | return SPI_COMPARE_FAILURE; |
721 | } | 726 | } |
722 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation detected failure, dropping back\n"); | 727 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation detected failure, dropping back\n"); |
723 | DV_SET(period, period); | 728 | DV_SET(period, period); |
724 | prevperiod = period; | 729 | prevperiod = period; |
725 | } | 730 | } |
726 | } | 731 | } |
727 | return retval; | 732 | return retval; |
728 | } | 733 | } |
729 | 734 | ||
730 | static int | 735 | static int |
731 | spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer) | 736 | spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer) |
732 | { | 737 | { |
733 | int l; | 738 | int l; |
734 | 739 | ||
735 | /* first off do a test unit ready. This can error out | 740 | /* first off do a test unit ready. This can error out |
736 | * because of reservations or some other reason. If it | 741 | * because of reservations or some other reason. If it |
737 | * fails, the device won't let us write to the echo buffer | 742 | * fails, the device won't let us write to the echo buffer |
738 | * so just return failure */ | 743 | * so just return failure */ |
739 | 744 | ||
740 | const char spi_test_unit_ready[] = { | 745 | const char spi_test_unit_ready[] = { |
741 | TEST_UNIT_READY, 0, 0, 0, 0, 0 | 746 | TEST_UNIT_READY, 0, 0, 0, 0, 0 |
742 | }; | 747 | }; |
743 | 748 | ||
744 | const char spi_read_buffer_descriptor[] = { | 749 | const char spi_read_buffer_descriptor[] = { |
745 | READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 | 750 | READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 |
746 | }; | 751 | }; |
747 | 752 | ||
748 | 753 | ||
749 | sreq->sr_cmd_len = 0; | 754 | sreq->sr_cmd_len = 0; |
750 | sreq->sr_data_direction = DMA_NONE; | 755 | sreq->sr_data_direction = DMA_NONE; |
751 | 756 | ||
752 | /* We send a set of three TURs to clear any outstanding | 757 | /* We send a set of three TURs to clear any outstanding |
753 | * unit attention conditions if they exist (Otherwise the | 758 | * unit attention conditions if they exist (Otherwise the |
754 | * buffer tests won't be happy). If the TUR still fails | 759 | * buffer tests won't be happy). If the TUR still fails |
755 | * (reservation conflict, device not ready, etc) just | 760 | * (reservation conflict, device not ready, etc) just |
756 | * skip the write tests */ | 761 | * skip the write tests */ |
757 | for (l = 0; ; l++) { | 762 | for (l = 0; ; l++) { |
758 | spi_wait_req(sreq, spi_test_unit_ready, NULL, 0); | 763 | spi_wait_req(sreq, spi_test_unit_ready, NULL, 0); |
759 | 764 | ||
760 | if(sreq->sr_result) { | 765 | if(sreq->sr_result) { |
761 | if(l >= 3) | 766 | if(l >= 3) |
762 | return 0; | 767 | return 0; |
763 | } else { | 768 | } else { |
764 | /* TUR succeeded */ | 769 | /* TUR succeeded */ |
765 | break; | 770 | break; |
766 | } | 771 | } |
767 | } | 772 | } |
768 | 773 | ||
769 | sreq->sr_cmd_len = 0; | 774 | sreq->sr_cmd_len = 0; |
770 | sreq->sr_data_direction = DMA_FROM_DEVICE; | 775 | sreq->sr_data_direction = DMA_FROM_DEVICE; |
771 | 776 | ||
772 | spi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4); | 777 | spi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4); |
773 | 778 | ||
774 | if (sreq->sr_result) | 779 | if (sreq->sr_result) |
775 | /* Device has no echo buffer */ | 780 | /* Device has no echo buffer */ |
776 | return 0; | 781 | return 0; |
777 | 782 | ||
778 | return buffer[3] + ((buffer[2] & 0x1f) << 8); | 783 | return buffer[3] + ((buffer[2] & 0x1f) << 8); |
779 | } | 784 | } |
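
The value returned above is the echo buffer capacity taken from the 4-byte
descriptor: the low five bits of byte 2 plus all of byte 3. A small worked
example, with assumed descriptor contents:

        /* Illustration (descriptor bytes are assumed):
         * buffer[2] = 0x01, buffer[3] = 0x20
         *   -> capacity = 0x20 + ((0x01 & 0x1f) << 8) = 288 bytes */
        len = buffer[3] + ((buffer[2] & 0x1f) << 8);
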
780 | 785 | ||
781 | static void | 786 | static void |
782 | spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer) | 787 | spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer) |
783 | { | 788 | { |
784 | struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); | 789 | struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); |
785 | struct scsi_device *sdev = sreq->sr_device; | 790 | struct scsi_device *sdev = sreq->sr_device; |
786 | struct scsi_target *starget = sdev->sdev_target; | 791 | struct scsi_target *starget = sdev->sdev_target; |
787 | int len = sdev->inquiry_len; | 792 | int len = sdev->inquiry_len; |
788 | /* first set us up for narrow async */ | 793 | /* first set us up for narrow async */ |
789 | DV_SET(offset, 0); | 794 | DV_SET(offset, 0); |
790 | DV_SET(width, 0); | 795 | DV_SET(width, 0); |
791 | 796 | ||
792 | if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS) | 797 | if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS) |
793 | != SPI_COMPARE_SUCCESS) { | 798 | != SPI_COMPARE_SUCCESS) { |
794 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n"); | 799 | SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n"); |
795 | /* FIXME: should probably offline the device here? */ | 800 | /* FIXME: should probably offline the device here? */ |
796 | return; | 801 | return; |
797 | } | 802 | } |
798 | 803 | ||
799 | /* test width */ | 804 | /* test width */ |
800 | if (i->f->set_width && spi_max_width(starget) && | 805 | if (i->f->set_width && spi_max_width(starget) && |
801 | scsi_device_wide(sdev)) { | 806 | scsi_device_wide(sdev)) { |
802 | i->f->set_width(starget, 1); | 807 | i->f->set_width(starget, 1); |
803 | 808 | ||
804 | if (spi_dv_device_compare_inquiry(sreq, buffer, | 809 | if (spi_dv_device_compare_inquiry(sreq, buffer, |
805 | buffer + len, | 810 | buffer + len, |
806 | DV_LOOPS) | 811 | DV_LOOPS) |
807 | != SPI_COMPARE_SUCCESS) { | 812 | != SPI_COMPARE_SUCCESS) { |
808 | SPI_PRINTK(starget, KERN_ERR, "Wide Transfers Fail\n"); | 813 | SPI_PRINTK(starget, KERN_ERR, "Wide Transfers Fail\n"); |
809 | i->f->set_width(starget, 0); | 814 | i->f->set_width(starget, 0); |
810 | } | 815 | } |
811 | } | 816 | } |
812 | 817 | ||
813 | if (!i->f->set_period) | 818 | if (!i->f->set_period) |
814 | return; | 819 | return; |
815 | 820 | ||
816 | /* device can't handle synchronous */ | 821 | /* device can't handle synchronous */ |
817 | if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) | 822 | if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) |
818 | return; | 823 | return; |
819 | 824 | ||
820 | /* see if the device has an echo buffer. If it does we can | 825 | /* see if the device has an echo buffer. If it does we can |
821 | * do the SPI pattern write tests */ | 826 | * do the SPI pattern write tests */ |
822 | 827 | ||
823 | len = 0; | 828 | len = 0; |
824 | if (scsi_device_dt(sdev)) | 829 | if (scsi_device_dt(sdev)) |
825 | len = spi_dv_device_get_echo_buffer(sreq, buffer); | 830 | len = spi_dv_device_get_echo_buffer(sreq, buffer); |
826 | 831 | ||
827 | retry: | 832 | retry: |
828 | 833 | ||
829 | /* now set up to the maximum */ | 834 | /* now set up to the maximum */ |
830 | DV_SET(offset, spi_max_offset(starget)); | 835 | DV_SET(offset, spi_max_offset(starget)); |
831 | DV_SET(period, spi_min_period(starget)); | 836 | DV_SET(period, spi_min_period(starget)); |
832 | /* try QAS requests; this should be harmless to set if the | 837 | /* try QAS requests; this should be harmless to set if the |
833 | * target supports it */ | 838 | * target supports it */ |
834 | if (scsi_device_qas(sdev)) | 839 | if (scsi_device_qas(sdev)) |
835 | DV_SET(qas, 1); | 840 | DV_SET(qas, 1); |
836 | /* Also try IU transfers */ | 841 | /* Also try IU transfers */ |
837 | if (scsi_device_ius(sdev)) | 842 | if (scsi_device_ius(sdev)) |
838 | DV_SET(iu, 1); | 843 | DV_SET(iu, 1); |
839 | if (spi_min_period(starget) < 9) { | 844 | if (spi_min_period(starget) < 9) { |
840 | /* This is u320 (or u640). Ignore the coupled parameters | 845 | /* This is u320 (or u640). Ignore the coupled parameters |
841 | * like DT and IU, but set the optional ones */ | 846 | * like DT and IU, but set the optional ones */ |
842 | DV_SET(rd_strm, 1); | 847 | DV_SET(rd_strm, 1); |
843 | DV_SET(wr_flow, 1); | 848 | DV_SET(wr_flow, 1); |
844 | DV_SET(rti, 1); | 849 | DV_SET(rti, 1); |
845 | if (spi_min_period(starget) == 8) | 850 | if (spi_min_period(starget) == 8) |
846 | DV_SET(pcomp_en, 1); | 851 | DV_SET(pcomp_en, 1); |
847 | } | 852 | } |
848 | 853 | ||
849 | if (len == 0) { | 854 | if (len == 0) { |
850 | SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n"); | 855 | SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n"); |
851 | spi_dv_retrain(sreq, buffer, buffer + len, | 856 | spi_dv_retrain(sreq, buffer, buffer + len, |
852 | spi_dv_device_compare_inquiry); | 857 | spi_dv_device_compare_inquiry); |
853 | return; | 858 | return; |
854 | } | 859 | } |
855 | 860 | ||
856 | if (len > SPI_MAX_ECHO_BUFFER_SIZE) { | 861 | if (len > SPI_MAX_ECHO_BUFFER_SIZE) { |
857 | SPI_PRINTK(starget, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); | 862 | SPI_PRINTK(starget, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); |
858 | len = SPI_MAX_ECHO_BUFFER_SIZE; | 863 | len = SPI_MAX_ECHO_BUFFER_SIZE; |
859 | } | 864 | } |
860 | 865 | ||
861 | if (spi_dv_retrain(sreq, buffer, buffer + len, | 866 | if (spi_dv_retrain(sreq, buffer, buffer + len, |
862 | spi_dv_device_echo_buffer) | 867 | spi_dv_device_echo_buffer) |
863 | == SPI_COMPARE_SKIP_TEST) { | 868 | == SPI_COMPARE_SKIP_TEST) { |
864 | /* OK, the stupid drive can't do a write echo buffer | 869 | /* OK, the stupid drive can't do a write echo buffer |
865 | * test after all, fall back to the read tests */ | 870 | * test after all, fall back to the read tests */ |
866 | len = 0; | 871 | len = 0; |
867 | goto retry; | 872 | goto retry; |
868 | } | 873 | } |
869 | } | 874 | } |
870 | 875 | ||
871 | 876 | ||
872 | /** spi_dv_device - Do Domain Validation on the device | 877 | /** spi_dv_device - Do Domain Validation on the device |
873 | * @sdev: scsi device to validate | 878 | * @sdev: scsi device to validate |
874 | * | 879 | * |
875 | * Performs the domain validation on the given device in the | 880 | * Performs the domain validation on the given device in the |
876 | * current execution thread. Since DV operations may sleep, | 881 | * current execution thread. Since DV operations may sleep, |
877 | * the current thread must have user context. Also no SCSI | 882 | * the current thread must have user context. Also no SCSI |
878 | * related locks that would deadlock I/O issued by the DV may | 883 | * related locks that would deadlock I/O issued by the DV may |
879 | * be held. | 884 | * be held. |
880 | */ | 885 | */ |
881 | void | 886 | void |
882 | spi_dv_device(struct scsi_device *sdev) | 887 | spi_dv_device(struct scsi_device *sdev) |
883 | { | 888 | { |
884 | struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL); | 889 | struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL); |
885 | struct scsi_target *starget = sdev->sdev_target; | 890 | struct scsi_target *starget = sdev->sdev_target; |
886 | u8 *buffer; | 891 | u8 *buffer; |
887 | const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; | 892 | const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; |
888 | 893 | ||
889 | if (unlikely(!sreq)) | 894 | if (unlikely(!sreq)) |
890 | return; | 895 | return; |
891 | 896 | ||
892 | if (unlikely(scsi_device_get(sdev))) | 897 | if (unlikely(scsi_device_get(sdev))) |
893 | goto out_free_req; | 898 | goto out_free_req; |
894 | 899 | ||
895 | buffer = kmalloc(len, GFP_KERNEL); | 900 | buffer = kmalloc(len, GFP_KERNEL); |
896 | 901 | ||
897 | if (unlikely(!buffer)) | 902 | if (unlikely(!buffer)) |
898 | goto out_put; | 903 | goto out_put; |
899 | 904 | ||
900 | memset(buffer, 0, len); | 905 | memset(buffer, 0, len); |
901 | 906 | ||
902 | /* We need to verify that the actual device will quiesce; the | 907 | /* We need to verify that the actual device will quiesce; the |
903 | * later target quiesce is just a nice to have */ | 908 | * later target quiesce is just a nice to have */ |
904 | if (unlikely(scsi_device_quiesce(sdev))) | 909 | if (unlikely(scsi_device_quiesce(sdev))) |
905 | goto out_free; | 910 | goto out_free; |
906 | 911 | ||
907 | scsi_target_quiesce(starget); | 912 | scsi_target_quiesce(starget); |
908 | 913 | ||
909 | spi_dv_pending(starget) = 1; | 914 | spi_dv_pending(starget) = 1; |
910 | down(&spi_dv_sem(starget)); | 915 | down(&spi_dv_sem(starget)); |
911 | 916 | ||
912 | SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n"); | 917 | SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n"); |
913 | 918 | ||
914 | spi_dv_device_internal(sreq, buffer); | 919 | spi_dv_device_internal(sreq, buffer); |
915 | 920 | ||
916 | SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n"); | 921 | SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n"); |
917 | 922 | ||
918 | up(&spi_dv_sem(starget)); | 923 | up(&spi_dv_sem(starget)); |
919 | spi_dv_pending(starget) = 0; | 924 | spi_dv_pending(starget) = 0; |
920 | 925 | ||
921 | scsi_target_resume(starget); | 926 | scsi_target_resume(starget); |
922 | 927 | ||
923 | spi_initial_dv(starget) = 1; | 928 | spi_initial_dv(starget) = 1; |
924 | 929 | ||
925 | out_free: | 930 | out_free: |
926 | kfree(buffer); | 931 | kfree(buffer); |
927 | out_put: | 932 | out_put: |
928 | scsi_device_put(sdev); | 933 | scsi_device_put(sdev); |
929 | out_free_req: | 934 | out_free_req: |
930 | scsi_release_request(sreq); | 935 | scsi_release_request(sreq); |
931 | } | 936 | } |
932 | EXPORT_SYMBOL(spi_dv_device); | 937 | EXPORT_SYMBOL(spi_dv_device); |
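
As a usage sketch (the function name is hypothetical), a driver running in
user context, for example from its slave_configure() method, can run Domain
Validation synchronously with the exported call above:

static int example_slave_configure(struct scsi_device *sdev)
{
        /* Hypothetical sketch: in-thread DV; the caller may sleep and
         * must hold no SCSI locks, per the spi_dv_device() kernel-doc. */
        spi_dv_device(sdev);
        return 0;
}
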
933 | 938 | ||
934 | struct work_queue_wrapper { | 939 | struct work_queue_wrapper { |
935 | struct work_struct work; | 940 | struct work_struct work; |
936 | struct scsi_device *sdev; | 941 | struct scsi_device *sdev; |
937 | }; | 942 | }; |
938 | 943 | ||
939 | static void | 944 | static void |
940 | spi_dv_device_work_wrapper(void *data) | 945 | spi_dv_device_work_wrapper(void *data) |
941 | { | 946 | { |
942 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 947 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; |
943 | struct scsi_device *sdev = wqw->sdev; | 948 | struct scsi_device *sdev = wqw->sdev; |
944 | 949 | ||
945 | kfree(wqw); | 950 | kfree(wqw); |
946 | spi_dv_device(sdev); | 951 | spi_dv_device(sdev); |
947 | spi_dv_pending(sdev->sdev_target) = 0; | 952 | spi_dv_pending(sdev->sdev_target) = 0; |
948 | scsi_device_put(sdev); | 953 | scsi_device_put(sdev); |
949 | } | 954 | } |
950 | 955 | ||
951 | 956 | ||
952 | /** | 957 | /** |
953 | * spi_schedule_dv_device - schedule domain validation to occur on the device | 958 | * spi_schedule_dv_device - schedule domain validation to occur on the device |
954 | * @sdev: The device to validate | 959 | * @sdev: The device to validate |
955 | * | 960 | * |
956 | * Identical to spi_dv_device() above, except that the DV will be | 961 | * Identical to spi_dv_device() above, except that the DV will be |
957 | * scheduled to occur in a workqueue later. All memory allocations | 962 | * scheduled to occur in a workqueue later. All memory allocations |
958 | * are atomic, so may be called from any context including those holding | 963 | * are atomic, so may be called from any context including those holding |
959 | * SCSI locks. | 964 | * SCSI locks. |
960 | */ | 965 | */ |
961 | void | 966 | void |
962 | spi_schedule_dv_device(struct scsi_device *sdev) | 967 | spi_schedule_dv_device(struct scsi_device *sdev) |
963 | { | 968 | { |
964 | struct work_queue_wrapper *wqw = | 969 | struct work_queue_wrapper *wqw = |
965 | kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); | 970 | kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); |
966 | 971 | ||
967 | if (unlikely(!wqw)) | 972 | if (unlikely(!wqw)) |
968 | return; | 973 | return; |
969 | 974 | ||
970 | if (unlikely(spi_dv_pending(sdev->sdev_target))) { | 975 | if (unlikely(spi_dv_pending(sdev->sdev_target))) { |
971 | kfree(wqw); | 976 | kfree(wqw); |
972 | return; | 977 | return; |
973 | } | 978 | } |
974 | /* Set pending early (dv_device doesn't check it, only sets it) */ | 979 | /* Set pending early (dv_device doesn't check it, only sets it) */ |
975 | spi_dv_pending(sdev->sdev_target) = 1; | 980 | spi_dv_pending(sdev->sdev_target) = 1; |
976 | if (unlikely(scsi_device_get(sdev))) { | 981 | if (unlikely(scsi_device_get(sdev))) { |
977 | kfree(wqw); | 982 | kfree(wqw); |
978 | spi_dv_pending(sdev->sdev_target) = 0; | 983 | spi_dv_pending(sdev->sdev_target) = 0; |
979 | return; | 984 | return; |
980 | } | 985 | } |
981 | 986 | ||
982 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); | 987 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); |
983 | wqw->sdev = sdev; | 988 | wqw->sdev = sdev; |
984 | 989 | ||
985 | schedule_work(&wqw->work); | 990 | schedule_work(&wqw->work); |
986 | } | 991 | } |
987 | EXPORT_SYMBOL(spi_schedule_dv_device); | 992 | EXPORT_SYMBOL(spi_schedule_dv_device); |
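
Where the caller cannot sleep, for instance while holding a SCSI lock or in
interrupt context, the deferred variant above is used instead; a hedged
sketch with a hypothetical caller:

static void example_async_reconfig(struct scsi_device *sdev)
{
        /* Hypothetical sketch: DV is queued to a workqueue; only
         * GFP_ATOMIC allocations happen in this call path. */
        spi_schedule_dv_device(sdev);
}
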
988 | 993 | ||
989 | /** | 994 | /** |
990 | * spi_display_xfer_agreement - Print the current target transfer agreement | 995 | * spi_display_xfer_agreement - Print the current target transfer agreement |
991 | * @starget: The target for which to display the agreement | 996 | * @starget: The target for which to display the agreement |
992 | * | 997 | * |
993 | * Each SPI port is required to maintain a transfer agreement for each | 998 | * Each SPI port is required to maintain a transfer agreement for each |
994 | * other port on the bus. This function prints a one-line summary of | 999 | * other port on the bus. This function prints a one-line summary of |
995 | * the current agreement; more detailed information is available in sysfs. | 1000 | * the current agreement; more detailed information is available in sysfs. |
996 | */ | 1001 | */ |
997 | void spi_display_xfer_agreement(struct scsi_target *starget) | 1002 | void spi_display_xfer_agreement(struct scsi_target *starget) |
998 | { | 1003 | { |
999 | struct spi_transport_attrs *tp; | 1004 | struct spi_transport_attrs *tp; |
1000 | tp = (struct spi_transport_attrs *)&starget->starget_data; | 1005 | tp = (struct spi_transport_attrs *)&starget->starget_data; |
1001 | 1006 | ||
1002 | if (tp->offset > 0 && tp->period > 0) { | 1007 | if (tp->offset > 0 && tp->period > 0) { |
1003 | unsigned int picosec, kb100; | 1008 | unsigned int picosec, kb100; |
1004 | char *scsi = "FAST-?"; | 1009 | char *scsi = "FAST-?"; |
1005 | char tmp[8]; | 1010 | char tmp[8]; |
1006 | 1011 | ||
1007 | if (tp->period <= SPI_STATIC_PPR) { | 1012 | if (tp->period <= SPI_STATIC_PPR) { |
1008 | picosec = ppr_to_ps[tp->period]; | 1013 | picosec = ppr_to_ps[tp->period]; |
1009 | switch (tp->period) { | 1014 | switch (tp->period) { |
1010 | case 7: scsi = "FAST-320"; break; | 1015 | case 7: scsi = "FAST-320"; break; |
1011 | case 8: scsi = "FAST-160"; break; | 1016 | case 8: scsi = "FAST-160"; break; |
1012 | case 9: scsi = "FAST-80"; break; | 1017 | case 9: scsi = "FAST-80"; break; |
1013 | case 10: | 1018 | case 10: |
1014 | case 11: scsi = "FAST-40"; break; | 1019 | case 11: scsi = "FAST-40"; break; |
1015 | case 12: scsi = "FAST-20"; break; | 1020 | case 12: scsi = "FAST-20"; break; |
1016 | } | 1021 | } |
1017 | } else { | 1022 | } else { |
1018 | picosec = tp->period * 4000; | 1023 | picosec = tp->period * 4000; |
1019 | if (tp->period < 25) | 1024 | if (tp->period < 25) |
1020 | scsi = "FAST-20"; | 1025 | scsi = "FAST-20"; |
1021 | else if (tp->period < 50) | 1026 | else if (tp->period < 50) |
1022 | scsi = "FAST-10"; | 1027 | scsi = "FAST-10"; |
1023 | else | 1028 | else |
1024 | scsi = "FAST-5"; | 1029 | scsi = "FAST-5"; |
1025 | } | 1030 | } |
1026 | 1031 | ||
1027 | kb100 = (10000000 + picosec / 2) / picosec; | 1032 | kb100 = (10000000 + picosec / 2) / picosec; |
1028 | if (tp->width) | 1033 | if (tp->width) |
1029 | kb100 *= 2; | 1034 | kb100 *= 2; |
1030 | sprint_frac(tmp, picosec, 1000); | 1035 | sprint_frac(tmp, picosec, 1000); |
1031 | 1036 | ||
1032 | dev_info(&starget->dev, | 1037 | dev_info(&starget->dev, |
1033 | "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", | 1038 | "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", |
1034 | scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, | 1039 | scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, |
1035 | tp->dt ? "DT" : "ST", | 1040 | tp->dt ? "DT" : "ST", |
1036 | tp->iu ? " IU" : "", | 1041 | tp->iu ? " IU" : "", |
1037 | tp->qas ? " QAS" : "", | 1042 | tp->qas ? " QAS" : "", |
1038 | tp->rd_strm ? " RDSTRM" : "", | 1043 | tp->rd_strm ? " RDSTRM" : "", |
1039 | tp->rti ? " RTI" : "", | 1044 | tp->rti ? " RTI" : "", |
1040 | tp->wr_flow ? " WRFLOW" : "", | 1045 | tp->wr_flow ? " WRFLOW" : "", |
1041 | tp->pcomp_en ? " PCOMP" : "", | 1046 | tp->pcomp_en ? " PCOMP" : "", |
1042 | tp->hold_mcs ? " HMCS" : "", | 1047 | tp->hold_mcs ? " HMCS" : "", |
1043 | tmp, tp->offset); | 1048 | tmp, tp->offset); |
1044 | } else { | 1049 | } else { |
1045 | dev_info(&starget->dev, "%sasynchronous.\n", | 1050 | dev_info(&starget->dev, "%sasynchronous.\n", |
1046 | tp->width ? "wide " : ""); | 1051 | tp->width ? "wide " : ""); |
1047 | } | 1052 | } |
1048 | } | 1053 | } |
1049 | EXPORT_SYMBOL(spi_display_xfer_agreement); | 1054 | EXPORT_SYMBOL(spi_display_xfer_agreement); |
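
A worked example of the rate arithmetic above, assuming period code 8 maps
to 6250 ps in ppr_to_ps (the FAST-160 case):

        /* Assumed: ppr_to_ps[8] == 6250
         *   kb100 = (10000000 + 6250/2) / 6250 = 1600  -> "160.0 MB/s" narrow
         *   width doubles it:            kb100 = 3200  -> "320.0 MB/s" wide */
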
1050 | 1055 | ||
1051 | #define SETUP_ATTRIBUTE(field) \ | 1056 | #define SETUP_ATTRIBUTE(field) \ |
1052 | i->private_attrs[count] = class_device_attr_##field; \ | 1057 | i->private_attrs[count] = class_device_attr_##field; \ |
1053 | if (!i->f->set_##field) { \ | 1058 | if (!i->f->set_##field) { \ |
1054 | i->private_attrs[count].attr.mode = S_IRUGO; \ | 1059 | i->private_attrs[count].attr.mode = S_IRUGO; \ |
1055 | i->private_attrs[count].store = NULL; \ | 1060 | i->private_attrs[count].store = NULL; \ |
1056 | } \ | 1061 | } \ |
1057 | i->attrs[count] = &i->private_attrs[count]; \ | 1062 | i->attrs[count] = &i->private_attrs[count]; \ |
1058 | if (i->f->show_##field) \ | 1063 | if (i->f->show_##field) \ |
1059 | count++ | 1064 | count++ |
1060 | 1065 | ||
1061 | #define SETUP_RELATED_ATTRIBUTE(field, rel_field) \ | 1066 | #define SETUP_RELATED_ATTRIBUTE(field, rel_field) \ |
1062 | i->private_attrs[count] = class_device_attr_##field; \ | 1067 | i->private_attrs[count] = class_device_attr_##field; \ |
1063 | if (!i->f->set_##rel_field) { \ | 1068 | if (!i->f->set_##rel_field) { \ |
1064 | i->private_attrs[count].attr.mode = S_IRUGO; \ | 1069 | i->private_attrs[count].attr.mode = S_IRUGO; \ |
1065 | i->private_attrs[count].store = NULL; \ | 1070 | i->private_attrs[count].store = NULL; \ |
1066 | } \ | 1071 | } \ |
1067 | i->attrs[count] = &i->private_attrs[count]; \ | 1072 | i->attrs[count] = &i->private_attrs[count]; \ |
1068 | if (i->f->show_##rel_field) \ | 1073 | if (i->f->show_##rel_field) \ |
1069 | count++ | 1074 | count++ |
1070 | 1075 | ||
1071 | #define SETUP_HOST_ATTRIBUTE(field) \ | 1076 | #define SETUP_HOST_ATTRIBUTE(field) \ |
1072 | i->private_host_attrs[count] = class_device_attr_##field; \ | 1077 | i->private_host_attrs[count] = class_device_attr_##field; \ |
1073 | if (!i->f->set_##field) { \ | 1078 | if (!i->f->set_##field) { \ |
1074 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ | 1079 | i->private_host_attrs[count].attr.mode = S_IRUGO; \ |
1075 | i->private_host_attrs[count].store = NULL; \ | 1080 | i->private_host_attrs[count].store = NULL; \ |
1076 | } \ | 1081 | } \ |
1077 | i->host_attrs[count] = &i->private_host_attrs[count]; \ | 1082 | i->host_attrs[count] = &i->private_host_attrs[count]; \ |
1078 | count++ | 1083 | count++ |
1079 | 1084 | ||
1080 | static int spi_device_match(struct attribute_container *cont, | 1085 | static int spi_device_match(struct attribute_container *cont, |
1081 | struct device *dev) | 1086 | struct device *dev) |
1082 | { | 1087 | { |
1083 | struct scsi_device *sdev; | 1088 | struct scsi_device *sdev; |
1084 | struct Scsi_Host *shost; | 1089 | struct Scsi_Host *shost; |
1085 | struct spi_internal *i; | 1090 | struct spi_internal *i; |
1086 | 1091 | ||
1087 | if (!scsi_is_sdev_device(dev)) | 1092 | if (!scsi_is_sdev_device(dev)) |
1088 | return 0; | 1093 | return 0; |
1089 | 1094 | ||
1090 | sdev = to_scsi_device(dev); | 1095 | sdev = to_scsi_device(dev); |
1091 | shost = sdev->host; | 1096 | shost = sdev->host; |
1092 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 1097 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
1093 | != &spi_host_class.class) | 1098 | != &spi_host_class.class) |
1094 | return 0; | 1099 | return 0; |
1095 | /* Note: this class has no device attributes, so it has | 1100 | /* Note: this class has no device attributes, so it has |
1096 | * no per-HBA allocation and thus we don't need to distinguish | 1101 | * no per-HBA allocation and thus we don't need to distinguish |
1097 | * the attribute containers for the device */ | 1102 | * the attribute containers for the device */ |
1098 | i = to_spi_internal(shost->transportt); | 1103 | i = to_spi_internal(shost->transportt); |
1099 | if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) | 1104 | if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) |
1100 | return 0; | 1105 | return 0; |
1101 | return 1; | 1106 | return 1; |
1102 | } | 1107 | } |
1103 | 1108 | ||
1104 | static int spi_target_match(struct attribute_container *cont, | 1109 | static int spi_target_match(struct attribute_container *cont, |
1105 | struct device *dev) | 1110 | struct device *dev) |
1106 | { | 1111 | { |
1107 | struct Scsi_Host *shost; | 1112 | struct Scsi_Host *shost; |
1108 | struct scsi_target *starget; | 1113 | struct scsi_target *starget; |
1109 | struct spi_internal *i; | 1114 | struct spi_internal *i; |
1110 | 1115 | ||
1111 | if (!scsi_is_target_device(dev)) | 1116 | if (!scsi_is_target_device(dev)) |
1112 | return 0; | 1117 | return 0; |
1113 | 1118 | ||
1114 | shost = dev_to_shost(dev->parent); | 1119 | shost = dev_to_shost(dev->parent); |
1115 | if (!shost->transportt || shost->transportt->host_attrs.ac.class | 1120 | if (!shost->transportt || shost->transportt->host_attrs.ac.class |
1116 | != &spi_host_class.class) | 1121 | != &spi_host_class.class) |
1117 | return 0; | 1122 | return 0; |
1118 | 1123 | ||
1119 | i = to_spi_internal(shost->transportt); | 1124 | i = to_spi_internal(shost->transportt); |
1120 | starget = to_scsi_target(dev); | 1125 | starget = to_scsi_target(dev); |
1121 | 1126 | ||
1122 | if (i->f->deny_binding && i->f->deny_binding(starget)) | 1127 | if (i->f->deny_binding && i->f->deny_binding(starget)) |
1123 | return 0; | 1128 | return 0; |
1124 | 1129 | ||
1125 | return &i->t.target_attrs.ac == cont; | 1130 | return &i->t.target_attrs.ac == cont; |
1126 | } | 1131 | } |
1127 | 1132 | ||
1128 | static DECLARE_TRANSPORT_CLASS(spi_transport_class, | 1133 | static DECLARE_TRANSPORT_CLASS(spi_transport_class, |
1129 | "spi_transport", | 1134 | "spi_transport", |
1130 | spi_setup_transport_attrs, | 1135 | spi_setup_transport_attrs, |
1131 | NULL, | 1136 | NULL, |
1132 | NULL); | 1137 | NULL); |
1133 | 1138 | ||
1134 | static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, | 1139 | static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, |
1135 | spi_device_match, | 1140 | spi_device_match, |
1136 | spi_device_configure); | 1141 | spi_device_configure); |
1137 | 1142 | ||
1138 | struct scsi_transport_template * | 1143 | struct scsi_transport_template * |
1139 | spi_attach_transport(struct spi_function_template *ft) | 1144 | spi_attach_transport(struct spi_function_template *ft) |
1140 | { | 1145 | { |
1141 | struct spi_internal *i = kmalloc(sizeof(struct spi_internal), | 1146 | struct spi_internal *i = kmalloc(sizeof(struct spi_internal), |
1142 | GFP_KERNEL); | 1147 | GFP_KERNEL); |
1143 | int count = 0; | 1148 | int count = 0; |
1144 | if (unlikely(!i)) | 1149 | if (unlikely(!i)) |
1145 | return NULL; | 1150 | return NULL; |
1146 | 1151 | ||
1147 | memset(i, 0, sizeof(struct spi_internal)); | 1152 | memset(i, 0, sizeof(struct spi_internal)); |
1148 | 1153 | ||
1149 | 1154 | ||
1150 | i->t.target_attrs.ac.class = &spi_transport_class.class; | 1155 | i->t.target_attrs.ac.class = &spi_transport_class.class; |
1151 | i->t.target_attrs.ac.attrs = &i->attrs[0]; | 1156 | i->t.target_attrs.ac.attrs = &i->attrs[0]; |
1152 | i->t.target_attrs.ac.match = spi_target_match; | 1157 | i->t.target_attrs.ac.match = spi_target_match; |
1153 | transport_container_register(&i->t.target_attrs); | 1158 | transport_container_register(&i->t.target_attrs); |
1154 | i->t.target_size = sizeof(struct spi_transport_attrs); | 1159 | i->t.target_size = sizeof(struct spi_transport_attrs); |
1155 | i->t.host_attrs.ac.class = &spi_host_class.class; | 1160 | i->t.host_attrs.ac.class = &spi_host_class.class; |
1156 | i->t.host_attrs.ac.attrs = &i->host_attrs[0]; | 1161 | i->t.host_attrs.ac.attrs = &i->host_attrs[0]; |
1157 | i->t.host_attrs.ac.match = spi_host_match; | 1162 | i->t.host_attrs.ac.match = spi_host_match; |
1158 | transport_container_register(&i->t.host_attrs); | 1163 | transport_container_register(&i->t.host_attrs); |
1159 | i->t.host_size = sizeof(struct spi_host_attrs); | 1164 | i->t.host_size = sizeof(struct spi_host_attrs); |
1160 | i->f = ft; | 1165 | i->f = ft; |
1161 | 1166 | ||
1162 | SETUP_ATTRIBUTE(period); | 1167 | SETUP_ATTRIBUTE(period); |
1163 | SETUP_RELATED_ATTRIBUTE(min_period, period); | 1168 | SETUP_RELATED_ATTRIBUTE(min_period, period); |
1164 | SETUP_ATTRIBUTE(offset); | 1169 | SETUP_ATTRIBUTE(offset); |
1165 | SETUP_RELATED_ATTRIBUTE(max_offset, offset); | 1170 | SETUP_RELATED_ATTRIBUTE(max_offset, offset); |
1166 | SETUP_ATTRIBUTE(width); | 1171 | SETUP_ATTRIBUTE(width); |
1167 | SETUP_RELATED_ATTRIBUTE(max_width, width); | 1172 | SETUP_RELATED_ATTRIBUTE(max_width, width); |
1168 | SETUP_ATTRIBUTE(iu); | 1173 | SETUP_ATTRIBUTE(iu); |
1169 | SETUP_ATTRIBUTE(dt); | 1174 | SETUP_ATTRIBUTE(dt); |
1170 | SETUP_ATTRIBUTE(qas); | 1175 | SETUP_ATTRIBUTE(qas); |
1171 | SETUP_ATTRIBUTE(wr_flow); | 1176 | SETUP_ATTRIBUTE(wr_flow); |
1172 | SETUP_ATTRIBUTE(rd_strm); | 1177 | SETUP_ATTRIBUTE(rd_strm); |
1173 | SETUP_ATTRIBUTE(rti); | 1178 | SETUP_ATTRIBUTE(rti); |
1174 | SETUP_ATTRIBUTE(pcomp_en); | 1179 | SETUP_ATTRIBUTE(pcomp_en); |
1175 | SETUP_ATTRIBUTE(hold_mcs); | 1180 | SETUP_ATTRIBUTE(hold_mcs); |
1176 | 1181 | ||
1177 | /* if you add an attribute but forget to increase SPI_NUM_ATTRS | 1182 | /* if you add an attribute but forget to increase SPI_NUM_ATTRS |
1178 | * this bug will trigger */ | 1183 | * this bug will trigger */ |
1179 | BUG_ON(count > SPI_NUM_ATTRS); | 1184 | BUG_ON(count > SPI_NUM_ATTRS); |
1180 | 1185 | ||
1181 | i->attrs[count++] = &class_device_attr_revalidate; | 1186 | i->attrs[count++] = &class_device_attr_revalidate; |
1182 | 1187 | ||
1183 | i->attrs[count] = NULL; | 1188 | i->attrs[count] = NULL; |
1184 | 1189 | ||
1185 | count = 0; | 1190 | count = 0; |
1186 | SETUP_HOST_ATTRIBUTE(signalling); | 1191 | SETUP_HOST_ATTRIBUTE(signalling); |
1187 | 1192 | ||
1188 | BUG_ON(count > SPI_HOST_ATTRS); | 1193 | BUG_ON(count > SPI_HOST_ATTRS); |
1189 | 1194 | ||
1190 | i->host_attrs[count] = NULL; | 1195 | i->host_attrs[count] = NULL; |
1191 | 1196 | ||
1192 | return &i->t; | 1197 | return &i->t; |
1193 | } | 1198 | } |
1194 | EXPORT_SYMBOL(spi_attach_transport); | 1199 | EXPORT_SYMBOL(spi_attach_transport); |
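
For context, an LLDD would attach this template at module init and later
assign it to shost->transportt when allocating its host. All names below are
hypothetical, the function table is trimmed to a single handler, and the
set_period callback signature is assumed from spi_function_template:

/* Hypothetical sketch of attaching the SPI transport class. */
static void example_set_period(struct scsi_target *starget, int period)
{
        /* stub: program the HBA's synchronous period for this target */
}

static struct spi_function_template example_spi_functions = {
        .set_period  = example_set_period,
        .show_period = 1,
};

static struct scsi_transport_template *example_transport_template;

static int __init example_init(void)
{
        example_transport_template = spi_attach_transport(&example_spi_functions);
        if (!example_transport_template)
                return -ENOMEM;
        /* assigned to shost->transportt in the driver's probe routine */
        return 0;
}
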
1195 | 1200 | ||
1196 | void spi_release_transport(struct scsi_transport_template *t) | 1201 | void spi_release_transport(struct scsi_transport_template *t) |
1197 | { | 1202 | { |
1198 | struct spi_internal *i = to_spi_internal(t); | 1203 | struct spi_internal *i = to_spi_internal(t); |
1199 | 1204 | ||
1200 | transport_container_unregister(&i->t.target_attrs); | 1205 | transport_container_unregister(&i->t.target_attrs); |
1201 | transport_container_unregister(&i->t.host_attrs); | 1206 | transport_container_unregister(&i->t.host_attrs); |
1202 | 1207 | ||
1203 | kfree(i); | 1208 | kfree(i); |
1204 | } | 1209 | } |
1205 | EXPORT_SYMBOL(spi_release_transport); | 1210 | EXPORT_SYMBOL(spi_release_transport); |
1206 | 1211 | ||
1207 | static __init int spi_transport_init(void) | 1212 | static __init int spi_transport_init(void) |
1208 | { | 1213 | { |
1209 | int error = transport_class_register(&spi_transport_class); | 1214 | int error = transport_class_register(&spi_transport_class); |
1210 | if (error) | 1215 | if (error) |
1211 | return error; | 1216 | return error; |
1212 | error = anon_transport_class_register(&spi_device_class); | 1217 | error = anon_transport_class_register(&spi_device_class); |
1213 | return transport_class_register(&spi_host_class); | 1218 | return transport_class_register(&spi_host_class); |
1214 | } | 1219 | } |
1215 | 1220 | ||
1216 | static void __exit spi_transport_exit(void) | 1221 | static void __exit spi_transport_exit(void) |
1217 | { | 1222 | { |
1218 | transport_class_unregister(&spi_transport_class); | 1223 | transport_class_unregister(&spi_transport_class); |
1219 | anon_transport_class_unregister(&spi_device_class); | 1224 | anon_transport_class_unregister(&spi_device_class); |
1220 | transport_class_unregister(&spi_host_class); | 1225 | transport_class_unregister(&spi_host_class); |
1221 | } | 1226 | } |
1222 | 1227 | ||
1223 | MODULE_AUTHOR("Martin Hicks"); | 1228 | MODULE_AUTHOR("Martin Hicks"); |
1224 | MODULE_DESCRIPTION("SPI Transport Attributes"); | 1229 | MODULE_DESCRIPTION("SPI Transport Attributes"); |
1225 | MODULE_LICENSE("GPL"); | 1230 | MODULE_LICENSE("GPL"); |
1226 | 1231 | ||
1227 | module_init(spi_transport_init); | 1232 | module_init(spi_transport_init); |
1228 | module_exit(spi_transport_exit); | 1233 | module_exit(spi_transport_exit); |
1229 | 1234 |
include/linux/attribute_container.h
1 | /* | 1 | /* |
2 | * class_container.h - a generic container for all classes | 2 | * class_container.h - a generic container for all classes |
3 | * | 3 | * |
4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> | 4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> |
5 | * | 5 | * |
6 | * This file is licensed under GPLv2 | 6 | * This file is licensed under GPLv2 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _ATTRIBUTE_CONTAINER_H_ | 9 | #ifndef _ATTRIBUTE_CONTAINER_H_ |
10 | #define _ATTRIBUTE_CONTAINER_H_ | 10 | #define _ATTRIBUTE_CONTAINER_H_ |
11 | 11 | ||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/spinlock.h> | ||
14 | 15 | ||
15 | struct attribute_container { | 16 | struct attribute_container { |
16 | struct list_head node; | 17 | struct list_head node; |
17 | struct list_head containers; | 18 | struct list_head containers; |
19 | spinlock_t containers_lock; | ||
18 | struct class *class; | 20 | struct class *class; |
19 | struct class_device_attribute **attrs; | 21 | struct class_device_attribute **attrs; |
20 | int (*match)(struct attribute_container *, struct device *); | 22 | int (*match)(struct attribute_container *, struct device *); |
21 | #define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01 | 23 | #define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01 |
22 | unsigned long flags; | 24 | unsigned long flags; |
23 | }; | 25 | }; |
24 | 26 | ||
25 | static inline int | 27 | static inline int |
26 | attribute_container_no_classdevs(struct attribute_container *atc) | 28 | attribute_container_no_classdevs(struct attribute_container *atc) |
27 | { | 29 | { |
28 | return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS; | 30 | return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS; |
29 | } | 31 | } |
30 | 32 | ||
31 | static inline void | 33 | static inline void |
32 | attribute_container_set_no_classdevs(struct attribute_container *atc) | 34 | attribute_container_set_no_classdevs(struct attribute_container *atc) |
33 | { | 35 | { |
34 | atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS; | 36 | atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS; |
35 | } | 37 | } |
36 | 38 | ||
37 | int attribute_container_register(struct attribute_container *cont); | 39 | int attribute_container_register(struct attribute_container *cont); |
38 | int attribute_container_unregister(struct attribute_container *cont); | 40 | int attribute_container_unregister(struct attribute_container *cont); |
39 | void attribute_container_create_device(struct device *dev, | 41 | void attribute_container_create_device(struct device *dev, |
40 | int (*fn)(struct attribute_container *, | 42 | int (*fn)(struct attribute_container *, |
41 | struct device *, | 43 | struct device *, |
42 | struct class_device *)); | 44 | struct class_device *)); |
43 | void attribute_container_add_device(struct device *dev, | 45 | void attribute_container_add_device(struct device *dev, |
44 | int (*fn)(struct attribute_container *, | 46 | int (*fn)(struct attribute_container *, |
45 | struct device *, | 47 | struct device *, |
46 | struct class_device *)); | 48 | struct class_device *)); |
47 | void attribute_container_remove_device(struct device *dev, | 49 | void attribute_container_remove_device(struct device *dev, |
48 | void (*fn)(struct attribute_container *, | 50 | void (*fn)(struct attribute_container *, |
49 | struct device *, | 51 | struct device *, |
50 | struct class_device *)); | 52 | struct class_device *)); |
51 | void attribute_container_device_trigger(struct device *dev, | 53 | void attribute_container_device_trigger(struct device *dev, |
52 | int (*fn)(struct attribute_container *, | 54 | int (*fn)(struct attribute_container *, |
53 | struct device *, | 55 | struct device *, |
54 | struct class_device *)); | 56 | struct class_device *)); |
55 | void attribute_container_trigger(struct device *dev, | 57 | void attribute_container_trigger(struct device *dev, |
56 | int (*fn)(struct attribute_container *, | 58 | int (*fn)(struct attribute_container *, |
57 | struct device *)); | 59 | struct device *)); |
58 | int attribute_container_add_attrs(struct class_device *classdev); | 60 | int attribute_container_add_attrs(struct class_device *classdev); |
59 | int attribute_container_add_class_device(struct class_device *classdev); | 61 | int attribute_container_add_class_device(struct class_device *classdev); |
60 | int attribute_container_add_class_device_adapter(struct attribute_container *cont, | 62 | int attribute_container_add_class_device_adapter(struct attribute_container *cont, |
61 | struct device *dev, | 63 | struct device *dev, |
62 | struct class_device *classdev); | 64 | struct class_device *classdev); |
63 | void attribute_container_remove_attrs(struct class_device *classdev); | 65 | void attribute_container_remove_attrs(struct class_device *classdev); |
64 | void attribute_container_class_device_del(struct class_device *classdev); | 66 | void attribute_container_class_device_del(struct class_device *classdev); |
65 | 67 | struct class_device *attribute_container_find_class_device(struct attribute_container *, struct device *); | |
66 | |||
67 | |||
68 | |||
69 | |||
70 | |||
71 | struct class_device_attribute **attribute_container_classdev_to_attrs(const struct class_device *classdev); | 68 | struct class_device_attribute **attribute_container_classdev_to_attrs(const struct class_device *classdev); |
72 | 69 |
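
The attribute_container_find_class_device() declaration added above is the
extra API described in the commit message: given the generic device and the
attribute container, it returns the class_device the container created for
that device. A hedged sketch of how per-class data could be recovered through
it (the wrapper name is hypothetical; class_get_devdata() is the usual
class_device accessor):

/* Hypothetical sketch: map a generic device back to its per-class data. */
static void *example_get_class_data(struct attribute_container *cont,
                                    struct device *dev)
{
        struct class_device *cdev;

        cdev = attribute_container_find_class_device(cont, dev);
        if (!cdev)
                return NULL;
        return class_get_devdata(cdev);
}
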
include/linux/transport_class.h
1 | /* | 1 | /* |
2 | * transport_class.h - a generic container for all transport classes | 2 | * transport_class.h - a generic container for all transport classes |
3 | * | 3 | * |
4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> | 4 | * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> |
5 | * | 5 | * |
6 | * This file is licensed under GPLv2 | 6 | * This file is licensed under GPLv2 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _TRANSPORT_CLASS_H_ | 9 | #ifndef _TRANSPORT_CLASS_H_ |
10 | #define _TRANSPORT_CLASS_H_ | 10 | #define _TRANSPORT_CLASS_H_ |
11 | 11 | ||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/attribute_container.h> | 13 | #include <linux/attribute_container.h> |
14 | 14 | ||
15 | struct transport_container; | ||
16 | |||
15 | struct transport_class { | 17 | struct transport_class { |
16 | struct class class; | 18 | struct class class; |
17 | int (*setup)(struct device *); | 19 | int (*setup)(struct transport_container *, struct device *, |
18 | int (*configure)(struct device *); | 20 | struct class_device *); |
19 | int (*remove)(struct device *); | 21 | int (*configure)(struct transport_container *, struct device *, |
22 | struct class_device *); | ||
23 | int (*remove)(struct transport_container *, struct device *, | ||
24 | struct class_device *); | ||
20 | }; | 25 | }; |
21 | 26 | ||
22 | #define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \ | 27 | #define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \ |
23 | struct transport_class cls = { \ | 28 | struct transport_class cls = { \ |
24 | .class = { \ | 29 | .class = { \ |
25 | .name = nm, \ | 30 | .name = nm, \ |
26 | }, \ | 31 | }, \ |
27 | .setup = su, \ | 32 | .setup = su, \ |
28 | .remove = rm, \ | 33 | .remove = rm, \ |
29 | .configure = cfg, \ | 34 | .configure = cfg, \ |
30 | } | 35 | } |
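
With the reworked callbacks above, setup, configure and remove now receive
the transport_container and class_device in addition to the generic device;
a minimal hypothetical transport class using the new setup signature:

/* Hypothetical sketch: a setup callback under the new 3-argument form. */
static int example_setup(struct transport_container *tc,
                         struct device *dev,
                         struct class_device *cdev)
{
        /* initialise per-device transport state here */
        return 0;
}

static DECLARE_TRANSPORT_CLASS(example_class, "example_transport",
                               example_setup, NULL, NULL);
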
31 | 36 | ||
32 | 37 | ||
33 | struct anon_transport_class { | 38 | struct anon_transport_class { |
34 | struct transport_class tclass; | 39 | struct transport_class tclass; |
35 | struct attribute_container container; | 40 | struct attribute_container container; |
36 | }; | 41 | }; |
37 | 42 | ||
38 | #define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \ | 43 | #define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \ |
39 | struct anon_transport_class cls = { \ | 44 | struct anon_transport_class cls = { \ |
40 | .tclass = { \ | 45 | .tclass = { \ |
41 | .configure = cfg, \ | 46 | .configure = cfg, \ |
42 | }, \ | 47 | }, \ |
43 | . container = { \ | 48 | . container = { \ |
44 | .match = mtch, \ | 49 | .match = mtch, \ |
45 | }, \ | 50 | }, \ |
46 | } | 51 | } |
47 | 52 | ||
48 | #define class_to_transport_class(x) \ | 53 | #define class_to_transport_class(x) \ |
49 | container_of(x, struct transport_class, class) | 54 | container_of(x, struct transport_class, class) |
50 | 55 | ||
51 | struct transport_container { | 56 | struct transport_container { |
52 | struct attribute_container ac; | 57 | struct attribute_container ac; |
53 | struct attribute_group *statistics; | 58 | struct attribute_group *statistics; |
54 | }; | 59 | }; |
55 | 60 | ||
56 | #define attribute_container_to_transport_container(x) \ | 61 | #define attribute_container_to_transport_container(x) \ |
57 | container_of(x, struct transport_container, ac) | 62 | container_of(x, struct transport_container, ac) |
58 | 63 | ||
59 | void transport_remove_device(struct device *); | 64 | void transport_remove_device(struct device *); |
60 | void transport_add_device(struct device *); | 65 | void transport_add_device(struct device *); |
61 | void transport_setup_device(struct device *); | 66 | void transport_setup_device(struct device *); |
62 | void transport_configure_device(struct device *); | 67 | void transport_configure_device(struct device *); |
63 | void transport_destroy_device(struct device *); | 68 | void transport_destroy_device(struct device *); |
64 | 69 | ||
65 | static inline void | 70 | static inline void |
66 | transport_register_device(struct device *dev) | 71 | transport_register_device(struct device *dev) |
67 | { | 72 | { |
68 | transport_setup_device(dev); | 73 | transport_setup_device(dev); |
69 | transport_add_device(dev); | 74 | transport_add_device(dev); |
70 | } | 75 | } |
71 | 76 | ||
72 | static inline void | 77 | static inline void |
73 | transport_unregister_device(struct device *dev) | 78 | transport_unregister_device(struct device *dev) |
74 | { | 79 | { |
75 | transport_remove_device(dev); | 80 | transport_remove_device(dev); |
76 | transport_destroy_device(dev); | 81 | transport_destroy_device(dev); |
77 | } | 82 | } |
78 | 83 | ||
79 | static inline int transport_container_register(struct transport_container *tc) | 84 | static inline int transport_container_register(struct transport_container *tc) |
80 | { | 85 | { |
81 | return attribute_container_register(&tc->ac); | 86 | return attribute_container_register(&tc->ac); |
82 | } | 87 | } |
83 | 88 | ||
84 | static inline int transport_container_unregister(struct transport_container *tc) | 89 | static inline int transport_container_unregister(struct transport_container *tc) |
85 | { | 90 | { |
86 | return attribute_container_unregister(&tc->ac); | 91 | return attribute_container_unregister(&tc->ac); |
87 | } | 92 | } |
88 | 93 | ||
89 | int transport_class_register(struct transport_class *); | 94 | int transport_class_register(struct transport_class *); |
90 | int anon_transport_class_register(struct anon_transport_class *); | 95 | int anon_transport_class_register(struct anon_transport_class *); |
91 | void transport_class_unregister(struct transport_class *); | 96 | void transport_class_unregister(struct transport_class *); |
92 | void anon_transport_class_unregister(struct anon_transport_class *); | 97 | void anon_transport_class_unregister(struct anon_transport_class *); |
93 | 98 | ||
94 | 99 | ||
95 | #endif | 100 | #endif |
96 | 101 |