Commit 9e0fc764eaec082cd2ffcf82568dfdd086935934

Authored by Stephen M. Cameron
Committed by James Bottomley
Parent: 5767a1c498

[SCSI] hpsa: do not re-order commands in internal queues

The driver's internal queues should be FIFO, not LIFO: commands were added
to the head of the request queue with hlist_add_head() and also dequeued
from the head, so queued commands were dispatched in reverse order of
arrival. Switch the queues from hlists to regular lists and add new
commands at the tail (list_add_tail()) so they are dispatched in arrival
order. This is a port of an almost identical patch made to cciss by
Jens Axboe.

Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
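
To see concretely what the patch changes, here is a small standalone C sketch
(illustrative only, not part of the commit). It mimics the kernel's
<linux/list.h> intrusive circular list in userspace; list_add_head() below is
a local name playing the role of the old hlist_add_head() (the kernel's list
equivalent is list_add()). Draining a queue from the head yields LIFO order
when elements were inserted at the head, and FIFO order when they were
inserted at the tail, which is exactly the addQ() change in the diff below.

/*
 * Userspace sketch (not from the patch): FIFO vs. LIFO ordering of an
 * intrusive doubly-linked list, minimal stand-in for <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

/* Insert at the tail; draining from the head then gives FIFO order. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Insert at the head; draining from the head then gives LIFO order. */
static void list_add_head(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cmd {
	int tag;
	struct list_head list;
};

/* Unlink commands from the head of the queue, as start_io() drains reqQ. */
static void drain(const char *label, struct list_head *q)
{
	printf("%s:", label);
	while (q->next != q) {
		struct list_head *n = q->next;

		n->prev->next = n->next;
		n->next->prev = n->prev;
		printf(" %d", container_of(n, struct cmd, list)->tag);
	}
	printf("\n");
}

int main(void)
{
	struct cmd c[3] = { { .tag = 1 }, { .tag = 2 }, { .tag = 3 } };
	struct list_head q;
	int i;

	INIT_LIST_HEAD(&q);
	for (i = 0; i < 3; i++)			/* what the patch does */
		list_add_tail(&c[i].list, &q);
	drain("list_add_tail (FIFO)", &q);	/* prints: 1 2 3 */

	for (i = 0; i < 3; i++)			/* what the old code did */
		list_add_head(&c[i].list, &q);
	drain("list_add_head (LIFO)", &q);	/* prints: 3 2 1 */
	return 0;
}

Compiled and run, the sketch prints "1 2 3" for the tail-insert queue and
"3 2 1" for the head-insert one. With the old head insertion, a command
already sitting on reqQ could be overtaken indefinitely by newer commands;
that is the reordering this patch eliminates.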

Showing 3 changed files with 14 additions and 15 deletions

1 /* 1 /*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License. 7 * the Free Software Foundation; version 2 of the License.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 * 17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com 18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 19 *
20 */ 20 */
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/types.h> 24 #include <linux/types.h>
25 #include <linux/pci.h> 25 #include <linux/pci.h>
26 #include <linux/kernel.h> 26 #include <linux/kernel.h>
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/delay.h> 28 #include <linux/delay.h>
29 #include <linux/fs.h> 29 #include <linux/fs.h>
30 #include <linux/timer.h> 30 #include <linux/timer.h>
31 #include <linux/seq_file.h> 31 #include <linux/seq_file.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/spinlock.h> 33 #include <linux/spinlock.h>
34 #include <linux/compat.h> 34 #include <linux/compat.h>
35 #include <linux/blktrace_api.h> 35 #include <linux/blktrace_api.h>
36 #include <linux/uaccess.h> 36 #include <linux/uaccess.h>
37 #include <linux/io.h> 37 #include <linux/io.h>
38 #include <linux/dma-mapping.h> 38 #include <linux/dma-mapping.h>
39 #include <linux/completion.h> 39 #include <linux/completion.h>
40 #include <linux/moduleparam.h> 40 #include <linux/moduleparam.h>
41 #include <scsi/scsi.h> 41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h> 43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h> 44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h> 45 #include <scsi/scsi_tcq.h>
46 #include <linux/cciss_ioctl.h> 46 #include <linux/cciss_ioctl.h>
47 #include <linux/string.h> 47 #include <linux/string.h>
48 #include <linux/bitmap.h> 48 #include <linux/bitmap.h>
49 #include <asm/atomic.h> 49 #include <asm/atomic.h>
50 #include <linux/kthread.h> 50 #include <linux/kthread.h>
51 #include "hpsa_cmd.h" 51 #include "hpsa_cmd.h"
52 #include "hpsa.h" 52 #include "hpsa.h"
53 53
54 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 54 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55 #define HPSA_DRIVER_VERSION "2.0.2-1" 55 #define HPSA_DRIVER_VERSION "2.0.2-1"
56 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 56 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57 57
58 /* How long to wait (in milliseconds) for board to go into simple mode */ 58 /* How long to wait (in milliseconds) for board to go into simple mode */
59 #define MAX_CONFIG_WAIT 30000 59 #define MAX_CONFIG_WAIT 30000
60 #define MAX_IOCTL_CONFIG_WAIT 1000 60 #define MAX_IOCTL_CONFIG_WAIT 1000
61 61
62 /*define how many times we will try a command because of bus resets */ 62 /*define how many times we will try a command because of bus resets */
63 #define MAX_CMD_RETRIES 3 63 #define MAX_CMD_RETRIES 3
64 64
65 /* Embedded module documentation macros - see modules.h */ 65 /* Embedded module documentation macros - see modules.h */
66 MODULE_AUTHOR("Hewlett-Packard Company"); 66 MODULE_AUTHOR("Hewlett-Packard Company");
67 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ 67 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION); 68 HPSA_DRIVER_VERSION);
69 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 69 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70 MODULE_VERSION(HPSA_DRIVER_VERSION); 70 MODULE_VERSION(HPSA_DRIVER_VERSION);
71 MODULE_LICENSE("GPL"); 71 MODULE_LICENSE("GPL");
72 72
73 static int hpsa_allow_any; 73 static int hpsa_allow_any;
74 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); 74 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(hpsa_allow_any, 75 MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware"); 76 "Allow hpsa driver to access unknown HP Smart Array hardware");
77 static int hpsa_simple_mode; 77 static int hpsa_simple_mode;
78 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); 78 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(hpsa_simple_mode, 79 MODULE_PARM_DESC(hpsa_simple_mode,
80 "Use 'simple mode' rather than 'performant mode'"); 80 "Use 'simple mode' rather than 'performant mode'");
81 81
82 /* define the PCI info for the cards we can control */ 82 /* define the PCI info for the cards we can control */
83 static const struct pci_device_id hpsa_pci_device_id[] = { 83 static const struct pci_device_id hpsa_pci_device_id[] = {
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, 84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, 85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, 87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, 89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, 90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, 91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, 92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, 93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, 94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, 95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, 96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
97 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 97 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
98 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 98 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
99 {0,} 99 {0,}
100 }; 100 };
101 101
102 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); 102 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
103 103
104 /* board_id = Subsystem Device ID & Vendor ID 104 /* board_id = Subsystem Device ID & Vendor ID
105 * product = Marketing Name for the board 105 * product = Marketing Name for the board
106 * access = Address of the struct of function pointers 106 * access = Address of the struct of function pointers
107 */ 107 */
108 static struct board_type products[] = { 108 static struct board_type products[] = {
109 {0x3241103C, "Smart Array P212", &SA5_access}, 109 {0x3241103C, "Smart Array P212", &SA5_access},
110 {0x3243103C, "Smart Array P410", &SA5_access}, 110 {0x3243103C, "Smart Array P410", &SA5_access},
111 {0x3245103C, "Smart Array P410i", &SA5_access}, 111 {0x3245103C, "Smart Array P410i", &SA5_access},
112 {0x3247103C, "Smart Array P411", &SA5_access}, 112 {0x3247103C, "Smart Array P411", &SA5_access},
113 {0x3249103C, "Smart Array P812", &SA5_access}, 113 {0x3249103C, "Smart Array P812", &SA5_access},
114 {0x324a103C, "Smart Array P712m", &SA5_access}, 114 {0x324a103C, "Smart Array P712m", &SA5_access},
115 {0x324b103C, "Smart Array P711m", &SA5_access}, 115 {0x324b103C, "Smart Array P711m", &SA5_access},
116 {0x3250103C, "Smart Array", &SA5_access}, 116 {0x3250103C, "Smart Array", &SA5_access},
117 {0x3250113C, "Smart Array", &SA5_access}, 117 {0x3250113C, "Smart Array", &SA5_access},
118 {0x3250123C, "Smart Array", &SA5_access}, 118 {0x3250123C, "Smart Array", &SA5_access},
119 {0x3250133C, "Smart Array", &SA5_access}, 119 {0x3250133C, "Smart Array", &SA5_access},
120 {0x3250143C, "Smart Array", &SA5_access}, 120 {0x3250143C, "Smart Array", &SA5_access},
121 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 121 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
122 }; 122 };
123 123
124 static int number_of_controllers; 124 static int number_of_controllers;
125 125
126 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 126 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
127 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 127 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
128 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 128 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
129 static void start_io(struct ctlr_info *h); 129 static void start_io(struct ctlr_info *h);
130 130
131 #ifdef CONFIG_COMPAT 131 #ifdef CONFIG_COMPAT
132 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 132 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
133 #endif 133 #endif
134 134
135 static void cmd_free(struct ctlr_info *h, struct CommandList *c); 135 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
136 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); 136 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
137 static struct CommandList *cmd_alloc(struct ctlr_info *h); 137 static struct CommandList *cmd_alloc(struct ctlr_info *h);
138 static struct CommandList *cmd_special_alloc(struct ctlr_info *h); 138 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
139 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 139 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
140 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, 140 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
141 int cmd_type); 141 int cmd_type);
142 142
143 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 143 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
144 static void hpsa_scan_start(struct Scsi_Host *); 144 static void hpsa_scan_start(struct Scsi_Host *);
145 static int hpsa_scan_finished(struct Scsi_Host *sh, 145 static int hpsa_scan_finished(struct Scsi_Host *sh,
146 unsigned long elapsed_time); 146 unsigned long elapsed_time);
147 static int hpsa_change_queue_depth(struct scsi_device *sdev, 147 static int hpsa_change_queue_depth(struct scsi_device *sdev,
148 int qdepth, int reason); 148 int qdepth, int reason);
149 149
150 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 150 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
151 static int hpsa_slave_alloc(struct scsi_device *sdev); 151 static int hpsa_slave_alloc(struct scsi_device *sdev);
152 static void hpsa_slave_destroy(struct scsi_device *sdev); 152 static void hpsa_slave_destroy(struct scsi_device *sdev);
153 153
154 static ssize_t raid_level_show(struct device *dev, 154 static ssize_t raid_level_show(struct device *dev,
155 struct device_attribute *attr, char *buf); 155 struct device_attribute *attr, char *buf);
156 static ssize_t lunid_show(struct device *dev, 156 static ssize_t lunid_show(struct device *dev,
157 struct device_attribute *attr, char *buf); 157 struct device_attribute *attr, char *buf);
158 static ssize_t unique_id_show(struct device *dev, 158 static ssize_t unique_id_show(struct device *dev,
159 struct device_attribute *attr, char *buf); 159 struct device_attribute *attr, char *buf);
160 static ssize_t host_show_firmware_revision(struct device *dev, 160 static ssize_t host_show_firmware_revision(struct device *dev,
161 struct device_attribute *attr, char *buf); 161 struct device_attribute *attr, char *buf);
162 static ssize_t host_show_commands_outstanding(struct device *dev, 162 static ssize_t host_show_commands_outstanding(struct device *dev,
163 struct device_attribute *attr, char *buf); 163 struct device_attribute *attr, char *buf);
164 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); 164 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
165 static ssize_t host_store_rescan(struct device *dev, 165 static ssize_t host_store_rescan(struct device *dev,
166 struct device_attribute *attr, const char *buf, size_t count); 166 struct device_attribute *attr, const char *buf, size_t count);
167 static int check_for_unit_attention(struct ctlr_info *h, 167 static int check_for_unit_attention(struct ctlr_info *h,
168 struct CommandList *c); 168 struct CommandList *c);
169 static void check_ioctl_unit_attention(struct ctlr_info *h, 169 static void check_ioctl_unit_attention(struct ctlr_info *h,
170 struct CommandList *c); 170 struct CommandList *c);
171 /* performant mode helper functions */ 171 /* performant mode helper functions */
172 static void calc_bucket_map(int *bucket, int num_buckets, 172 static void calc_bucket_map(int *bucket, int num_buckets,
173 int nsgs, int *bucket_map); 173 int nsgs, int *bucket_map);
174 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); 174 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
175 static inline u32 next_command(struct ctlr_info *h); 175 static inline u32 next_command(struct ctlr_info *h);
176 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, 176 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
177 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, 177 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
178 u64 *cfg_offset); 178 u64 *cfg_offset);
179 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 179 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
180 unsigned long *memory_bar); 180 unsigned long *memory_bar);
181 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); 181 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
182 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, 182 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
183 void __iomem *vaddr, int wait_for_ready); 183 void __iomem *vaddr, int wait_for_ready);
184 #define BOARD_NOT_READY 0 184 #define BOARD_NOT_READY 0
185 #define BOARD_READY 1 185 #define BOARD_READY 1
186 186
187 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 187 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
188 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 188 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
189 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 189 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
190 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 190 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
191 static DEVICE_ATTR(firmware_revision, S_IRUGO, 191 static DEVICE_ATTR(firmware_revision, S_IRUGO,
192 host_show_firmware_revision, NULL); 192 host_show_firmware_revision, NULL);
193 static DEVICE_ATTR(commands_outstanding, S_IRUGO, 193 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
194 host_show_commands_outstanding, NULL); 194 host_show_commands_outstanding, NULL);
195 195
196 static struct device_attribute *hpsa_sdev_attrs[] = { 196 static struct device_attribute *hpsa_sdev_attrs[] = {
197 &dev_attr_raid_level, 197 &dev_attr_raid_level,
198 &dev_attr_lunid, 198 &dev_attr_lunid,
199 &dev_attr_unique_id, 199 &dev_attr_unique_id,
200 NULL, 200 NULL,
201 }; 201 };
202 202
203 static struct device_attribute *hpsa_shost_attrs[] = { 203 static struct device_attribute *hpsa_shost_attrs[] = {
204 &dev_attr_rescan, 204 &dev_attr_rescan,
205 &dev_attr_firmware_revision, 205 &dev_attr_firmware_revision,
206 &dev_attr_commands_outstanding, 206 &dev_attr_commands_outstanding,
207 NULL, 207 NULL,
208 }; 208 };
209 209
210 static struct scsi_host_template hpsa_driver_template = { 210 static struct scsi_host_template hpsa_driver_template = {
211 .module = THIS_MODULE, 211 .module = THIS_MODULE,
212 .name = "hpsa", 212 .name = "hpsa",
213 .proc_name = "hpsa", 213 .proc_name = "hpsa",
214 .queuecommand = hpsa_scsi_queue_command, 214 .queuecommand = hpsa_scsi_queue_command,
215 .scan_start = hpsa_scan_start, 215 .scan_start = hpsa_scan_start,
216 .scan_finished = hpsa_scan_finished, 216 .scan_finished = hpsa_scan_finished,
217 .change_queue_depth = hpsa_change_queue_depth, 217 .change_queue_depth = hpsa_change_queue_depth,
218 .this_id = -1, 218 .this_id = -1,
219 .use_clustering = ENABLE_CLUSTERING, 219 .use_clustering = ENABLE_CLUSTERING,
220 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 220 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
221 .ioctl = hpsa_ioctl, 221 .ioctl = hpsa_ioctl,
222 .slave_alloc = hpsa_slave_alloc, 222 .slave_alloc = hpsa_slave_alloc,
223 .slave_destroy = hpsa_slave_destroy, 223 .slave_destroy = hpsa_slave_destroy,
224 #ifdef CONFIG_COMPAT 224 #ifdef CONFIG_COMPAT
225 .compat_ioctl = hpsa_compat_ioctl, 225 .compat_ioctl = hpsa_compat_ioctl,
226 #endif 226 #endif
227 .sdev_attrs = hpsa_sdev_attrs, 227 .sdev_attrs = hpsa_sdev_attrs,
228 .shost_attrs = hpsa_shost_attrs, 228 .shost_attrs = hpsa_shost_attrs,
229 }; 229 };
230 230
231 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 231 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
232 { 232 {
233 unsigned long *priv = shost_priv(sdev->host); 233 unsigned long *priv = shost_priv(sdev->host);
234 return (struct ctlr_info *) *priv; 234 return (struct ctlr_info *) *priv;
235 } 235 }
236 236
237 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) 237 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
238 { 238 {
239 unsigned long *priv = shost_priv(sh); 239 unsigned long *priv = shost_priv(sh);
240 return (struct ctlr_info *) *priv; 240 return (struct ctlr_info *) *priv;
241 } 241 }
242 242
243 static int check_for_unit_attention(struct ctlr_info *h, 243 static int check_for_unit_attention(struct ctlr_info *h,
244 struct CommandList *c) 244 struct CommandList *c)
245 { 245 {
246 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) 246 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
247 return 0; 247 return 0;
248 248
249 switch (c->err_info->SenseInfo[12]) { 249 switch (c->err_info->SenseInfo[12]) {
250 case STATE_CHANGED: 250 case STATE_CHANGED:
251 dev_warn(&h->pdev->dev, "hpsa%d: a state change " 251 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
252 "detected, command retried\n", h->ctlr); 252 "detected, command retried\n", h->ctlr);
253 break; 253 break;
254 case LUN_FAILED: 254 case LUN_FAILED:
255 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure " 255 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
256 "detected, action required\n", h->ctlr); 256 "detected, action required\n", h->ctlr);
257 break; 257 break;
258 case REPORT_LUNS_CHANGED: 258 case REPORT_LUNS_CHANGED:
259 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " 259 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
260 "changed, action required\n", h->ctlr); 260 "changed, action required\n", h->ctlr);
261 /* 261 /*
262 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 262 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
263 */ 263 */
264 break; 264 break;
265 case POWER_OR_RESET: 265 case POWER_OR_RESET:
266 dev_warn(&h->pdev->dev, "hpsa%d: a power on " 266 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
267 "or device reset detected\n", h->ctlr); 267 "or device reset detected\n", h->ctlr);
268 break; 268 break;
269 case UNIT_ATTENTION_CLEARED: 269 case UNIT_ATTENTION_CLEARED:
270 dev_warn(&h->pdev->dev, "hpsa%d: unit attention " 270 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
271 "cleared by another initiator\n", h->ctlr); 271 "cleared by another initiator\n", h->ctlr);
272 break; 272 break;
273 default: 273 default:
274 dev_warn(&h->pdev->dev, "hpsa%d: unknown " 274 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
275 "unit attention detected\n", h->ctlr); 275 "unit attention detected\n", h->ctlr);
276 break; 276 break;
277 } 277 }
278 return 1; 278 return 1;
279 } 279 }
280 280
281 static ssize_t host_store_rescan(struct device *dev, 281 static ssize_t host_store_rescan(struct device *dev,
282 struct device_attribute *attr, 282 struct device_attribute *attr,
283 const char *buf, size_t count) 283 const char *buf, size_t count)
284 { 284 {
285 struct ctlr_info *h; 285 struct ctlr_info *h;
286 struct Scsi_Host *shost = class_to_shost(dev); 286 struct Scsi_Host *shost = class_to_shost(dev);
287 h = shost_to_hba(shost); 287 h = shost_to_hba(shost);
288 hpsa_scan_start(h->scsi_host); 288 hpsa_scan_start(h->scsi_host);
289 return count; 289 return count;
290 } 290 }
291 291
292 static ssize_t host_show_firmware_revision(struct device *dev, 292 static ssize_t host_show_firmware_revision(struct device *dev,
293 struct device_attribute *attr, char *buf) 293 struct device_attribute *attr, char *buf)
294 { 294 {
295 struct ctlr_info *h; 295 struct ctlr_info *h;
296 struct Scsi_Host *shost = class_to_shost(dev); 296 struct Scsi_Host *shost = class_to_shost(dev);
297 unsigned char *fwrev; 297 unsigned char *fwrev;
298 298
299 h = shost_to_hba(shost); 299 h = shost_to_hba(shost);
300 if (!h->hba_inquiry_data) 300 if (!h->hba_inquiry_data)
301 return 0; 301 return 0;
302 fwrev = &h->hba_inquiry_data[32]; 302 fwrev = &h->hba_inquiry_data[32];
303 return snprintf(buf, 20, "%c%c%c%c\n", 303 return snprintf(buf, 20, "%c%c%c%c\n",
304 fwrev[0], fwrev[1], fwrev[2], fwrev[3]); 304 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
305 } 305 }
306 306
307 static ssize_t host_show_commands_outstanding(struct device *dev, 307 static ssize_t host_show_commands_outstanding(struct device *dev,
308 struct device_attribute *attr, char *buf) 308 struct device_attribute *attr, char *buf)
309 { 309 {
310 struct Scsi_Host *shost = class_to_shost(dev); 310 struct Scsi_Host *shost = class_to_shost(dev);
311 struct ctlr_info *h = shost_to_hba(shost); 311 struct ctlr_info *h = shost_to_hba(shost);
312 312
313 return snprintf(buf, 20, "%d\n", h->commands_outstanding); 313 return snprintf(buf, 20, "%d\n", h->commands_outstanding);
314 } 314 }
315 315
316 /* Enqueuing and dequeuing functions for cmdlists. */ 316 /* Enqueuing and dequeuing functions for cmdlists. */
317 static inline void addQ(struct hlist_head *list, struct CommandList *c) 317 static inline void addQ(struct list_head *list, struct CommandList *c)
318 { 318 {
319 hlist_add_head(&c->list, list); 319 list_add_tail(&c->list, list);
320 } 320 }
321 321
322 static inline u32 next_command(struct ctlr_info *h) 322 static inline u32 next_command(struct ctlr_info *h)
323 { 323 {
324 u32 a; 324 u32 a;
325 325
326 if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) 326 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
327 return h->access.command_completed(h); 327 return h->access.command_completed(h);
328 328
329 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 329 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
330 a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 330 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
331 (h->reply_pool_head)++; 331 (h->reply_pool_head)++;
332 h->commands_outstanding--; 332 h->commands_outstanding--;
333 } else { 333 } else {
334 a = FIFO_EMPTY; 334 a = FIFO_EMPTY;
335 } 335 }
336 /* Check for wraparound */ 336 /* Check for wraparound */
337 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 337 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
338 h->reply_pool_head = h->reply_pool; 338 h->reply_pool_head = h->reply_pool;
339 h->reply_pool_wraparound ^= 1; 339 h->reply_pool_wraparound ^= 1;
340 } 340 }
341 return a; 341 return a;
342 } 342 }
343 343
344 /* set_performant_mode: Modify the tag for cciss performant 344 /* set_performant_mode: Modify the tag for cciss performant
345 * set bit 0 for pull model, bits 3-1 for block fetch 345 * set bit 0 for pull model, bits 3-1 for block fetch
346 * register number 346 * register number
347 */ 347 */
348 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) 348 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
349 { 349 {
350 if (likely(h->transMethod == CFGTBL_Trans_Performant)) 350 if (likely(h->transMethod == CFGTBL_Trans_Performant))
351 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 351 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
352 } 352 }
353 353
354 static void enqueue_cmd_and_start_io(struct ctlr_info *h, 354 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
355 struct CommandList *c) 355 struct CommandList *c)
356 { 356 {
357 unsigned long flags; 357 unsigned long flags;
358 358
359 set_performant_mode(h, c); 359 set_performant_mode(h, c);
360 spin_lock_irqsave(&h->lock, flags); 360 spin_lock_irqsave(&h->lock, flags);
361 addQ(&h->reqQ, c); 361 addQ(&h->reqQ, c);
362 h->Qdepth++; 362 h->Qdepth++;
363 start_io(h); 363 start_io(h);
364 spin_unlock_irqrestore(&h->lock, flags); 364 spin_unlock_irqrestore(&h->lock, flags);
365 } 365 }
366 366
367 static inline void removeQ(struct CommandList *c) 367 static inline void removeQ(struct CommandList *c)
368 { 368 {
369 if (WARN_ON(hlist_unhashed(&c->list))) 369 if (WARN_ON(list_empty(&c->list)))
370 return; 370 return;
371 hlist_del_init(&c->list); 371 list_del_init(&c->list);
372 } 372 }
373 373
374 static inline int is_hba_lunid(unsigned char scsi3addr[]) 374 static inline int is_hba_lunid(unsigned char scsi3addr[])
375 { 375 {
376 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; 376 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
377 } 377 }
378 378
379 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 379 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
380 { 380 {
381 return (scsi3addr[3] & 0xC0) == 0x40; 381 return (scsi3addr[3] & 0xC0) == 0x40;
382 } 382 }
383 383
384 static inline int is_scsi_rev_5(struct ctlr_info *h) 384 static inline int is_scsi_rev_5(struct ctlr_info *h)
385 { 385 {
386 if (!h->hba_inquiry_data) 386 if (!h->hba_inquiry_data)
387 return 0; 387 return 0;
388 if ((h->hba_inquiry_data[2] & 0x07) == 5) 388 if ((h->hba_inquiry_data[2] & 0x07) == 5)
389 return 1; 389 return 1;
390 return 0; 390 return 0;
391 } 391 }
392 392
393 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 393 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
394 "UNKNOWN" 394 "UNKNOWN"
395 }; 395 };
396 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) 396 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
397 397
398 static ssize_t raid_level_show(struct device *dev, 398 static ssize_t raid_level_show(struct device *dev,
399 struct device_attribute *attr, char *buf) 399 struct device_attribute *attr, char *buf)
400 { 400 {
401 ssize_t l = 0; 401 ssize_t l = 0;
402 unsigned char rlevel; 402 unsigned char rlevel;
403 struct ctlr_info *h; 403 struct ctlr_info *h;
404 struct scsi_device *sdev; 404 struct scsi_device *sdev;
405 struct hpsa_scsi_dev_t *hdev; 405 struct hpsa_scsi_dev_t *hdev;
406 unsigned long flags; 406 unsigned long flags;
407 407
408 sdev = to_scsi_device(dev); 408 sdev = to_scsi_device(dev);
409 h = sdev_to_hba(sdev); 409 h = sdev_to_hba(sdev);
410 spin_lock_irqsave(&h->lock, flags); 410 spin_lock_irqsave(&h->lock, flags);
411 hdev = sdev->hostdata; 411 hdev = sdev->hostdata;
412 if (!hdev) { 412 if (!hdev) {
413 spin_unlock_irqrestore(&h->lock, flags); 413 spin_unlock_irqrestore(&h->lock, flags);
414 return -ENODEV; 414 return -ENODEV;
415 } 415 }
416 416
417 /* Is this even a logical drive? */ 417 /* Is this even a logical drive? */
418 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) { 418 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
419 spin_unlock_irqrestore(&h->lock, flags); 419 spin_unlock_irqrestore(&h->lock, flags);
420 l = snprintf(buf, PAGE_SIZE, "N/A\n"); 420 l = snprintf(buf, PAGE_SIZE, "N/A\n");
421 return l; 421 return l;
422 } 422 }
423 423
424 rlevel = hdev->raid_level; 424 rlevel = hdev->raid_level;
425 spin_unlock_irqrestore(&h->lock, flags); 425 spin_unlock_irqrestore(&h->lock, flags);
426 if (rlevel > RAID_UNKNOWN) 426 if (rlevel > RAID_UNKNOWN)
427 rlevel = RAID_UNKNOWN; 427 rlevel = RAID_UNKNOWN;
428 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); 428 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
429 return l; 429 return l;
430 } 430 }
431 431
432 static ssize_t lunid_show(struct device *dev, 432 static ssize_t lunid_show(struct device *dev,
433 struct device_attribute *attr, char *buf) 433 struct device_attribute *attr, char *buf)
434 { 434 {
435 struct ctlr_info *h; 435 struct ctlr_info *h;
436 struct scsi_device *sdev; 436 struct scsi_device *sdev;
437 struct hpsa_scsi_dev_t *hdev; 437 struct hpsa_scsi_dev_t *hdev;
438 unsigned long flags; 438 unsigned long flags;
439 unsigned char lunid[8]; 439 unsigned char lunid[8];
440 440
441 sdev = to_scsi_device(dev); 441 sdev = to_scsi_device(dev);
442 h = sdev_to_hba(sdev); 442 h = sdev_to_hba(sdev);
443 spin_lock_irqsave(&h->lock, flags); 443 spin_lock_irqsave(&h->lock, flags);
444 hdev = sdev->hostdata; 444 hdev = sdev->hostdata;
445 if (!hdev) { 445 if (!hdev) {
446 spin_unlock_irqrestore(&h->lock, flags); 446 spin_unlock_irqrestore(&h->lock, flags);
447 return -ENODEV; 447 return -ENODEV;
448 } 448 }
449 memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); 449 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
450 spin_unlock_irqrestore(&h->lock, flags); 450 spin_unlock_irqrestore(&h->lock, flags);
451 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 451 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
452 lunid[0], lunid[1], lunid[2], lunid[3], 452 lunid[0], lunid[1], lunid[2], lunid[3],
453 lunid[4], lunid[5], lunid[6], lunid[7]); 453 lunid[4], lunid[5], lunid[6], lunid[7]);
454 } 454 }
455 455
456 static ssize_t unique_id_show(struct device *dev, 456 static ssize_t unique_id_show(struct device *dev,
457 struct device_attribute *attr, char *buf) 457 struct device_attribute *attr, char *buf)
458 { 458 {
459 struct ctlr_info *h; 459 struct ctlr_info *h;
460 struct scsi_device *sdev; 460 struct scsi_device *sdev;
461 struct hpsa_scsi_dev_t *hdev; 461 struct hpsa_scsi_dev_t *hdev;
462 unsigned long flags; 462 unsigned long flags;
463 unsigned char sn[16]; 463 unsigned char sn[16];
464 464
465 sdev = to_scsi_device(dev); 465 sdev = to_scsi_device(dev);
466 h = sdev_to_hba(sdev); 466 h = sdev_to_hba(sdev);
467 spin_lock_irqsave(&h->lock, flags); 467 spin_lock_irqsave(&h->lock, flags);
468 hdev = sdev->hostdata; 468 hdev = sdev->hostdata;
469 if (!hdev) { 469 if (!hdev) {
470 spin_unlock_irqrestore(&h->lock, flags); 470 spin_unlock_irqrestore(&h->lock, flags);
471 return -ENODEV; 471 return -ENODEV;
472 } 472 }
473 memcpy(sn, hdev->device_id, sizeof(sn)); 473 memcpy(sn, hdev->device_id, sizeof(sn));
474 spin_unlock_irqrestore(&h->lock, flags); 474 spin_unlock_irqrestore(&h->lock, flags);
475 return snprintf(buf, 16 * 2 + 2, 475 return snprintf(buf, 16 * 2 + 2,
476 "%02X%02X%02X%02X%02X%02X%02X%02X" 476 "%02X%02X%02X%02X%02X%02X%02X%02X"
477 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 477 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
478 sn[0], sn[1], sn[2], sn[3], 478 sn[0], sn[1], sn[2], sn[3],
479 sn[4], sn[5], sn[6], sn[7], 479 sn[4], sn[5], sn[6], sn[7],
480 sn[8], sn[9], sn[10], sn[11], 480 sn[8], sn[9], sn[10], sn[11],
481 sn[12], sn[13], sn[14], sn[15]); 481 sn[12], sn[13], sn[14], sn[15]);
482 } 482 }
483 483
484 static int hpsa_find_target_lun(struct ctlr_info *h, 484 static int hpsa_find_target_lun(struct ctlr_info *h,
485 unsigned char scsi3addr[], int bus, int *target, int *lun) 485 unsigned char scsi3addr[], int bus, int *target, int *lun)
486 { 486 {
487 /* finds an unused bus, target, lun for a new physical device 487 /* finds an unused bus, target, lun for a new physical device
488 * assumes h->devlock is held 488 * assumes h->devlock is held
489 */ 489 */
490 int i, found = 0; 490 int i, found = 0;
491 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA); 491 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
492 492
493 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3); 493 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
494 494
495 for (i = 0; i < h->ndevices; i++) { 495 for (i = 0; i < h->ndevices; i++) {
496 if (h->dev[i]->bus == bus && h->dev[i]->target != -1) 496 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
497 set_bit(h->dev[i]->target, lun_taken); 497 set_bit(h->dev[i]->target, lun_taken);
498 } 498 }
499 499
500 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) { 500 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
501 if (!test_bit(i, lun_taken)) { 501 if (!test_bit(i, lun_taken)) {
502 /* *bus = 1; */ 502 /* *bus = 1; */
503 *target = i; 503 *target = i;
504 *lun = 0; 504 *lun = 0;
505 found = 1; 505 found = 1;
506 break; 506 break;
507 } 507 }
508 } 508 }
509 return !found; 509 return !found;
510 } 510 }
511 511
512 /* Add an entry into h->dev[] array. */ 512 /* Add an entry into h->dev[] array. */
513 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, 513 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
514 struct hpsa_scsi_dev_t *device, 514 struct hpsa_scsi_dev_t *device,
515 struct hpsa_scsi_dev_t *added[], int *nadded) 515 struct hpsa_scsi_dev_t *added[], int *nadded)
516 { 516 {
517 /* assumes h->devlock is held */ 517 /* assumes h->devlock is held */
518 int n = h->ndevices; 518 int n = h->ndevices;
519 int i; 519 int i;
520 unsigned char addr1[8], addr2[8]; 520 unsigned char addr1[8], addr2[8];
521 struct hpsa_scsi_dev_t *sd; 521 struct hpsa_scsi_dev_t *sd;
522 522
523 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) { 523 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
524 dev_err(&h->pdev->dev, "too many devices, some will be " 524 dev_err(&h->pdev->dev, "too many devices, some will be "
525 "inaccessible.\n"); 525 "inaccessible.\n");
526 return -1; 526 return -1;
527 } 527 }
528 528
529 /* physical devices do not have lun or target assigned until now. */ 529 /* physical devices do not have lun or target assigned until now. */
530 if (device->lun != -1) 530 if (device->lun != -1)
531 /* Logical device, lun is already assigned. */ 531 /* Logical device, lun is already assigned. */
532 goto lun_assigned; 532 goto lun_assigned;
533 533
534 /* If this device a non-zero lun of a multi-lun device 534 /* If this device a non-zero lun of a multi-lun device
535 * byte 4 of the 8-byte LUN addr will contain the logical 535 * byte 4 of the 8-byte LUN addr will contain the logical
536 * unit no, zero otherise. 536 * unit no, zero otherise.
537 */ 537 */
538 if (device->scsi3addr[4] == 0) { 538 if (device->scsi3addr[4] == 0) {
539 /* This is not a non-zero lun of a multi-lun device */ 539 /* This is not a non-zero lun of a multi-lun device */
540 if (hpsa_find_target_lun(h, device->scsi3addr, 540 if (hpsa_find_target_lun(h, device->scsi3addr,
541 device->bus, &device->target, &device->lun) != 0) 541 device->bus, &device->target, &device->lun) != 0)
542 return -1; 542 return -1;
543 goto lun_assigned; 543 goto lun_assigned;
544 } 544 }
545 545
546 /* This is a non-zero lun of a multi-lun device. 546 /* This is a non-zero lun of a multi-lun device.
547 * Search through our list and find the device which 547 * Search through our list and find the device which
548 * has the same 8 byte LUN address, excepting byte 4. 548 * has the same 8 byte LUN address, excepting byte 4.
549 * Assign the same bus and target for this new LUN. 549 * Assign the same bus and target for this new LUN.
550 * Use the logical unit number from the firmware. 550 * Use the logical unit number from the firmware.
551 */ 551 */
552 memcpy(addr1, device->scsi3addr, 8); 552 memcpy(addr1, device->scsi3addr, 8);
553 addr1[4] = 0; 553 addr1[4] = 0;
554 for (i = 0; i < n; i++) { 554 for (i = 0; i < n; i++) {
555 sd = h->dev[i]; 555 sd = h->dev[i];
556 memcpy(addr2, sd->scsi3addr, 8); 556 memcpy(addr2, sd->scsi3addr, 8);
557 addr2[4] = 0; 557 addr2[4] = 0;
558 /* differ only in byte 4? */ 558 /* differ only in byte 4? */
559 if (memcmp(addr1, addr2, 8) == 0) { 559 if (memcmp(addr1, addr2, 8) == 0) {
560 device->bus = sd->bus; 560 device->bus = sd->bus;
561 device->target = sd->target; 561 device->target = sd->target;
562 device->lun = device->scsi3addr[4]; 562 device->lun = device->scsi3addr[4];
563 break; 563 break;
564 } 564 }
565 } 565 }
566 if (device->lun == -1) { 566 if (device->lun == -1) {
567 dev_warn(&h->pdev->dev, "physical device with no LUN=0," 567 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
568 " suspect firmware bug or unsupported hardware " 568 " suspect firmware bug or unsupported hardware "
569 "configuration.\n"); 569 "configuration.\n");
570 return -1; 570 return -1;
571 } 571 }
572 572
573 lun_assigned: 573 lun_assigned:
574 574
575 h->dev[n] = device; 575 h->dev[n] = device;
576 h->ndevices++; 576 h->ndevices++;
577 added[*nadded] = device; 577 added[*nadded] = device;
578 (*nadded)++; 578 (*nadded)++;
579 579
580 /* initially, (before registering with scsi layer) we don't 580 /* initially, (before registering with scsi layer) we don't
581 * know our hostno and we don't want to print anything first 581 * know our hostno and we don't want to print anything first
582 * time anyway (the scsi layer's inquiries will show that info) 582 * time anyway (the scsi layer's inquiries will show that info)
583 */ 583 */
584 /* if (hostno != -1) */ 584 /* if (hostno != -1) */
585 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", 585 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
586 scsi_device_type(device->devtype), hostno, 586 scsi_device_type(device->devtype), hostno,
587 device->bus, device->target, device->lun); 587 device->bus, device->target, device->lun);
588 return 0; 588 return 0;
589 } 589 }
590 590
591 /* Replace an entry from h->dev[] array. */ 591 /* Replace an entry from h->dev[] array. */
592 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, 592 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
593 int entry, struct hpsa_scsi_dev_t *new_entry, 593 int entry, struct hpsa_scsi_dev_t *new_entry,
594 struct hpsa_scsi_dev_t *added[], int *nadded, 594 struct hpsa_scsi_dev_t *added[], int *nadded,
595 struct hpsa_scsi_dev_t *removed[], int *nremoved) 595 struct hpsa_scsi_dev_t *removed[], int *nremoved)
596 { 596 {
597 /* assumes h->devlock is held */ 597 /* assumes h->devlock is held */
598 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 598 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
599 removed[*nremoved] = h->dev[entry]; 599 removed[*nremoved] = h->dev[entry];
600 (*nremoved)++; 600 (*nremoved)++;
601 h->dev[entry] = new_entry; 601 h->dev[entry] = new_entry;
602 added[*nadded] = new_entry; 602 added[*nadded] = new_entry;
603 (*nadded)++; 603 (*nadded)++;
604 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", 604 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
605 scsi_device_type(new_entry->devtype), hostno, new_entry->bus, 605 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
606 new_entry->target, new_entry->lun); 606 new_entry->target, new_entry->lun);
607 } 607 }
608 608
609 /* Remove an entry from h->dev[] array. */ 609 /* Remove an entry from h->dev[] array. */
610 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, 610 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
611 struct hpsa_scsi_dev_t *removed[], int *nremoved) 611 struct hpsa_scsi_dev_t *removed[], int *nremoved)
612 { 612 {
613 /* assumes h->devlock is held */ 613 /* assumes h->devlock is held */
614 int i; 614 int i;
615 struct hpsa_scsi_dev_t *sd; 615 struct hpsa_scsi_dev_t *sd;
616 616
617 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 617 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
618 618
619 sd = h->dev[entry]; 619 sd = h->dev[entry];
620 removed[*nremoved] = h->dev[entry]; 620 removed[*nremoved] = h->dev[entry];
621 (*nremoved)++; 621 (*nremoved)++;
622 622
623 for (i = entry; i < h->ndevices-1; i++) 623 for (i = entry; i < h->ndevices-1; i++)
624 h->dev[i] = h->dev[i+1]; 624 h->dev[i] = h->dev[i+1];
625 h->ndevices--; 625 h->ndevices--;
626 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", 626 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
627 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, 627 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
628 sd->lun); 628 sd->lun);
629 } 629 }
630 630
631 #define SCSI3ADDR_EQ(a, b) ( \ 631 #define SCSI3ADDR_EQ(a, b) ( \
632 (a)[7] == (b)[7] && \ 632 (a)[7] == (b)[7] && \
633 (a)[6] == (b)[6] && \ 633 (a)[6] == (b)[6] && \
634 (a)[5] == (b)[5] && \ 634 (a)[5] == (b)[5] && \
635 (a)[4] == (b)[4] && \ 635 (a)[4] == (b)[4] && \
636 (a)[3] == (b)[3] && \ 636 (a)[3] == (b)[3] && \
637 (a)[2] == (b)[2] && \ 637 (a)[2] == (b)[2] && \
638 (a)[1] == (b)[1] && \ 638 (a)[1] == (b)[1] && \
639 (a)[0] == (b)[0]) 639 (a)[0] == (b)[0])
640 640
641 static void fixup_botched_add(struct ctlr_info *h, 641 static void fixup_botched_add(struct ctlr_info *h,
642 struct hpsa_scsi_dev_t *added) 642 struct hpsa_scsi_dev_t *added)
643 { 643 {
644 /* called when scsi_add_device fails in order to re-adjust 644 /* called when scsi_add_device fails in order to re-adjust
645 * h->dev[] to match the mid layer's view. 645 * h->dev[] to match the mid layer's view.
646 */ 646 */
647 unsigned long flags; 647 unsigned long flags;
648 int i, j; 648 int i, j;
649 649
650 spin_lock_irqsave(&h->lock, flags); 650 spin_lock_irqsave(&h->lock, flags);
651 for (i = 0; i < h->ndevices; i++) { 651 for (i = 0; i < h->ndevices; i++) {
652 if (h->dev[i] == added) { 652 if (h->dev[i] == added) {
653 for (j = i; j < h->ndevices-1; j++) 653 for (j = i; j < h->ndevices-1; j++)
654 h->dev[j] = h->dev[j+1]; 654 h->dev[j] = h->dev[j+1];
655 h->ndevices--; 655 h->ndevices--;
656 break; 656 break;
657 } 657 }
658 } 658 }
659 spin_unlock_irqrestore(&h->lock, flags); 659 spin_unlock_irqrestore(&h->lock, flags);
660 kfree(added); 660 kfree(added);
661 } 661 }
662 662
663 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, 663 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
664 struct hpsa_scsi_dev_t *dev2) 664 struct hpsa_scsi_dev_t *dev2)
665 { 665 {
666 /* we compare everything except lun and target as these 666 /* we compare everything except lun and target as these
667 * are not yet assigned. Compare parts likely 667 * are not yet assigned. Compare parts likely
668 * to differ first 668 * to differ first
669 */ 669 */
670 if (memcmp(dev1->scsi3addr, dev2->scsi3addr, 670 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
671 sizeof(dev1->scsi3addr)) != 0) 671 sizeof(dev1->scsi3addr)) != 0)
672 return 0; 672 return 0;
673 if (memcmp(dev1->device_id, dev2->device_id, 673 if (memcmp(dev1->device_id, dev2->device_id,
674 sizeof(dev1->device_id)) != 0) 674 sizeof(dev1->device_id)) != 0)
675 return 0; 675 return 0;
676 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) 676 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
677 return 0; 677 return 0;
678 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) 678 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
679 return 0; 679 return 0;
680 if (dev1->devtype != dev2->devtype) 680 if (dev1->devtype != dev2->devtype)
681 return 0; 681 return 0;
682 if (dev1->bus != dev2->bus) 682 if (dev1->bus != dev2->bus)
683 return 0; 683 return 0;
684 return 1; 684 return 1;
685 } 685 }
686 686
687 /* Find needle in haystack. If exact match found, return DEVICE_SAME, 687 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
688 * and return needle location in *index. If scsi3addr matches, but not 688 * and return needle location in *index. If scsi3addr matches, but not
689 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle 689 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
690 * location in *index. If needle not found, return DEVICE_NOT_FOUND. 690 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
691 */ 691 */
692 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, 692 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
693 struct hpsa_scsi_dev_t *haystack[], int haystack_size, 693 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
694 int *index) 694 int *index)
695 { 695 {
696 int i; 696 int i;
697 #define DEVICE_NOT_FOUND 0 697 #define DEVICE_NOT_FOUND 0
698 #define DEVICE_CHANGED 1 698 #define DEVICE_CHANGED 1
699 #define DEVICE_SAME 2 699 #define DEVICE_SAME 2
700 for (i = 0; i < haystack_size; i++) { 700 for (i = 0; i < haystack_size; i++) {
701 if (haystack[i] == NULL) /* previously removed. */ 701 if (haystack[i] == NULL) /* previously removed. */
702 continue; 702 continue;
703 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { 703 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
704 *index = i; 704 *index = i;
705 if (device_is_the_same(needle, haystack[i])) 705 if (device_is_the_same(needle, haystack[i]))
706 return DEVICE_SAME; 706 return DEVICE_SAME;
707 else 707 else
708 return DEVICE_CHANGED; 708 return DEVICE_CHANGED;
709 } 709 }
710 } 710 }
711 *index = -1; 711 *index = -1;
712 return DEVICE_NOT_FOUND; 712 return DEVICE_NOT_FOUND;
713 } 713 }
714 714
715 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, 715 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
716 struct hpsa_scsi_dev_t *sd[], int nsds) 716 struct hpsa_scsi_dev_t *sd[], int nsds)
717 { 717 {
718 /* sd contains scsi3 addresses and devtypes, and inquiry 718 /* sd contains scsi3 addresses and devtypes, and inquiry
719 * data. This function takes what's in sd to be the current 719 * data. This function takes what's in sd to be the current
720 * reality and updates h->dev[] to reflect that reality. 720 * reality and updates h->dev[] to reflect that reality.
721 */ 721 */
722 int i, entry, device_change, changes = 0; 722 int i, entry, device_change, changes = 0;
723 struct hpsa_scsi_dev_t *csd; 723 struct hpsa_scsi_dev_t *csd;
724 unsigned long flags; 724 unsigned long flags;
725 struct hpsa_scsi_dev_t **added, **removed; 725 struct hpsa_scsi_dev_t **added, **removed;
726 int nadded, nremoved; 726 int nadded, nremoved;
727 struct Scsi_Host *sh = NULL; 727 struct Scsi_Host *sh = NULL;
728 728
729 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA, 729 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
730 GFP_KERNEL); 730 GFP_KERNEL);
731 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA, 731 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
732 GFP_KERNEL); 732 GFP_KERNEL);
733 733
734 if (!added || !removed) { 734 if (!added || !removed) {
735 dev_warn(&h->pdev->dev, "out of memory in " 735 dev_warn(&h->pdev->dev, "out of memory in "
736 "adjust_hpsa_scsi_table\n"); 736 "adjust_hpsa_scsi_table\n");
737 goto free_and_out; 737 goto free_and_out;
738 } 738 }
739 739
740 spin_lock_irqsave(&h->devlock, flags); 740 spin_lock_irqsave(&h->devlock, flags);
741 741
742 /* find any devices in h->dev[] that are not in 742 /* find any devices in h->dev[] that are not in
743 * sd[] and remove them from h->dev[], and for any 743 * sd[] and remove them from h->dev[], and for any
744 * devices which have changed, remove the old device 744 * devices which have changed, remove the old device
745 * info and add the new device info. 745 * info and add the new device info.
746 */ 746 */
747 i = 0; 747 i = 0;
748 nremoved = 0; 748 nremoved = 0;
749 nadded = 0; 749 nadded = 0;
750 while (i < h->ndevices) { 750 while (i < h->ndevices) {
751 csd = h->dev[i]; 751 csd = h->dev[i];
752 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); 752 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
753 if (device_change == DEVICE_NOT_FOUND) { 753 if (device_change == DEVICE_NOT_FOUND) {
754 changes++; 754 changes++;
755 hpsa_scsi_remove_entry(h, hostno, i, 755 hpsa_scsi_remove_entry(h, hostno, i,
756 removed, &nremoved); 756 removed, &nremoved);
757 continue; /* remove ^^^, hence i not incremented */ 757 continue; /* remove ^^^, hence i not incremented */
758 } else if (device_change == DEVICE_CHANGED) { 758 } else if (device_change == DEVICE_CHANGED) {
759 changes++; 759 changes++;
760 hpsa_scsi_replace_entry(h, hostno, i, sd[entry], 760 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
761 added, &nadded, removed, &nremoved); 761 added, &nadded, removed, &nremoved);
762 /* Set it to NULL to prevent it from being freed 762 /* Set it to NULL to prevent it from being freed
763 * at the bottom of hpsa_update_scsi_devices() 763 * at the bottom of hpsa_update_scsi_devices()
764 */ 764 */
765 sd[entry] = NULL; 765 sd[entry] = NULL;
766 } 766 }
767 i++; 767 i++;
768 } 768 }
769 769
770 /* Now, make sure every device listed in sd[] is also 770 /* Now, make sure every device listed in sd[] is also
771 * listed in h->dev[], adding them if they aren't found 771 * listed in h->dev[], adding them if they aren't found
772 */ 772 */
773 773
774 for (i = 0; i < nsds; i++) { 774 for (i = 0; i < nsds; i++) {
775 if (!sd[i]) /* if already added above. */ 775 if (!sd[i]) /* if already added above. */
776 continue; 776 continue;
777 device_change = hpsa_scsi_find_entry(sd[i], h->dev, 777 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
778 h->ndevices, &entry); 778 h->ndevices, &entry);
779 if (device_change == DEVICE_NOT_FOUND) { 779 if (device_change == DEVICE_NOT_FOUND) {
780 changes++; 780 changes++;
781 if (hpsa_scsi_add_entry(h, hostno, sd[i], 781 if (hpsa_scsi_add_entry(h, hostno, sd[i],
782 added, &nadded) != 0) 782 added, &nadded) != 0)
783 break; 783 break;
784 sd[i] = NULL; /* prevent from being freed later. */ 784 sd[i] = NULL; /* prevent from being freed later. */
785 } else if (device_change == DEVICE_CHANGED) { 785 } else if (device_change == DEVICE_CHANGED) {
786 /* should never happen... */ 786 /* should never happen... */
787 changes++; 787 changes++;
788 dev_warn(&h->pdev->dev, 788 dev_warn(&h->pdev->dev,
789 "device unexpectedly changed.\n"); 789 "device unexpectedly changed.\n");
790 /* but if it does happen, we just ignore that device */ 790 /* but if it does happen, we just ignore that device */
791 } 791 }
792 } 792 }
793 spin_unlock_irqrestore(&h->devlock, flags); 793 spin_unlock_irqrestore(&h->devlock, flags);
794 794
795 /* Don't notify scsi mid layer of any changes the first time through 795 /* Don't notify scsi mid layer of any changes the first time through
796 * (or if there are no changes) scsi_scan_host will do it later the 796 * (or if there are no changes) scsi_scan_host will do it later the
797 * first time through. 797 * first time through.
798 */ 798 */
799 if (hostno == -1 || !changes) 799 if (hostno == -1 || !changes)
800 goto free_and_out; 800 goto free_and_out;
801 801
802 sh = h->scsi_host; 802 sh = h->scsi_host;
803 /* Notify scsi mid layer of any removed devices */ 803 /* Notify scsi mid layer of any removed devices */
804 for (i = 0; i < nremoved; i++) { 804 for (i = 0; i < nremoved; i++) {
805 struct scsi_device *sdev = 805 struct scsi_device *sdev =
806 scsi_device_lookup(sh, removed[i]->bus, 806 scsi_device_lookup(sh, removed[i]->bus,
807 removed[i]->target, removed[i]->lun); 807 removed[i]->target, removed[i]->lun);
808 if (sdev != NULL) { 808 if (sdev != NULL) {
809 scsi_remove_device(sdev); 809 scsi_remove_device(sdev);
810 scsi_device_put(sdev); 810 scsi_device_put(sdev);
811 } else { 811 } else {
812 /* We don't expect to get here. 812 /* We don't expect to get here.
813 * future cmds to this device will get selection 813 * future cmds to this device will get selection
814 * timeout as if the device was gone. 814 * timeout as if the device was gone.
815 */ 815 */
816 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " 816 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
817 " for removal.", hostno, removed[i]->bus, 817 " for removal.", hostno, removed[i]->bus,
818 removed[i]->target, removed[i]->lun); 818 removed[i]->target, removed[i]->lun);
819 } 819 }
820 kfree(removed[i]); 820 kfree(removed[i]);
821 removed[i] = NULL; 821 removed[i] = NULL;
822 } 822 }
823 823
824 /* Notify scsi mid layer of any added devices */ 824 /* Notify scsi mid layer of any added devices */
825 for (i = 0; i < nadded; i++) { 825 for (i = 0; i < nadded; i++) {
826 if (scsi_add_device(sh, added[i]->bus, 826 if (scsi_add_device(sh, added[i]->bus,
827 added[i]->target, added[i]->lun) == 0) 827 added[i]->target, added[i]->lun) == 0)
828 continue; 828 continue;
829 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " 829 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
830 "device not added.\n", hostno, added[i]->bus, 830 "device not added.\n", hostno, added[i]->bus,
831 added[i]->target, added[i]->lun); 831 added[i]->target, added[i]->lun);
832 /* now we have to remove it from h->dev, 832 /* now we have to remove it from h->dev,
833 * since it didn't get added to scsi mid layer 833 * since it didn't get added to scsi mid layer
834 */ 834 */
835 fixup_botched_add(h, added[i]); 835 fixup_botched_add(h, added[i]);
836 } 836 }
837 837
838 free_and_out: 838 free_and_out:
839 kfree(added); 839 kfree(added);
840 kfree(removed); 840 kfree(removed);
841 } 841 }
842 842
843 /* 843 /*
844 * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t * 844 * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t *
845 * Assume's h->devlock is held. 845 * Assume's h->devlock is held.
846 */ 846 */
847 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, 847 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
848 int bus, int target, int lun) 848 int bus, int target, int lun)
849 { 849 {
850 int i; 850 int i;
851 struct hpsa_scsi_dev_t *sd; 851 struct hpsa_scsi_dev_t *sd;
852 852
853 for (i = 0; i < h->ndevices; i++) { 853 for (i = 0; i < h->ndevices; i++) {
854 sd = h->dev[i]; 854 sd = h->dev[i];
855 if (sd->bus == bus && sd->target == target && sd->lun == lun) 855 if (sd->bus == bus && sd->target == target && sd->lun == lun)
856 return sd; 856 return sd;
857 } 857 }
858 return NULL; 858 return NULL;
859 } 859 }
860 860
861 /* link sdev->hostdata to our per-device structure. */ 861 /* link sdev->hostdata to our per-device structure. */
862 static int hpsa_slave_alloc(struct scsi_device *sdev) 862 static int hpsa_slave_alloc(struct scsi_device *sdev)
863 { 863 {
864 struct hpsa_scsi_dev_t *sd; 864 struct hpsa_scsi_dev_t *sd;
865 unsigned long flags; 865 unsigned long flags;
866 struct ctlr_info *h; 866 struct ctlr_info *h;
867 867
868 h = sdev_to_hba(sdev); 868 h = sdev_to_hba(sdev);
869 spin_lock_irqsave(&h->devlock, flags); 869 spin_lock_irqsave(&h->devlock, flags);
870 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 870 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
871 sdev_id(sdev), sdev->lun); 871 sdev_id(sdev), sdev->lun);
872 if (sd != NULL) 872 if (sd != NULL)
873 sdev->hostdata = sd; 873 sdev->hostdata = sd;
874 spin_unlock_irqrestore(&h->devlock, flags); 874 spin_unlock_irqrestore(&h->devlock, flags);
875 return 0; 875 return 0;
876 } 876 }
877 877
878 static void hpsa_slave_destroy(struct scsi_device *sdev) 878 static void hpsa_slave_destroy(struct scsi_device *sdev)
879 { 879 {
880 /* nothing to do. */ 880 /* nothing to do. */
881 } 881 }
882 882
883 static void hpsa_scsi_setup(struct ctlr_info *h) 883 static void hpsa_scsi_setup(struct ctlr_info *h)
884 { 884 {
885 h->ndevices = 0; 885 h->ndevices = 0;
886 h->scsi_host = NULL; 886 h->scsi_host = NULL;
887 spin_lock_init(&h->devlock); 887 spin_lock_init(&h->devlock);
888 } 888 }
889 889
890 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 890 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
891 { 891 {
892 int i; 892 int i;
893 893
894 if (!h->cmd_sg_list) 894 if (!h->cmd_sg_list)
895 return; 895 return;
896 for (i = 0; i < h->nr_cmds; i++) { 896 for (i = 0; i < h->nr_cmds; i++) {
897 kfree(h->cmd_sg_list[i]); 897 kfree(h->cmd_sg_list[i]);
898 h->cmd_sg_list[i] = NULL; 898 h->cmd_sg_list[i] = NULL;
899 } 899 }
900 kfree(h->cmd_sg_list); 900 kfree(h->cmd_sg_list);
901 h->cmd_sg_list = NULL; 901 h->cmd_sg_list = NULL;
902 } 902 }
903 903
904 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) 904 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
905 { 905 {
906 int i; 906 int i;
907 907
908 if (h->chainsize <= 0) 908 if (h->chainsize <= 0)
909 return 0; 909 return 0;
910 910
911 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, 911 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
912 GFP_KERNEL); 912 GFP_KERNEL);
913 if (!h->cmd_sg_list) 913 if (!h->cmd_sg_list)
914 return -ENOMEM; 914 return -ENOMEM;
915 for (i = 0; i < h->nr_cmds; i++) { 915 for (i = 0; i < h->nr_cmds; i++) {
916 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * 916 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
917 h->chainsize, GFP_KERNEL); 917 h->chainsize, GFP_KERNEL);
918 if (!h->cmd_sg_list[i]) 918 if (!h->cmd_sg_list[i])
919 goto clean; 919 goto clean;
920 } 920 }
921 return 0; 921 return 0;
922 922
923 clean: 923 clean:
924 hpsa_free_sg_chain_blocks(h); 924 hpsa_free_sg_chain_blocks(h);
925 return -ENOMEM; 925 return -ENOMEM;
926 } 926 }
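
The allocator above follows a common two-level pattern: first the array of per-command pointers, then one chain block per command, with a goto-based unwind that releases everything already allocated if any step fails. A minimal user-space sketch of the same idiom (alloc_blocks() and its parameters are illustrative, not driver code):

#include <stdlib.h>

/* Allocate n blocks of blksz bytes each; on failure free all and return NULL. */
static void **alloc_blocks(int n, size_t blksz)
{
	void **list = calloc(n, sizeof(*list));
	int i;

	if (!list)
		return NULL;
	for (i = 0; i < n; i++) {
		list[i] = malloc(blksz);
		if (!list[i])
			goto clean;
	}
	return list;
clean:
	while (--i >= 0)		/* unwind the partial allocation */
		free(list[i]);
	free(list);
	return NULL;
}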
927 927
928 static void hpsa_map_sg_chain_block(struct ctlr_info *h, 928 static void hpsa_map_sg_chain_block(struct ctlr_info *h,
929 struct CommandList *c) 929 struct CommandList *c)
930 { 930 {
931 struct SGDescriptor *chain_sg, *chain_block; 931 struct SGDescriptor *chain_sg, *chain_block;
932 u64 temp64; 932 u64 temp64;
933 933
934 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 934 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
935 chain_block = h->cmd_sg_list[c->cmdindex]; 935 chain_block = h->cmd_sg_list[c->cmdindex];
936 chain_sg->Ext = HPSA_SG_CHAIN; 936 chain_sg->Ext = HPSA_SG_CHAIN;
937 chain_sg->Len = sizeof(*chain_sg) * 937 chain_sg->Len = sizeof(*chain_sg) *
938 (c->Header.SGTotal - h->max_cmd_sg_entries); 938 (c->Header.SGTotal - h->max_cmd_sg_entries);
939 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, 939 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
940 PCI_DMA_TODEVICE); 940 PCI_DMA_TODEVICE);
941 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); 941 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
942 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); 942 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
943 } 943 }
944 944
945 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, 945 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
946 struct CommandList *c) 946 struct CommandList *c)
947 { 947 {
948 struct SGDescriptor *chain_sg; 948 struct SGDescriptor *chain_sg;
949 union u64bit temp64; 949 union u64bit temp64;
950 950
951 if (c->Header.SGTotal <= h->max_cmd_sg_entries) 951 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
952 return; 952 return;
953 953
954 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 954 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
955 temp64.val32.lower = chain_sg->Addr.lower; 955 temp64.val32.lower = chain_sg->Addr.lower;
956 temp64.val32.upper = chain_sg->Addr.upper; 956 temp64.val32.upper = chain_sg->Addr.upper;
957 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); 957 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
958 } 958 }
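
Both the map and unmap paths split a 64-bit bus address across two 32-bit fields, because the controller's SG descriptor stores Addr.lower and Addr.upper separately. The split in isolation (the address value below is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr64 = 0x0000001234abcd00ULL;	/* example bus address */
	uint32_t lower = (uint32_t)(addr64 & 0xFFFFFFFFULL);
	uint32_t upper = (uint32_t)((addr64 >> 32) & 0xFFFFFFFFULL);

	/* prints lower=34abcd00 upper=00000012 */
	printf("lower=%08x upper=%08x\n", (unsigned)lower, (unsigned)upper);
	return 0;
}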
959 959
960 static void complete_scsi_command(struct CommandList *cp, 960 static void complete_scsi_command(struct CommandList *cp,
961 int timeout, u32 tag) 961 int timeout, u32 tag)
962 { 962 {
963 struct scsi_cmnd *cmd; 963 struct scsi_cmnd *cmd;
964 struct ctlr_info *h; 964 struct ctlr_info *h;
965 struct ErrorInfo *ei; 965 struct ErrorInfo *ei;
966 966
967 unsigned char sense_key; 967 unsigned char sense_key;
968 unsigned char asc; /* additional sense code */ 968 unsigned char asc; /* additional sense code */
969 unsigned char ascq; /* additional sense code qualifier */ 969 unsigned char ascq; /* additional sense code qualifier */
970 970
971 ei = cp->err_info; 971 ei = cp->err_info;
972 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 972 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
973 h = cp->h; 973 h = cp->h;
974 974
975 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 975 scsi_dma_unmap(cmd); /* undo the DMA mappings */
976 if (cp->Header.SGTotal > h->max_cmd_sg_entries) 976 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
977 hpsa_unmap_sg_chain_block(h, cp); 977 hpsa_unmap_sg_chain_block(h, cp);
978 978
979 cmd->result = (DID_OK << 16); /* host byte */ 979 cmd->result = (DID_OK << 16); /* host byte */
980 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 980 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
981 cmd->result |= ei->ScsiStatus; 981 cmd->result |= ei->ScsiStatus;
982 982
983 /* copy the sense data whether we need to or not. */ 983 /* copy the sense data whether we need to or not. */
984 memcpy(cmd->sense_buffer, ei->SenseInfo, 984 memcpy(cmd->sense_buffer, ei->SenseInfo,
985 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? 985 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
986 SCSI_SENSE_BUFFERSIZE : 986 SCSI_SENSE_BUFFERSIZE :
987 ei->SenseLen); 987 ei->SenseLen);
988 scsi_set_resid(cmd, ei->ResidualCnt); 988 scsi_set_resid(cmd, ei->ResidualCnt);
989 989
990 if (ei->CommandStatus == 0) { 990 if (ei->CommandStatus == 0) {
991 cmd->scsi_done(cmd); 991 cmd->scsi_done(cmd);
992 cmd_free(h, cp); 992 cmd_free(h, cp);
993 return; 993 return;
994 } 994 }
995 995
996 /* an error has occurred */ 996 /* an error has occurred */
997 switch (ei->CommandStatus) { 997 switch (ei->CommandStatus) {
998 998
999 case CMD_TARGET_STATUS: 999 case CMD_TARGET_STATUS:
1000 if (ei->ScsiStatus) { 1000 if (ei->ScsiStatus) {
1001 /* Get sense key */ 1001 /* Get sense key */
1002 sense_key = 0xf & ei->SenseInfo[2]; 1002 sense_key = 0xf & ei->SenseInfo[2];
1003 /* Get additional sense code */ 1003 /* Get additional sense code */
1004 asc = ei->SenseInfo[12]; 1004 asc = ei->SenseInfo[12];
1005 /* Get additional sense code qualifier */ 1005 /* Get additional sense code qualifier */
1006 ascq = ei->SenseInfo[13]; 1006 ascq = ei->SenseInfo[13];
1007 } 1007 }
1008 1008
1009 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1009 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1010 if (check_for_unit_attention(h, cp)) { 1010 if (check_for_unit_attention(h, cp)) {
1011 cmd->result = DID_SOFT_ERROR << 16; 1011 cmd->result = DID_SOFT_ERROR << 16;
1012 break; 1012 break;
1013 } 1013 }
1014 if (sense_key == ILLEGAL_REQUEST) { 1014 if (sense_key == ILLEGAL_REQUEST) {
1015 /* 1015 /*
1016 * SCSI REPORT_LUNS is commonly unsupported on 1016 * SCSI REPORT_LUNS is commonly unsupported on
1017 * Smart Array. Suppress noisy complaint. 1017 * Smart Array. Suppress noisy complaint.
1018 */ 1018 */
1019 if (cp->Request.CDB[0] == REPORT_LUNS) 1019 if (cp->Request.CDB[0] == REPORT_LUNS)
1020 break; 1020 break;
1021 1021
1022 /* If ASC/ASCQ indicate a Logical Unit 1022 /* If ASC/ASCQ indicate a Logical Unit
1023 * Not Supported condition (ASC 0x25, ASCQ 0x00). 1023 * Not Supported condition (ASC 0x25, ASCQ 0x00).
1024 */ 1024 */
1025 if ((asc == 0x25) && (ascq == 0x0)) { 1025 if ((asc == 0x25) && (ascq == 0x0)) {
1026 dev_warn(&h->pdev->dev, "cp %p " 1026 dev_warn(&h->pdev->dev, "cp %p "
1027 "has check condition\n", cp); 1027 "has check condition\n", cp);
1028 break; 1028 break;
1029 } 1029 }
1030 } 1030 }
1031 1031
1032 if (sense_key == NOT_READY) { 1032 if (sense_key == NOT_READY) {
1033 /* If Sense is Not Ready, Logical Unit 1033 /* If Sense is Not Ready, Logical Unit
1034 * Not Ready, Manual Intervention 1034 * Not Ready, Manual Intervention
1035 * Required (ASC 0x04, ASCQ 0x03) 1035 * Required (ASC 0x04, ASCQ 0x03)
1036 */ 1036 */
1037 if ((asc == 0x04) && (ascq == 0x03)) { 1037 if ((asc == 0x04) && (ascq == 0x03)) {
1038 dev_warn(&h->pdev->dev, "cp %p " 1038 dev_warn(&h->pdev->dev, "cp %p "
1039 "has check condition: unit " 1039 "has check condition: unit "
1040 "not ready, manual " 1040 "not ready, manual "
1041 "intervention required\n", cp); 1041 "intervention required\n", cp);
1042 break; 1042 break;
1043 } 1043 }
1044 } 1044 }
1045 if (sense_key == ABORTED_COMMAND) { 1045 if (sense_key == ABORTED_COMMAND) {
1046 /* Aborted command is retryable */ 1046 /* Aborted command is retryable */
1047 dev_warn(&h->pdev->dev, "cp %p " 1047 dev_warn(&h->pdev->dev, "cp %p "
1048 "has check condition: aborted command: " 1048 "has check condition: aborted command: "
1049 "ASC: 0x%x, ASCQ: 0x%x\n", 1049 "ASC: 0x%x, ASCQ: 0x%x\n",
1050 cp, asc, ascq); 1050 cp, asc, ascq);
1051 cmd->result = DID_SOFT_ERROR << 16; 1051 cmd->result = DID_SOFT_ERROR << 16;
1052 break; 1052 break;
1053 } 1053 }
1054 /* Must be some other type of check condition */ 1054 /* Must be some other type of check condition */
1055 dev_warn(&h->pdev->dev, "cp %p has check condition: " 1055 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1056 "unknown type: " 1056 "unknown type: "
1057 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1057 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1058 "Returning result: 0x%x, " 1058 "Returning result: 0x%x, "
1059 "cmd=[%02x %02x %02x %02x %02x " 1059 "cmd=[%02x %02x %02x %02x %02x "
1060 "%02x %02x %02x %02x %02x %02x " 1060 "%02x %02x %02x %02x %02x %02x "
1061 "%02x %02x %02x %02x %02x]\n", 1061 "%02x %02x %02x %02x %02x]\n",
1062 cp, sense_key, asc, ascq, 1062 cp, sense_key, asc, ascq,
1063 cmd->result, 1063 cmd->result,
1064 cmd->cmnd[0], cmd->cmnd[1], 1064 cmd->cmnd[0], cmd->cmnd[1],
1065 cmd->cmnd[2], cmd->cmnd[3], 1065 cmd->cmnd[2], cmd->cmnd[3],
1066 cmd->cmnd[4], cmd->cmnd[5], 1066 cmd->cmnd[4], cmd->cmnd[5],
1067 cmd->cmnd[6], cmd->cmnd[7], 1067 cmd->cmnd[6], cmd->cmnd[7],
1068 cmd->cmnd[8], cmd->cmnd[9], 1068 cmd->cmnd[8], cmd->cmnd[9],
1069 cmd->cmnd[10], cmd->cmnd[11], 1069 cmd->cmnd[10], cmd->cmnd[11],
1070 cmd->cmnd[12], cmd->cmnd[13], 1070 cmd->cmnd[12], cmd->cmnd[13],
1071 cmd->cmnd[14], cmd->cmnd[15]); 1071 cmd->cmnd[14], cmd->cmnd[15]);
1072 break; 1072 break;
1073 } 1073 }
1074 1074
1075 1075
1076 /* Problem was not a check condition 1076 /* Problem was not a check condition
1077 * Pass it up to the upper layers... 1077 * Pass it up to the upper layers...
1078 */ 1078 */
1079 if (ei->ScsiStatus) { 1079 if (ei->ScsiStatus) {
1080 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1080 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1081 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1081 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1082 "Returning result: 0x%x\n", 1082 "Returning result: 0x%x\n",
1083 cp, ei->ScsiStatus, 1083 cp, ei->ScsiStatus,
1084 sense_key, asc, ascq, 1084 sense_key, asc, ascq,
1085 cmd->result); 1085 cmd->result);
1086 } else { /* scsi status is zero??? How??? */ 1086 } else { /* scsi status is zero??? How??? */
1087 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1087 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1088 "Returning no connection.\n", cp), 1088 "Returning no connection.\n", cp),
1089 1089
1090 /* Ordinarily, this case should never happen, 1090 /* Ordinarily, this case should never happen,
1091 * but there is a bug in some released firmware 1091 * but there is a bug in some released firmware
1092 * revisions that allows it to happen if, for 1092 * revisions that allows it to happen if, for
1093 * example, a 4100 backplane loses power and 1093 * example, a 4100 backplane loses power and
1094 * the tape drive is in it. We assume that 1094 * the tape drive is in it. We assume that
1095 * it's a fatal error of some kind because we 1095 * it's a fatal error of some kind because we
1096 * can't show that it wasn't. We will make it 1096 * can't show that it wasn't. We will make it
1097 * look like selection timeout since that is 1097 * look like selection timeout since that is
1098 * the most common reason for this to occur, 1098 * the most common reason for this to occur,
1099 * and it's severe enough. 1099 * and it's severe enough.
1100 */ 1100 */
1101 1101
1102 cmd->result = DID_NO_CONNECT << 16; 1102 cmd->result = DID_NO_CONNECT << 16;
1103 } 1103 }
1104 break; 1104 break;
1105 1105
1106 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1106 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1107 break; 1107 break;
1108 case CMD_DATA_OVERRUN: 1108 case CMD_DATA_OVERRUN:
1109 dev_warn(&h->pdev->dev, "cp %p has" 1109 dev_warn(&h->pdev->dev, "cp %p has"
1110 " completed with data overrun " 1110 " completed with data overrun "
1111 "reported\n", cp); 1111 "reported\n", cp);
1112 break; 1112 break;
1113 case CMD_INVALID: { 1113 case CMD_INVALID: {
1114 /* print_bytes(cp, sizeof(*cp), 1, 0); 1114 /* print_bytes(cp, sizeof(*cp), 1, 0);
1115 print_cmd(cp); */ 1115 print_cmd(cp); */
1116 /* We get CMD_INVALID if you address a non-existent device 1116 /* We get CMD_INVALID if you address a non-existent device
1117 * instead of a selection timeout (no response). You will 1117 * instead of a selection timeout (no response). You will
1118 * see this if you yank out a drive, then try to access it. 1118 * see this if you yank out a drive, then try to access it.
1119 * This is kind of a shame because it means that any other 1119 * This is kind of a shame because it means that any other
1120 * CMD_INVALID (e.g. driver bug) will get interpreted as a 1120 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1121 * missing target. */ 1121 * missing target. */
1122 cmd->result = DID_NO_CONNECT << 16; 1122 cmd->result = DID_NO_CONNECT << 16;
1123 } 1123 }
1124 break; 1124 break;
1125 case CMD_PROTOCOL_ERR: 1125 case CMD_PROTOCOL_ERR:
1126 dev_warn(&h->pdev->dev, "cp %p has " 1126 dev_warn(&h->pdev->dev, "cp %p has "
1127 "protocol error \n", cp); 1127 "protocol error \n", cp);
1128 break; 1128 break;
1129 case CMD_HARDWARE_ERR: 1129 case CMD_HARDWARE_ERR:
1130 cmd->result = DID_ERROR << 16; 1130 cmd->result = DID_ERROR << 16;
1131 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); 1131 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1132 break; 1132 break;
1133 case CMD_CONNECTION_LOST: 1133 case CMD_CONNECTION_LOST:
1134 cmd->result = DID_ERROR << 16; 1134 cmd->result = DID_ERROR << 16;
1135 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); 1135 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1136 break; 1136 break;
1137 case CMD_ABORTED: 1137 case CMD_ABORTED:
1138 cmd->result = DID_ABORT << 16; 1138 cmd->result = DID_ABORT << 16;
1139 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", 1139 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1140 cp, ei->ScsiStatus); 1140 cp, ei->ScsiStatus);
1141 break; 1141 break;
1142 case CMD_ABORT_FAILED: 1142 case CMD_ABORT_FAILED:
1143 cmd->result = DID_ERROR << 16; 1143 cmd->result = DID_ERROR << 16;
1144 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1144 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1145 break; 1145 break;
1146 case CMD_UNSOLICITED_ABORT: 1146 case CMD_UNSOLICITED_ABORT:
1147 cmd->result = DID_RESET << 16; 1147 cmd->result = DID_RESET << 16;
1148 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1148 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1149 "abort\n", cp); 1149 "abort\n", cp);
1150 break; 1150 break;
1151 case CMD_TIMEOUT: 1151 case CMD_TIMEOUT:
1152 cmd->result = DID_TIME_OUT << 16; 1152 cmd->result = DID_TIME_OUT << 16;
1153 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp); 1153 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1154 break; 1154 break;
1155 case CMD_UNABORTABLE: 1155 case CMD_UNABORTABLE:
1156 cmd->result = DID_ERROR << 16; 1156 cmd->result = DID_ERROR << 16;
1157 dev_warn(&h->pdev->dev, "Command unabortable\n"); 1157 dev_warn(&h->pdev->dev, "Command unabortable\n");
1158 break; 1158 break;
1159 default: 1159 default:
1160 cmd->result = DID_ERROR << 16; 1160 cmd->result = DID_ERROR << 16;
1161 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1161 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1162 cp, ei->CommandStatus); 1162 cp, ei->CommandStatus);
1163 } 1163 }
1164 cmd->scsi_done(cmd); 1164 cmd->scsi_done(cmd);
1165 cmd_free(h, cp); 1165 cmd_free(h, cp);
1166 } 1166 }
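
The sense decode at the top of complete_scsi_command() relies on the fixed-format sense data layout: the low nibble of byte 2 is the sense key, and bytes 12 and 13 carry the ASC/ASCQ pair. A standalone sketch of that decode on a fabricated sense buffer:

#include <stdio.h>

int main(void)
{
	/* fixed-format sense: NOT READY, ASC/ASCQ 04/03 (manual intervention) */
	unsigned char sense[18] = { 0x70, 0, 0x02, [12] = 0x04, [13] = 0x03 };
	unsigned char sense_key = sense[2] & 0xf;
	unsigned char asc = sense[12];	/* additional sense code */
	unsigned char ascq = sense[13];	/* additional sense code qualifier */

	/* prints key=0x2 asc=0x4 ascq=0x3 */
	printf("key=0x%x asc=0x%x ascq=0x%x\n", sense_key, asc, ascq);
	return 0;
}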
1167 1167
1168 static int hpsa_scsi_detect(struct ctlr_info *h) 1168 static int hpsa_scsi_detect(struct ctlr_info *h)
1169 { 1169 {
1170 struct Scsi_Host *sh; 1170 struct Scsi_Host *sh;
1171 int error; 1171 int error;
1172 1172
1173 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 1173 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1174 if (sh == NULL) 1174 if (sh == NULL)
1175 goto fail; 1175 goto fail;
1176 1176
1177 sh->io_port = 0; 1177 sh->io_port = 0;
1178 sh->n_io_port = 0; 1178 sh->n_io_port = 0;
1179 sh->this_id = -1; 1179 sh->this_id = -1;
1180 sh->max_channel = 3; 1180 sh->max_channel = 3;
1181 sh->max_cmd_len = MAX_COMMAND_SIZE; 1181 sh->max_cmd_len = MAX_COMMAND_SIZE;
1182 sh->max_lun = HPSA_MAX_LUN; 1182 sh->max_lun = HPSA_MAX_LUN;
1183 sh->max_id = HPSA_MAX_LUN; 1183 sh->max_id = HPSA_MAX_LUN;
1184 sh->can_queue = h->nr_cmds; 1184 sh->can_queue = h->nr_cmds;
1185 sh->cmd_per_lun = h->nr_cmds; 1185 sh->cmd_per_lun = h->nr_cmds;
1186 sh->sg_tablesize = h->maxsgentries; 1186 sh->sg_tablesize = h->maxsgentries;
1187 h->scsi_host = sh; 1187 h->scsi_host = sh;
1188 sh->hostdata[0] = (unsigned long) h; 1188 sh->hostdata[0] = (unsigned long) h;
1189 sh->irq = h->intr[PERF_MODE_INT]; 1189 sh->irq = h->intr[PERF_MODE_INT];
1190 sh->unique_id = sh->irq; 1190 sh->unique_id = sh->irq;
1191 error = scsi_add_host(sh, &h->pdev->dev); 1191 error = scsi_add_host(sh, &h->pdev->dev);
1192 if (error) 1192 if (error)
1193 goto fail_host_put; 1193 goto fail_host_put;
1194 scsi_scan_host(sh); 1194 scsi_scan_host(sh);
1195 return 0; 1195 return 0;
1196 1196
1197 fail_host_put: 1197 fail_host_put:
1198 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" 1198 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1199 " failed for controller %d\n", h->ctlr); 1199 " failed for controller %d\n", h->ctlr);
1200 scsi_host_put(sh); 1200 scsi_host_put(sh);
1201 return error; 1201 return error;
1202 fail: 1202 fail:
1203 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" 1203 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1204 " failed for controller %d\n", h->ctlr); 1204 " failed for controller %d\n", h->ctlr);
1205 return -ENOMEM; 1205 return -ENOMEM;
1206 } 1206 }
1207 1207
1208 static void hpsa_pci_unmap(struct pci_dev *pdev, 1208 static void hpsa_pci_unmap(struct pci_dev *pdev,
1209 struct CommandList *c, int sg_used, int data_direction) 1209 struct CommandList *c, int sg_used, int data_direction)
1210 { 1210 {
1211 int i; 1211 int i;
1212 union u64bit addr64; 1212 union u64bit addr64;
1213 1213
1214 for (i = 0; i < sg_used; i++) { 1214 for (i = 0; i < sg_used; i++) {
1215 addr64.val32.lower = c->SG[i].Addr.lower; 1215 addr64.val32.lower = c->SG[i].Addr.lower;
1216 addr64.val32.upper = c->SG[i].Addr.upper; 1216 addr64.val32.upper = c->SG[i].Addr.upper;
1217 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1217 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1218 data_direction); 1218 data_direction);
1219 } 1219 }
1220 } 1220 }
1221 1221
1222 static void hpsa_map_one(struct pci_dev *pdev, 1222 static void hpsa_map_one(struct pci_dev *pdev,
1223 struct CommandList *cp, 1223 struct CommandList *cp,
1224 unsigned char *buf, 1224 unsigned char *buf,
1225 size_t buflen, 1225 size_t buflen,
1226 int data_direction) 1226 int data_direction)
1227 { 1227 {
1228 u64 addr64; 1228 u64 addr64;
1229 1229
1230 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1230 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1231 cp->Header.SGList = 0; 1231 cp->Header.SGList = 0;
1232 cp->Header.SGTotal = 0; 1232 cp->Header.SGTotal = 0;
1233 return; 1233 return;
1234 } 1234 }
1235 1235
1236 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1236 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1237 cp->SG[0].Addr.lower = 1237 cp->SG[0].Addr.lower =
1238 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1238 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1239 cp->SG[0].Addr.upper = 1239 cp->SG[0].Addr.upper =
1240 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1240 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1241 cp->SG[0].Len = buflen; 1241 cp->SG[0].Len = buflen;
1242 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ 1242 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1243 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ 1243 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1244 } 1244 }
1245 1245
1246 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1246 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1247 struct CommandList *c) 1247 struct CommandList *c)
1248 { 1248 {
1249 DECLARE_COMPLETION_ONSTACK(wait); 1249 DECLARE_COMPLETION_ONSTACK(wait);
1250 1250
1251 c->waiting = &wait; 1251 c->waiting = &wait;
1252 enqueue_cmd_and_start_io(h, c); 1252 enqueue_cmd_and_start_io(h, c);
1253 wait_for_completion(&wait); 1253 wait_for_completion(&wait);
1254 } 1254 }
1255 1255
1256 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 1256 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1257 struct CommandList *c, int data_direction) 1257 struct CommandList *c, int data_direction)
1258 { 1258 {
1259 int retry_count = 0; 1259 int retry_count = 0;
1260 1260
1261 do { 1261 do {
1262 memset(c->err_info, 0, sizeof(c->err_info)); 1262 memset(c->err_info, 0, sizeof(c->err_info));
1263 hpsa_scsi_do_simple_cmd_core(h, c); 1263 hpsa_scsi_do_simple_cmd_core(h, c);
1264 retry_count++; 1264 retry_count++;
1265 } while (check_for_unit_attention(h, c) && retry_count <= 3); 1265 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1266 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 1266 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1267 } 1267 }
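
The wrapper above reissues the command as long as the controller keeps reporting a unit attention, giving up after three retries (four attempts in all). The loop shape in isolation, with needs_retry() standing in for check_for_unit_attention():

#include <stdio.h>

static int needs_retry(void)
{
	return 1;	/* pretend the unit attention never clears */
}

int main(void)
{
	int retry_count = 0;

	do {
		/* issue the command here */
		retry_count++;
	} while (needs_retry() && retry_count <= 3);

	printf("attempts: %d\n", retry_count);	/* prints 4 */
	return 0;
}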
1268 1268
1269 static void hpsa_scsi_interpret_error(struct CommandList *cp) 1269 static void hpsa_scsi_interpret_error(struct CommandList *cp)
1270 { 1270 {
1271 struct ErrorInfo *ei; 1271 struct ErrorInfo *ei;
1272 struct device *d = &cp->h->pdev->dev; 1272 struct device *d = &cp->h->pdev->dev;
1273 1273
1274 ei = cp->err_info; 1274 ei = cp->err_info;
1275 switch (ei->CommandStatus) { 1275 switch (ei->CommandStatus) {
1276 case CMD_TARGET_STATUS: 1276 case CMD_TARGET_STATUS:
1277 dev_warn(d, "cmd %p has completed with errors\n", cp); 1277 dev_warn(d, "cmd %p has completed with errors\n", cp);
1278 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, 1278 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1279 ei->ScsiStatus); 1279 ei->ScsiStatus);
1280 if (ei->ScsiStatus == 0) 1280 if (ei->ScsiStatus == 0)
1281 dev_warn(d, "SCSI status is abnormally zero. " 1281 dev_warn(d, "SCSI status is abnormally zero. "
1282 "(probably indicates selection timeout " 1282 "(probably indicates selection timeout "
1283 "reported incorrectly due to a known " 1283 "reported incorrectly due to a known "
1284 "firmware bug, circa July, 2001.)\n"); 1284 "firmware bug, circa July, 2001.)\n");
1285 break; 1285 break;
1286 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1286 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1287 dev_info(d, "UNDERRUN\n"); 1287 dev_info(d, "UNDERRUN\n");
1288 break; 1288 break;
1289 case CMD_DATA_OVERRUN: 1289 case CMD_DATA_OVERRUN:
1290 dev_warn(d, "cp %p has completed with data overrun\n", cp); 1290 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1291 break; 1291 break;
1292 case CMD_INVALID: { 1292 case CMD_INVALID: {
1293 /* controller unfortunately reports SCSI passthru's 1293 /* controller unfortunately reports SCSI passthru's
1294 * to non-existent targets as invalid commands. 1294 * to non-existent targets as invalid commands.
1295 */ 1295 */
1296 dev_warn(d, "cp %p is reported invalid (probably means " 1296 dev_warn(d, "cp %p is reported invalid (probably means "
1297 "target device no longer present)\n", cp); 1297 "target device no longer present)\n", cp);
1298 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); 1298 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1299 print_cmd(cp); */ 1299 print_cmd(cp); */
1300 } 1300 }
1301 break; 1301 break;
1302 case CMD_PROTOCOL_ERR: 1302 case CMD_PROTOCOL_ERR:
1303 dev_warn(d, "cp %p has protocol error\n", cp); 1303 dev_warn(d, "cp %p has protocol error\n", cp);
1304 break; 1304 break;
1305 case CMD_HARDWARE_ERR: 1305 case CMD_HARDWARE_ERR:
1306 /* cmd->result = DID_ERROR << 16; */ 1306 /* cmd->result = DID_ERROR << 16; */
1307 dev_warn(d, "cp %p had hardware error\n", cp); 1307 dev_warn(d, "cp %p had hardware error\n", cp);
1308 break; 1308 break;
1309 case CMD_CONNECTION_LOST: 1309 case CMD_CONNECTION_LOST:
1310 dev_warn(d, "cp %p had connection lost\n", cp); 1310 dev_warn(d, "cp %p had connection lost\n", cp);
1311 break; 1311 break;
1312 case CMD_ABORTED: 1312 case CMD_ABORTED:
1313 dev_warn(d, "cp %p was aborted\n", cp); 1313 dev_warn(d, "cp %p was aborted\n", cp);
1314 break; 1314 break;
1315 case CMD_ABORT_FAILED: 1315 case CMD_ABORT_FAILED:
1316 dev_warn(d, "cp %p reports abort failed\n", cp); 1316 dev_warn(d, "cp %p reports abort failed\n", cp);
1317 break; 1317 break;
1318 case CMD_UNSOLICITED_ABORT: 1318 case CMD_UNSOLICITED_ABORT:
1319 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); 1319 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1320 break; 1320 break;
1321 case CMD_TIMEOUT: 1321 case CMD_TIMEOUT:
1322 dev_warn(d, "cp %p timed out\n", cp); 1322 dev_warn(d, "cp %p timed out\n", cp);
1323 break; 1323 break;
1324 case CMD_UNABORTABLE: 1324 case CMD_UNABORTABLE:
1325 dev_warn(d, "Command unabortable\n"); 1325 dev_warn(d, "Command unabortable\n");
1326 break; 1326 break;
1327 default: 1327 default:
1328 dev_warn(d, "cp %p returned unknown status %x\n", cp, 1328 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1329 ei->CommandStatus); 1329 ei->CommandStatus);
1330 } 1330 }
1331 } 1331 }
1332 1332
1333 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 1333 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1334 unsigned char page, unsigned char *buf, 1334 unsigned char page, unsigned char *buf,
1335 unsigned char bufsize) 1335 unsigned char bufsize)
1336 { 1336 {
1337 int rc = IO_OK; 1337 int rc = IO_OK;
1338 struct CommandList *c; 1338 struct CommandList *c;
1339 struct ErrorInfo *ei; 1339 struct ErrorInfo *ei;
1340 1340
1341 c = cmd_special_alloc(h); 1341 c = cmd_special_alloc(h);
1342 1342
1343 if (c == NULL) { /* trouble... */ 1343 if (c == NULL) { /* trouble... */
1344 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1344 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1345 return -ENOMEM; 1345 return -ENOMEM;
1346 } 1346 }
1347 1347
1348 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); 1348 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1349 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 1349 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1350 ei = c->err_info; 1350 ei = c->err_info;
1351 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 1351 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1352 hpsa_scsi_interpret_error(c); 1352 hpsa_scsi_interpret_error(c);
1353 rc = -1; 1353 rc = -1;
1354 } 1354 }
1355 cmd_special_free(h, c); 1355 cmd_special_free(h, c);
1356 return rc; 1356 return rc;
1357 } 1357 }
1358 1358
1359 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) 1359 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1360 { 1360 {
1361 int rc = IO_OK; 1361 int rc = IO_OK;
1362 struct CommandList *c; 1362 struct CommandList *c;
1363 struct ErrorInfo *ei; 1363 struct ErrorInfo *ei;
1364 1364
1365 c = cmd_special_alloc(h); 1365 c = cmd_special_alloc(h);
1366 1366
1367 if (c == NULL) { /* trouble... */ 1367 if (c == NULL) { /* trouble... */
1368 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1368 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1369 return -ENOMEM; 1369 return -ENOMEM;
1370 } 1370 }
1371 1371
1372 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); 1372 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1373 hpsa_scsi_do_simple_cmd_core(h, c); 1373 hpsa_scsi_do_simple_cmd_core(h, c);
1374 /* no unmap needed here because no data xfer. */ 1374 /* no unmap needed here because no data xfer. */
1375 1375
1376 ei = c->err_info; 1376 ei = c->err_info;
1377 if (ei->CommandStatus != 0) { 1377 if (ei->CommandStatus != 0) {
1378 hpsa_scsi_interpret_error(c); 1378 hpsa_scsi_interpret_error(c);
1379 rc = -1; 1379 rc = -1;
1380 } 1380 }
1381 cmd_special_free(h, c); 1381 cmd_special_free(h, c);
1382 return rc; 1382 return rc;
1383 } 1383 }
1384 1384
1385 static void hpsa_get_raid_level(struct ctlr_info *h, 1385 static void hpsa_get_raid_level(struct ctlr_info *h,
1386 unsigned char *scsi3addr, unsigned char *raid_level) 1386 unsigned char *scsi3addr, unsigned char *raid_level)
1387 { 1387 {
1388 int rc; 1388 int rc;
1389 unsigned char *buf; 1389 unsigned char *buf;
1390 1390
1391 *raid_level = RAID_UNKNOWN; 1391 *raid_level = RAID_UNKNOWN;
1392 buf = kzalloc(64, GFP_KERNEL); 1392 buf = kzalloc(64, GFP_KERNEL);
1393 if (!buf) 1393 if (!buf)
1394 return; 1394 return;
1395 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); 1395 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1396 if (rc == 0) 1396 if (rc == 0)
1397 *raid_level = buf[8]; 1397 *raid_level = buf[8];
1398 if (*raid_level > RAID_UNKNOWN) 1398 if (*raid_level > RAID_UNKNOWN)
1399 *raid_level = RAID_UNKNOWN; 1399 *raid_level = RAID_UNKNOWN;
1400 kfree(buf); 1400 kfree(buf);
1401 return; 1401 return;
1402 } 1402 }
1403 1403
1404 /* Get the device id from inquiry page 0x83 */ 1404 /* Get the device id from inquiry page 0x83 */
1405 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 1405 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1406 unsigned char *device_id, int buflen) 1406 unsigned char *device_id, int buflen)
1407 { 1407 {
1408 int rc; 1408 int rc;
1409 unsigned char *buf; 1409 unsigned char *buf;
1410 1410
1411 if (buflen > 16) 1411 if (buflen > 16)
1412 buflen = 16; 1412 buflen = 16;
1413 buf = kzalloc(64, GFP_KERNEL); 1413 buf = kzalloc(64, GFP_KERNEL);
1414 if (!buf) 1414 if (!buf)
1415 return -1; 1415 return -1;
1416 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); 1416 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1417 if (rc == 0) 1417 if (rc == 0)
1418 memcpy(device_id, &buf[8], buflen); 1418 memcpy(device_id, &buf[8], buflen);
1419 kfree(buf); 1419 kfree(buf);
1420 return rc != 0; 1420 return rc != 0;
1421 } 1421 }
1422 1422
1423 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 1423 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1424 struct ReportLUNdata *buf, int bufsize, 1424 struct ReportLUNdata *buf, int bufsize,
1425 int extended_response) 1425 int extended_response)
1426 { 1426 {
1427 int rc = IO_OK; 1427 int rc = IO_OK;
1428 struct CommandList *c; 1428 struct CommandList *c;
1429 unsigned char scsi3addr[8]; 1429 unsigned char scsi3addr[8];
1430 struct ErrorInfo *ei; 1430 struct ErrorInfo *ei;
1431 1431
1432 c = cmd_special_alloc(h); 1432 c = cmd_special_alloc(h);
1433 if (c == NULL) { /* trouble... */ 1433 if (c == NULL) { /* trouble... */
1434 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1434 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1435 return -1; 1435 return -1;
1436 } 1436 }
1437 /* address the controller */ 1437 /* address the controller */
1438 memset(scsi3addr, 0, sizeof(scsi3addr)); 1438 memset(scsi3addr, 0, sizeof(scsi3addr));
1439 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 1439 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1440 buf, bufsize, 0, scsi3addr, TYPE_CMD); 1440 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1441 if (extended_response) 1441 if (extended_response)
1442 c->Request.CDB[1] = extended_response; 1442 c->Request.CDB[1] = extended_response;
1443 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 1443 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1444 ei = c->err_info; 1444 ei = c->err_info;
1445 if (ei->CommandStatus != 0 && 1445 if (ei->CommandStatus != 0 &&
1446 ei->CommandStatus != CMD_DATA_UNDERRUN) { 1446 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1447 hpsa_scsi_interpret_error(c); 1447 hpsa_scsi_interpret_error(c);
1448 rc = -1; 1448 rc = -1;
1449 } 1449 }
1450 cmd_special_free(h, c); 1450 cmd_special_free(h, c);
1451 return rc; 1451 return rc;
1452 } 1452 }
1453 1453
1454 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 1454 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1455 struct ReportLUNdata *buf, 1455 struct ReportLUNdata *buf,
1456 int bufsize, int extended_response) 1456 int bufsize, int extended_response)
1457 { 1457 {
1458 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); 1458 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1459 } 1459 }
1460 1460
1461 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 1461 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1462 struct ReportLUNdata *buf, int bufsize) 1462 struct ReportLUNdata *buf, int bufsize)
1463 { 1463 {
1464 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 1464 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1465 } 1465 }
1466 1466
1467 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 1467 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1468 int bus, int target, int lun) 1468 int bus, int target, int lun)
1469 { 1469 {
1470 device->bus = bus; 1470 device->bus = bus;
1471 device->target = target; 1471 device->target = target;
1472 device->lun = lun; 1472 device->lun = lun;
1473 } 1473 }
1474 1474
1475 static int hpsa_update_device_info(struct ctlr_info *h, 1475 static int hpsa_update_device_info(struct ctlr_info *h,
1476 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1476 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1477 { 1477 {
1478 #define OBDR_TAPE_INQ_SIZE 49 1478 #define OBDR_TAPE_INQ_SIZE 49
1479 unsigned char *inq_buff; 1479 unsigned char *inq_buff;
1480 1480
1481 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1481 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1482 if (!inq_buff) 1482 if (!inq_buff)
1483 goto bail_out; 1483 goto bail_out;
1484 1484
1485 /* Do an inquiry to the device to see what it is. */ 1485 /* Do an inquiry to the device to see what it is. */
1486 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 1486 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1487 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 1487 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1488 /* Inquiry failed (msg printed already) */ 1488 /* Inquiry failed (msg printed already) */
1489 dev_err(&h->pdev->dev, 1489 dev_err(&h->pdev->dev,
1490 "hpsa_update_device_info: inquiry failed\n"); 1490 "hpsa_update_device_info: inquiry failed\n");
1491 goto bail_out; 1491 goto bail_out;
1492 } 1492 }
1493 1493
1494 this_device->devtype = (inq_buff[0] & 0x1f); 1494 this_device->devtype = (inq_buff[0] & 0x1f);
1495 memcpy(this_device->scsi3addr, scsi3addr, 8); 1495 memcpy(this_device->scsi3addr, scsi3addr, 8);
1496 memcpy(this_device->vendor, &inq_buff[8], 1496 memcpy(this_device->vendor, &inq_buff[8],
1497 sizeof(this_device->vendor)); 1497 sizeof(this_device->vendor));
1498 memcpy(this_device->model, &inq_buff[16], 1498 memcpy(this_device->model, &inq_buff[16],
1499 sizeof(this_device->model)); 1499 sizeof(this_device->model));
1500 memset(this_device->device_id, 0, 1500 memset(this_device->device_id, 0,
1501 sizeof(this_device->device_id)); 1501 sizeof(this_device->device_id));
1502 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 1502 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1503 sizeof(this_device->device_id)); 1503 sizeof(this_device->device_id));
1504 1504
1505 if (this_device->devtype == TYPE_DISK && 1505 if (this_device->devtype == TYPE_DISK &&
1506 is_logical_dev_addr_mode(scsi3addr)) 1506 is_logical_dev_addr_mode(scsi3addr))
1507 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 1507 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1508 else 1508 else
1509 this_device->raid_level = RAID_UNKNOWN; 1509 this_device->raid_level = RAID_UNKNOWN;
1510 1510
1511 kfree(inq_buff); 1511 kfree(inq_buff);
1512 return 0; 1512 return 0;
1513 1513
1514 bail_out: 1514 bail_out:
1515 kfree(inq_buff); 1515 kfree(inq_buff);
1516 return 1; 1516 return 1;
1517 } 1517 }
1518 1518
1519 static unsigned char *msa2xxx_model[] = { 1519 static unsigned char *msa2xxx_model[] = {
1520 "MSA2012", 1520 "MSA2012",
1521 "MSA2024", 1521 "MSA2024",
1522 "MSA2312", 1522 "MSA2312",
1523 "MSA2324", 1523 "MSA2324",
1524 NULL, 1524 NULL,
1525 }; 1525 };
1526 1526
1527 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 1527 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1528 { 1528 {
1529 int i; 1529 int i;
1530 1530
1531 for (i = 0; msa2xxx_model[i]; i++) 1531 for (i = 0; msa2xxx_model[i]; i++)
1532 if (strncmp(device->model, msa2xxx_model[i], 1532 if (strncmp(device->model, msa2xxx_model[i],
1533 strlen(msa2xxx_model[i])) == 0) 1533 strlen(msa2xxx_model[i])) == 0)
1534 return 1; 1534 return 1;
1535 return 0; 1535 return 0;
1536 } 1536 }
1537 1537
1538 /* Helper function to assign bus, target, lun mapping of devices. 1538 /* Helper function to assign bus, target, lun mapping of devices.
1539 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical 1539 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1540 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. 1540 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1541 * Logical drive target and lun are assigned at this time, but 1541 * Logical drive target and lun are assigned at this time, but
1542 * physical device lun and target assignment are deferred (assigned 1542 * physical device lun and target assignment are deferred (assigned
1543 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 1543 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1544 */ 1544 */
1545 static void figure_bus_target_lun(struct ctlr_info *h, 1545 static void figure_bus_target_lun(struct ctlr_info *h,
1546 u8 *lunaddrbytes, int *bus, int *target, int *lun, 1546 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1547 struct hpsa_scsi_dev_t *device) 1547 struct hpsa_scsi_dev_t *device)
1548 { 1548 {
1549 u32 lunid; 1549 u32 lunid;
1550 1550
1551 if (is_logical_dev_addr_mode(lunaddrbytes)) { 1551 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1552 /* logical device */ 1552 /* logical device */
1553 if (unlikely(is_scsi_rev_5(h))) { 1553 if (unlikely(is_scsi_rev_5(h))) {
1554 /* p1210m, logical drives lun assignments 1554 /* p1210m, logical drives lun assignments
1555 * match SCSI REPORT LUNS data. 1555 * match SCSI REPORT LUNS data.
1556 */ 1556 */
1557 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 1557 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1558 *bus = 0; 1558 *bus = 0;
1559 *target = 0; 1559 *target = 0;
1560 *lun = (lunid & 0x3fff) + 1; 1560 *lun = (lunid & 0x3fff) + 1;
1561 } else { 1561 } else {
1562 /* not p1210m... */ 1562 /* not p1210m... */
1563 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 1563 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1564 if (is_msa2xxx(h, device)) { 1564 if (is_msa2xxx(h, device)) {
1565 /* msa2xxx way, put logicals on bus 1 1565 /* msa2xxx way, put logicals on bus 1
1566 * and match the target/lun numbers 1566 * and match the target/lun numbers
1567 * the box reports. 1567 * the box reports.
1568 */ 1568 */
1569 *bus = 1; 1569 *bus = 1;
1570 *target = (lunid >> 16) & 0x3fff; 1570 *target = (lunid >> 16) & 0x3fff;
1571 *lun = lunid & 0x00ff; 1571 *lun = lunid & 0x00ff;
1572 } else { 1572 } else {
1573 /* Traditional smart array way. */ 1573 /* Traditional smart array way. */
1574 *bus = 0; 1574 *bus = 0;
1575 *lun = 0; 1575 *lun = 0;
1576 *target = lunid & 0x3fff; 1576 *target = lunid & 0x3fff;
1577 } 1577 }
1578 } 1578 }
1579 } else { 1579 } else {
1580 /* physical device */ 1580 /* physical device */
1581 if (is_hba_lunid(lunaddrbytes)) 1581 if (is_hba_lunid(lunaddrbytes))
1582 if (unlikely(is_scsi_rev_5(h))) { 1582 if (unlikely(is_scsi_rev_5(h))) {
1583 *bus = 0; /* put p1210m ctlr at 0,0,0 */ 1583 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1584 *target = 0; 1584 *target = 0;
1585 *lun = 0; 1585 *lun = 0;
1586 return; 1586 return;
1587 } else 1587 } else
1588 *bus = 3; /* traditional smartarray */ 1588 *bus = 3; /* traditional smartarray */
1589 else 1589 else
1590 *bus = 2; /* physical disk */ 1590 *bus = 2; /* physical disk */
1591 *target = -1; 1591 *target = -1;
1592 *lun = -1; /* we will fill these in later. */ 1592 *lun = -1; /* we will fill these in later. */
1593 } 1593 }
1594 } 1594 }
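
For the MSA2xxx branch above, target and lun fall directly out of the 32-bit LUN ID the box reports. A worked decode with a hypothetical LUN ID value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lunid = 0x00020005;	/* hypothetical MSA2xxx LUN ID */
	int target = (lunid >> 16) & 0x3fff;
	int lun = lunid & 0x00ff;

	/* prints bus=1 target=2 lun=5 */
	printf("bus=1 target=%d lun=%d\n", target, lun);
	return 0;
}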
1595 1595
1596 /* 1596 /*
1597 * If there is no lun 0 on a target, Linux won't find any devices. 1597 * If there is no lun 0 on a target, Linux won't find any devices.
1598 * For the MSA2xxx boxes, we have to manually detect the enclosure 1598 * For the MSA2xxx boxes, we have to manually detect the enclosure
1599 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 1599 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1600 * it for some reason. *tmpdevice is the target we're adding, 1600 * it for some reason. *tmpdevice is the target we're adding,
1601 * this_device is a pointer into the current element of currentsd[] 1601 * this_device is a pointer into the current element of currentsd[]
1602 * that we're building up in update_scsi_devices(), below. 1602 * that we're building up in update_scsi_devices(), below.
1603 * lunzerobits is a bitmap that tracks which targets already have a 1603 * lunzerobits is a bitmap that tracks which targets already have a
1604 * lun 0 assigned. 1604 * lun 0 assigned.
1605 * Returns 1 if an enclosure was added, 0 if not. 1605 * Returns 1 if an enclosure was added, 0 if not.
1606 */ 1606 */
1607 static int add_msa2xxx_enclosure_device(struct ctlr_info *h, 1607 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1608 struct hpsa_scsi_dev_t *tmpdevice, 1608 struct hpsa_scsi_dev_t *tmpdevice,
1609 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 1609 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1610 int bus, int target, int lun, unsigned long lunzerobits[], 1610 int bus, int target, int lun, unsigned long lunzerobits[],
1611 int *nmsa2xxx_enclosures) 1611 int *nmsa2xxx_enclosures)
1612 { 1612 {
1613 unsigned char scsi3addr[8]; 1613 unsigned char scsi3addr[8];
1614 1614
1615 if (test_bit(target, lunzerobits)) 1615 if (test_bit(target, lunzerobits))
1616 return 0; /* There is already a lun 0 on this target. */ 1616 return 0; /* There is already a lun 0 on this target. */
1617 1617
1618 if (!is_logical_dev_addr_mode(lunaddrbytes)) 1618 if (!is_logical_dev_addr_mode(lunaddrbytes))
1619 return 0; /* It's the logical targets that may lack lun 0. */ 1619 return 0; /* It's the logical targets that may lack lun 0. */
1620 1620
1621 if (!is_msa2xxx(h, tmpdevice)) 1621 if (!is_msa2xxx(h, tmpdevice))
1622 return 0; /* It's only the MSA2xxx that have this problem. */ 1622 return 0; /* It's only the MSA2xxx that have this problem. */
1623 1623
1624 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ 1624 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1625 return 0; 1625 return 0;
1626 1626
1627 memset(scsi3addr, 0, 8); 1627 memset(scsi3addr, 0, 8);
1628 scsi3addr[3] = target; 1628 scsi3addr[3] = target;
1629 if (is_hba_lunid(scsi3addr)) 1629 if (is_hba_lunid(scsi3addr))
1630 return 0; /* Don't add the RAID controller here. */ 1630 return 0; /* Don't add the RAID controller here. */
1631 1631
1632 if (is_scsi_rev_5(h)) 1632 if (is_scsi_rev_5(h))
1633 return 0; /* p1210m doesn't need to do this. */ 1633 return 0; /* p1210m doesn't need to do this. */
1634 1634
1635 #define MAX_MSA2XXX_ENCLOSURES 32 1635 #define MAX_MSA2XXX_ENCLOSURES 32
1636 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { 1636 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1637 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " 1637 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1638 "enclosures exceeded. Check your hardware " 1638 "enclosures exceeded. Check your hardware "
1639 "configuration."); 1639 "configuration.");
1640 return 0; 1640 return 0;
1641 } 1641 }
1642 1642
1643 if (hpsa_update_device_info(h, scsi3addr, this_device)) 1643 if (hpsa_update_device_info(h, scsi3addr, this_device))
1644 return 0; 1644 return 0;
1645 (*nmsa2xxx_enclosures)++; 1645 (*nmsa2xxx_enclosures)++;
1646 hpsa_set_bus_target_lun(this_device, bus, target, 0); 1646 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1647 set_bit(target, lunzerobits); 1647 set_bit(target, lunzerobits);
1648 return 1; 1648 return 1;
1649 } 1649 }
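
lunzerobits is an ordinary kernel bitmap with one bit per target, so a lun 0 is fabricated at most once per enclosure. The test-then-set flow in user-space terms, with test_bit()/set_bit() replaced by plain bit operations:

#include <stdio.h>

int main(void)
{
	unsigned long lunzerobits = 0;	/* one bit per target; 64 targets here */
	int target = 5;

	if (!(lunzerobits & (1UL << target))) {	/* test_bit(target, ...) */
		lunzerobits |= 1UL << target;	/* set_bit(target, ...) */
		printf("adding enclosure at target %d, lun 0\n", target);
	}
	return 0;
}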
1650 1650
1651 /* 1651 /*
1652 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 1652 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1653 * logdev. The number of luns in physdev and logdev are returned in 1653 * logdev. The number of luns in physdev and logdev are returned in
1654 * *nphysicals and *nlogicals, respectively. 1654 * *nphysicals and *nlogicals, respectively.
1655 * Returns 0 on success, -1 otherwise. 1655 * Returns 0 on success, -1 otherwise.
1656 */ 1656 */
1657 static int hpsa_gather_lun_info(struct ctlr_info *h, 1657 static int hpsa_gather_lun_info(struct ctlr_info *h,
1658 int reportlunsize, 1658 int reportlunsize,
1659 struct ReportLUNdata *physdev, u32 *nphysicals, 1659 struct ReportLUNdata *physdev, u32 *nphysicals,
1660 struct ReportLUNdata *logdev, u32 *nlogicals) 1660 struct ReportLUNdata *logdev, u32 *nlogicals)
1661 { 1661 {
1662 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { 1662 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1663 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 1663 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1664 return -1; 1664 return -1;
1665 } 1665 }
1666 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; 1666 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1667 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 1667 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1668 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 1668 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1669 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1669 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1670 *nphysicals - HPSA_MAX_PHYS_LUN); 1670 *nphysicals - HPSA_MAX_PHYS_LUN);
1671 *nphysicals = HPSA_MAX_PHYS_LUN; 1671 *nphysicals = HPSA_MAX_PHYS_LUN;
1672 } 1672 }
1673 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 1673 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1674 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 1674 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1675 return -1; 1675 return -1;
1676 } 1676 }
1677 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 1677 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1678 /* Reject Logicals in excess of our max capability. */ 1678 /* Reject Logicals in excess of our max capability. */
1679 if (*nlogicals > HPSA_MAX_LUN) { 1679 if (*nlogicals > HPSA_MAX_LUN) {
1680 dev_warn(&h->pdev->dev, 1680 dev_warn(&h->pdev->dev,
1681 "maximum logical LUNs (%d) exceeded. " 1681 "maximum logical LUNs (%d) exceeded. "
1682 "%d LUNs ignored.\n", HPSA_MAX_LUN, 1682 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1683 *nlogicals - HPSA_MAX_LUN); 1683 *nlogicals - HPSA_MAX_LUN);
1684 *nlogicals = HPSA_MAX_LUN; 1684 *nlogicals = HPSA_MAX_LUN;
1685 } 1685 }
1686 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 1686 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1687 dev_warn(&h->pdev->dev, 1687 dev_warn(&h->pdev->dev,
1688 "maximum logical + physical LUNs (%d) exceeded. " 1688 "maximum logical + physical LUNs (%d) exceeded. "
1689 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1689 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1690 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 1690 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1691 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 1691 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1692 } 1692 }
1693 return 0; 1693 return 0;
1694 } 1694 }
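
Both LUN counts above come from LUNListLength, a big-endian byte count at the head of the REPORT LUNS response; dividing by 8 turns it into a count of 8-byte LUN entries. The decode by hand, on a fabricated header:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned char hdr[4] = { 0x00, 0x00, 0x00, 0x18 };	/* 24 bytes */
	uint32_t len = (hdr[0] << 24) | (hdr[1] << 16) | (hdr[2] << 8) | hdr[3];

	printf("%u LUN entries\n", (unsigned)(len / 8));	/* prints 3 */
	return 0;
}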
1695 1695
1696 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 1696 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1697 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, 1697 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1698 struct ReportLUNdata *logdev_list) 1698 struct ReportLUNdata *logdev_list)
1699 { 1699 {
1700 /* Helper function, figure out where the LUN ID info is coming from 1700 /* Helper function, figure out where the LUN ID info is coming from
1701 * given index i, the lists of physical and logical devices, and where in 1701 * given index i, the lists of physical and logical devices, and where in
1702 * the list the raid controller is supposed to appear (first or last) 1702 * the list the raid controller is supposed to appear (first or last)
1703 */ 1703 */
1704 1704
1705 int logicals_start = nphysicals + (raid_ctlr_position == 0); 1705 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1706 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 1706 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1707 1707
1708 if (i == raid_ctlr_position) 1708 if (i == raid_ctlr_position)
1709 return RAID_CTLR_LUNID; 1709 return RAID_CTLR_LUNID;
1710 1710
1711 if (i < logicals_start) 1711 if (i < logicals_start)
1712 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 1712 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1713 1713
1714 if (i < last_device) 1714 if (i < last_device)
1715 return &logdev_list->LUN[i - nphysicals - 1715 return &logdev_list->LUN[i - nphysicals -
1716 (raid_ctlr_position == 0)][0]; 1716 (raid_ctlr_position == 0)][0];
1717 BUG(); 1717 BUG();
1718 return NULL; 1718 return NULL;
1719 } 1719 }
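
The index arithmetic in figure_lunaddrbytes() is easiest to check with concrete numbers. With 3 physicals, 2 logicals, and the controller at position 0 (the SCSI rev 5 case), index 0 is the controller, indices 1-3 map to the physical list, and 4-5 to the logical list:

#include <stdio.h>

int main(void)
{
	int nphysicals = 3, nlogicals = 2, raid_ctlr_position = 0;
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	/* prints logicals_start=4 last_device=6 */
	printf("logicals_start=%d last_device=%d\n", logicals_start, last_device);
	return 0;
}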
1720 1720
1721 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1721 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1722 { 1722 {
1723 /* the idea here is we could get notified 1723 /* the idea here is we could get notified
1724 * that some devices have changed, so we do a report 1724 * that some devices have changed, so we do a report
1725 * physical luns and report logical luns cmd, and adjust 1725 * physical luns and report logical luns cmd, and adjust
1726 * our list of devices accordingly. 1726 * our list of devices accordingly.
1727 * 1727 *
1728 * The scsi3addr's of devices won't change so long as the 1728 * The scsi3addr's of devices won't change so long as the
1729 * adapter is not reset. That means we can rescan and 1729 * adapter is not reset. That means we can rescan and
1730 * tell which devices we already know about, vs. new 1730 * tell which devices we already know about, vs. new
1731 * devices, vs. disappearing devices. 1731 * devices, vs. disappearing devices.
1732 */ 1732 */
1733 struct ReportLUNdata *physdev_list = NULL; 1733 struct ReportLUNdata *physdev_list = NULL;
1734 struct ReportLUNdata *logdev_list = NULL; 1734 struct ReportLUNdata *logdev_list = NULL;
1735 unsigned char *inq_buff = NULL; 1735 unsigned char *inq_buff = NULL;
1736 u32 nphysicals = 0; 1736 u32 nphysicals = 0;
1737 u32 nlogicals = 0; 1737 u32 nlogicals = 0;
1738 u32 ndev_allocated = 0; 1738 u32 ndev_allocated = 0;
1739 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1739 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1740 int ncurrent = 0; 1740 int ncurrent = 0;
1741 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1741 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1742 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1742 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1743 int bus, target, lun; 1743 int bus, target, lun;
1744 int raid_ctlr_position; 1744 int raid_ctlr_position;
1745 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1745 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1746 1746
1747 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, 1747 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1748 GFP_KERNEL); 1748 GFP_KERNEL);
1749 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1749 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1750 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1750 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1751 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1751 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1752 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1752 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1753 1753
1754 if (!currentsd || !physdev_list || !logdev_list || 1754 if (!currentsd || !physdev_list || !logdev_list ||
1755 !inq_buff || !tmpdevice) { 1755 !inq_buff || !tmpdevice) {
1756 dev_err(&h->pdev->dev, "out of memory\n"); 1756 dev_err(&h->pdev->dev, "out of memory\n");
1757 goto out; 1757 goto out;
1758 } 1758 }
1759 memset(lunzerobits, 0, sizeof(lunzerobits)); 1759 memset(lunzerobits, 0, sizeof(lunzerobits));
1760 1760
1761 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, 1761 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1762 logdev_list, &nlogicals)) 1762 logdev_list, &nlogicals))
1763 goto out; 1763 goto out;
1764 1764
1765 /* We might see up to 32 MSA2xxx enclosures: actually at most 8 of them, 1765 /* We might see up to 32 MSA2xxx enclosures: actually at most 8 of them,
1766 * but each shows up 4 times through different paths. The plus 1 1766 * but each shows up 4 times through different paths. The plus 1
1767 * is for the RAID controller. 1767 * is for the RAID controller.
1768 */ 1768 */
1769 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; 1769 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1770 1770
1771 /* Allocate the per device structures */ 1771 /* Allocate the per device structures */
1772 for (i = 0; i < ndevs_to_allocate; i++) { 1772 for (i = 0; i < ndevs_to_allocate; i++) {
1773 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 1773 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1774 if (!currentsd[i]) { 1774 if (!currentsd[i]) {
1775 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 1775 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1776 __FILE__, __LINE__); 1776 __FILE__, __LINE__);
1777 goto out; 1777 goto out;
1778 } 1778 }
1779 ndev_allocated++; 1779 ndev_allocated++;
1780 } 1780 }
1781 1781
1782 if (unlikely(is_scsi_rev_5(h))) 1782 if (unlikely(is_scsi_rev_5(h)))
1783 raid_ctlr_position = 0; 1783 raid_ctlr_position = 0;
1784 else 1784 else
1785 raid_ctlr_position = nphysicals + nlogicals; 1785 raid_ctlr_position = nphysicals + nlogicals;
1786 1786
1787 /* adjust our table of devices */ 1787 /* adjust our table of devices */
1788 nmsa2xxx_enclosures = 0; 1788 nmsa2xxx_enclosures = 0;
1789 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1789 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1790 u8 *lunaddrbytes; 1790 u8 *lunaddrbytes;
1791 1791
1792 /* Figure out where the LUN ID info is coming from */ 1792 /* Figure out where the LUN ID info is coming from */
1793 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1793 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1794 i, nphysicals, nlogicals, physdev_list, logdev_list); 1794 i, nphysicals, nlogicals, physdev_list, logdev_list);
1795 /* skip masked physical devices. */ 1795 /* skip masked physical devices. */
1796 if (lunaddrbytes[3] & 0xC0 && 1796 if (lunaddrbytes[3] & 0xC0 &&
1797 i < nphysicals + (raid_ctlr_position == 0)) 1797 i < nphysicals + (raid_ctlr_position == 0))
1798 continue; 1798 continue;
1799 1799
1800 /* Get device type, vendor, model, device id */ 1800 /* Get device type, vendor, model, device id */
1801 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1801 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1802 continue; /* skip it if we can't talk to it. */ 1802 continue; /* skip it if we can't talk to it. */
1803 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1803 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1804 tmpdevice); 1804 tmpdevice);
1805 this_device = currentsd[ncurrent]; 1805 this_device = currentsd[ncurrent];
1806 1806
1807 /* 1807 /*
1808 * For the msa2xxx boxes, we have to insert a LUN 0 which 1808 * For the msa2xxx boxes, we have to insert a LUN 0 which
1809 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1809 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1810 * is nonetheless an enclosure device there. We have to 1810 * is nonetheless an enclosure device there. We have to
1811 * present that otherwise linux won't find anything if 1811 * present that otherwise linux won't find anything if
1812 * there is no lun 0. 1812 * there is no lun 0.
1813 */ 1813 */
1814 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, 1814 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1815 lunaddrbytes, bus, target, lun, lunzerobits, 1815 lunaddrbytes, bus, target, lun, lunzerobits,
1816 &nmsa2xxx_enclosures)) { 1816 &nmsa2xxx_enclosures)) {
1817 ncurrent++; 1817 ncurrent++;
1818 this_device = currentsd[ncurrent]; 1818 this_device = currentsd[ncurrent];
1819 } 1819 }
1820 1820
1821 *this_device = *tmpdevice; 1821 *this_device = *tmpdevice;
1822 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1822 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1823 1823
1824 switch (this_device->devtype) { 1824 switch (this_device->devtype) {
1825 case TYPE_ROM: { 1825 case TYPE_ROM: {
1826 /* We don't *really* support actual CD-ROM devices, 1826 /* We don't *really* support actual CD-ROM devices,
1827 * just "One Button Disaster Recovery" tape drive 1827 * just "One Button Disaster Recovery" tape drive
1828 * which temporarily pretends to be a CD-ROM drive. 1828 * which temporarily pretends to be a CD-ROM drive.
1829 * So we check that the device is really an OBDR tape 1829 * So we check that the device is really an OBDR tape
1830 * device by checking for "$DR-10" in bytes 43-48 of 1830 * device by checking for "$DR-10" in bytes 43-48 of
1831 * the inquiry data. 1831 * the inquiry data.
1832 */ 1832 */
1833 char obdr_sig[7]; 1833 char obdr_sig[7];
1834 #define OBDR_TAPE_SIG "$DR-10" 1834 #define OBDR_TAPE_SIG "$DR-10"
1835 strncpy(obdr_sig, &inq_buff[43], 6); 1835 strncpy(obdr_sig, &inq_buff[43], 6);
1836 obdr_sig[6] = '\0'; 1836 obdr_sig[6] = '\0';
1837 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) 1837 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1838 /* Not OBDR device, ignore it. */ 1838 /* Not OBDR device, ignore it. */
1839 break; 1839 break;
1840 } 1840 }
1841 ncurrent++; 1841 ncurrent++;
1842 break; 1842 break;
1843 case TYPE_DISK: 1843 case TYPE_DISK:
1844 if (i < nphysicals) 1844 if (i < nphysicals)
1845 break; 1845 break;
1846 ncurrent++; 1846 ncurrent++;
1847 break; 1847 break;
1848 case TYPE_TAPE: 1848 case TYPE_TAPE:
1849 case TYPE_MEDIUM_CHANGER: 1849 case TYPE_MEDIUM_CHANGER:
1850 ncurrent++; 1850 ncurrent++;
1851 break; 1851 break;
1852 case TYPE_RAID: 1852 case TYPE_RAID:
1853 /* Only present the Smartarray HBA as a RAID controller. 1853 /* Only present the Smartarray HBA as a RAID controller.
1854 * If it's a RAID controller other than the HBA itself 1854 * If it's a RAID controller other than the HBA itself
1855 * (an external RAID controller, MSA500 or similar) 1855 * (an external RAID controller, MSA500 or similar)
1856 * don't present it. 1856 * don't present it.
1857 */ 1857 */
1858 if (!is_hba_lunid(lunaddrbytes)) 1858 if (!is_hba_lunid(lunaddrbytes))
1859 break; 1859 break;
1860 ncurrent++; 1860 ncurrent++;
1861 break; 1861 break;
1862 default: 1862 default:
1863 break; 1863 break;
1864 } 1864 }
1865 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) 1865 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1866 break; 1866 break;
1867 } 1867 }
1868 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 1868 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1869 out: 1869 out:
1870 kfree(tmpdevice); 1870 kfree(tmpdevice);
1871 for (i = 0; i < ndev_allocated; i++) 1871 for (i = 0; i < ndev_allocated; i++)
1872 kfree(currentsd[i]); 1872 kfree(currentsd[i]);
1873 kfree(currentsd); 1873 kfree(currentsd);
1874 kfree(inq_buff); 1874 kfree(inq_buff);
1875 kfree(physdev_list); 1875 kfree(physdev_list);
1876 kfree(logdev_list); 1876 kfree(logdev_list);
1877 } 1877 }
1878 1878
1879 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1879 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1880 * dma mapping and fills in the scatter gather entries of the 1880 * dma mapping and fills in the scatter gather entries of the
1881 * hpsa command, cp. 1881 * hpsa command, cp.
1882 */ 1882 */
1883 static int hpsa_scatter_gather(struct ctlr_info *h, 1883 static int hpsa_scatter_gather(struct ctlr_info *h,
1884 struct CommandList *cp, 1884 struct CommandList *cp,
1885 struct scsi_cmnd *cmd) 1885 struct scsi_cmnd *cmd)
1886 { 1886 {
1887 unsigned int len; 1887 unsigned int len;
1888 struct scatterlist *sg; 1888 struct scatterlist *sg;
1889 u64 addr64; 1889 u64 addr64;
1890 int use_sg, i, sg_index, chained; 1890 int use_sg, i, sg_index, chained;
1891 struct SGDescriptor *curr_sg; 1891 struct SGDescriptor *curr_sg;
1892 1892
1893 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 1893 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1894 1894
1895 use_sg = scsi_dma_map(cmd); 1895 use_sg = scsi_dma_map(cmd);
1896 if (use_sg < 0) 1896 if (use_sg < 0)
1897 return use_sg; 1897 return use_sg;
1898 1898
1899 if (!use_sg) 1899 if (!use_sg)
1900 goto sglist_finished; 1900 goto sglist_finished;
1901 1901
1902 curr_sg = cp->SG; 1902 curr_sg = cp->SG;
1903 chained = 0; 1903 chained = 0;
1904 sg_index = 0; 1904 sg_index = 0;
1905 scsi_for_each_sg(cmd, sg, use_sg, i) { 1905 scsi_for_each_sg(cmd, sg, use_sg, i) {
1906 if (i == h->max_cmd_sg_entries - 1 && 1906 if (i == h->max_cmd_sg_entries - 1 &&
1907 use_sg > h->max_cmd_sg_entries) { 1907 use_sg > h->max_cmd_sg_entries) {
1908 chained = 1; 1908 chained = 1;
1909 curr_sg = h->cmd_sg_list[cp->cmdindex]; 1909 curr_sg = h->cmd_sg_list[cp->cmdindex];
1910 sg_index = 0; 1910 sg_index = 0;
1911 } 1911 }
1912 addr64 = (u64) sg_dma_address(sg); 1912 addr64 = (u64) sg_dma_address(sg);
1913 len = sg_dma_len(sg); 1913 len = sg_dma_len(sg);
1914 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 1914 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1915 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 1915 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1916 curr_sg->Len = len; 1916 curr_sg->Len = len;
1917 curr_sg->Ext = 0; /* we are not chaining */ 1917 curr_sg->Ext = 0; /* we are not chaining */
1918 curr_sg++; 1918 curr_sg++;
1919 } 1919 }
1920 1920
1921 if (use_sg + chained > h->maxSG) 1921 if (use_sg + chained > h->maxSG)
1922 h->maxSG = use_sg + chained; 1922 h->maxSG = use_sg + chained;
1923 1923
1924 if (chained) { 1924 if (chained) {
1925 cp->Header.SGList = h->max_cmd_sg_entries; 1925 cp->Header.SGList = h->max_cmd_sg_entries;
1926 cp->Header.SGTotal = (u16) (use_sg + 1); 1926 cp->Header.SGTotal = (u16) (use_sg + 1);
1927 hpsa_map_sg_chain_block(h, cp); 1927 hpsa_map_sg_chain_block(h, cp);
1928 return 0; 1928 return 0;
1929 } 1929 }
1930 1930
1931 sglist_finished: 1931 sglist_finished:
1932 1932
1933 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 1933 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1934 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 1934 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
1935 return 0; 1935 return 0;
1936 } 1936 }
1937 1937
1938 1938
1939 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 1939 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
1940 void (*done)(struct scsi_cmnd *)) 1940 void (*done)(struct scsi_cmnd *))
1941 { 1941 {
1942 struct ctlr_info *h; 1942 struct ctlr_info *h;
1943 struct hpsa_scsi_dev_t *dev; 1943 struct hpsa_scsi_dev_t *dev;
1944 unsigned char scsi3addr[8]; 1944 unsigned char scsi3addr[8];
1945 struct CommandList *c; 1945 struct CommandList *c;
1946 unsigned long flags; 1946 unsigned long flags;
1947 1947
1948 /* Get the ptr to our adapter structure out of cmd->host. */ 1948 /* Get the ptr to our adapter structure out of cmd->host. */
1949 h = sdev_to_hba(cmd->device); 1949 h = sdev_to_hba(cmd->device);
1950 dev = cmd->device->hostdata; 1950 dev = cmd->device->hostdata;
1951 if (!dev) { 1951 if (!dev) {
1952 cmd->result = DID_NO_CONNECT << 16; 1952 cmd->result = DID_NO_CONNECT << 16;
1953 done(cmd); 1953 done(cmd);
1954 return 0; 1954 return 0;
1955 } 1955 }
1956 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 1956 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1957 1957
1958 /* Need a lock as this is being allocated from the pool */ 1958 /* Need a lock as this is being allocated from the pool */
1959 spin_lock_irqsave(&h->lock, flags); 1959 spin_lock_irqsave(&h->lock, flags);
1960 c = cmd_alloc(h); 1960 c = cmd_alloc(h);
1961 spin_unlock_irqrestore(&h->lock, flags); 1961 spin_unlock_irqrestore(&h->lock, flags);
1962 if (c == NULL) { /* trouble... */ 1962 if (c == NULL) { /* trouble... */
1963 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 1963 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1964 return SCSI_MLQUEUE_HOST_BUSY; 1964 return SCSI_MLQUEUE_HOST_BUSY;
1965 } 1965 }
1966 1966
1967 /* Fill in the command list header */ 1967 /* Fill in the command list header */
1968 1968
1969 cmd->scsi_done = done; /* save this for use by completion code */ 1969 cmd->scsi_done = done; /* save this for use by completion code */
1970 1970
1971 /* save c in case we have to abort it */ 1971 /* save c in case we have to abort it */
1972 cmd->host_scribble = (unsigned char *) c; 1972 cmd->host_scribble = (unsigned char *) c;
1973 1973
1974 c->cmd_type = CMD_SCSI; 1974 c->cmd_type = CMD_SCSI;
1975 c->scsi_cmd = cmd; 1975 c->scsi_cmd = cmd;
1976 c->Header.ReplyQueue = 0; /* unused in simple mode */ 1976 c->Header.ReplyQueue = 0; /* unused in simple mode */
1977 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 1977 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1978 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 1978 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1979 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 1979 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1980 1980
1981 /* Fill in the request block... */ 1981 /* Fill in the request block... */
1982 1982
1983 c->Request.Timeout = 0; 1983 c->Request.Timeout = 0;
1984 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 1984 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1985 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 1985 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1986 c->Request.CDBLen = cmd->cmd_len; 1986 c->Request.CDBLen = cmd->cmd_len;
1987 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 1987 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1988 c->Request.Type.Type = TYPE_CMD; 1988 c->Request.Type.Type = TYPE_CMD;
1989 c->Request.Type.Attribute = ATTR_SIMPLE; 1989 c->Request.Type.Attribute = ATTR_SIMPLE;
1990 switch (cmd->sc_data_direction) { 1990 switch (cmd->sc_data_direction) {
1991 case DMA_TO_DEVICE: 1991 case DMA_TO_DEVICE:
1992 c->Request.Type.Direction = XFER_WRITE; 1992 c->Request.Type.Direction = XFER_WRITE;
1993 break; 1993 break;
1994 case DMA_FROM_DEVICE: 1994 case DMA_FROM_DEVICE:
1995 c->Request.Type.Direction = XFER_READ; 1995 c->Request.Type.Direction = XFER_READ;
1996 break; 1996 break;
1997 case DMA_NONE: 1997 case DMA_NONE:
1998 c->Request.Type.Direction = XFER_NONE; 1998 c->Request.Type.Direction = XFER_NONE;
1999 break; 1999 break;
2000 case DMA_BIDIRECTIONAL: 2000 case DMA_BIDIRECTIONAL:
2001 /* This can happen if a buggy application does a scsi passthru 2001 /* This can happen if a buggy application does a scsi passthru
2002 * and sets both inlen and outlen to non-zero. ( see 2002 * and sets both inlen and outlen to non-zero. ( see
2003 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 2003 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2004 */ 2004 */
2005 2005
2006 c->Request.Type.Direction = XFER_RSVD; 2006 c->Request.Type.Direction = XFER_RSVD;
2007 /* This is technically wrong, and hpsa controllers should 2007 /* This is technically wrong, and hpsa controllers should
2008 * reject it with CMD_INVALID, which is the most correct 2008 * reject it with CMD_INVALID, which is the most correct
2009 * response, but non-fibre backends appear to let it 2009 * response, but non-fibre backends appear to let it
2010 * slide by, and give the same results as if this field 2010 * slide by, and give the same results as if this field
2011 * were set correctly. Either way is acceptable for 2011 * were set correctly. Either way is acceptable for
2012 * our purposes here. 2012 * our purposes here.
2013 */ 2013 */
2014 2014
2015 break; 2015 break;
2016 2016
2017 default: 2017 default:
2018 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 2018 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2019 cmd->sc_data_direction); 2019 cmd->sc_data_direction);
2020 BUG(); 2020 BUG();
2021 break; 2021 break;
2022 } 2022 }
2023 2023
2024 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 2024 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2025 cmd_free(h, c); 2025 cmd_free(h, c);
2026 return SCSI_MLQUEUE_HOST_BUSY; 2026 return SCSI_MLQUEUE_HOST_BUSY;
2027 } 2027 }
2028 enqueue_cmd_and_start_io(h, c); 2028 enqueue_cmd_and_start_io(h, c);
2029 /* the cmd'll come back via intr handler in complete_scsi_command() */ 2029 /* the cmd'll come back via intr handler in complete_scsi_command() */
2030 return 0; 2030 return 0;
2031 } 2031 }
2032 2032
2033 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 2033 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2034 2034
2035 static void hpsa_scan_start(struct Scsi_Host *sh) 2035 static void hpsa_scan_start(struct Scsi_Host *sh)
2036 { 2036 {
2037 struct ctlr_info *h = shost_to_hba(sh); 2037 struct ctlr_info *h = shost_to_hba(sh);
2038 unsigned long flags; 2038 unsigned long flags;
2039 2039
2040 /* wait until any scan already in progress is finished. */ 2040 /* wait until any scan already in progress is finished. */
2041 while (1) { 2041 while (1) {
2042 spin_lock_irqsave(&h->scan_lock, flags); 2042 spin_lock_irqsave(&h->scan_lock, flags);
2043 if (h->scan_finished) 2043 if (h->scan_finished)
2044 break; 2044 break;
2045 spin_unlock_irqrestore(&h->scan_lock, flags); 2045 spin_unlock_irqrestore(&h->scan_lock, flags);
2046 wait_event(h->scan_wait_queue, h->scan_finished); 2046 wait_event(h->scan_wait_queue, h->scan_finished);
2047 /* Note: We don't need to worry about a race between this 2047 /* Note: We don't need to worry about a race between this
2048 * thread and driver unload because the midlayer will 2048 * thread and driver unload because the midlayer will
2049 * have incremented the reference count, so unload won't 2049 * have incremented the reference count, so unload won't
2050 * happen if we're in here. 2050 * happen if we're in here.
2051 */ 2051 */
2052 } 2052 }
2053 h->scan_finished = 0; /* mark scan as in progress */ 2053 h->scan_finished = 0; /* mark scan as in progress */
2054 spin_unlock_irqrestore(&h->scan_lock, flags); 2054 spin_unlock_irqrestore(&h->scan_lock, flags);
2055 2055
2056 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 2056 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2057 2057
2058 spin_lock_irqsave(&h->scan_lock, flags); 2058 spin_lock_irqsave(&h->scan_lock, flags);
2059 h->scan_finished = 1; /* mark scan as finished. */ 2059 h->scan_finished = 1; /* mark scan as finished. */
2060 wake_up_all(&h->scan_wait_queue); 2060 wake_up_all(&h->scan_wait_queue);
2061 spin_unlock_irqrestore(&h->scan_lock, flags); 2061 spin_unlock_irqrestore(&h->scan_lock, flags);
2062 } 2062 }
2063 2063
2064 static int hpsa_scan_finished(struct Scsi_Host *sh, 2064 static int hpsa_scan_finished(struct Scsi_Host *sh,
2065 unsigned long elapsed_time) 2065 unsigned long elapsed_time)
2066 { 2066 {
2067 struct ctlr_info *h = shost_to_hba(sh); 2067 struct ctlr_info *h = shost_to_hba(sh);
2068 unsigned long flags; 2068 unsigned long flags;
2069 int finished; 2069 int finished;
2070 2070
2071 spin_lock_irqsave(&h->scan_lock, flags); 2071 spin_lock_irqsave(&h->scan_lock, flags);
2072 finished = h->scan_finished; 2072 finished = h->scan_finished;
2073 spin_unlock_irqrestore(&h->scan_lock, flags); 2073 spin_unlock_irqrestore(&h->scan_lock, flags);
2074 return finished; 2074 return finished;
2075 } 2075 }
2076 2076
2077 static int hpsa_change_queue_depth(struct scsi_device *sdev, 2077 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2078 int qdepth, int reason) 2078 int qdepth, int reason)
2079 { 2079 {
2080 struct ctlr_info *h = sdev_to_hba(sdev); 2080 struct ctlr_info *h = sdev_to_hba(sdev);
2081 2081
2082 if (reason != SCSI_QDEPTH_DEFAULT) 2082 if (reason != SCSI_QDEPTH_DEFAULT)
2083 return -ENOTSUPP; 2083 return -ENOTSUPP;
2084 2084
2085 if (qdepth < 1) 2085 if (qdepth < 1)
2086 qdepth = 1; 2086 qdepth = 1;
2087 else 2087 else
2088 if (qdepth > h->nr_cmds) 2088 if (qdepth > h->nr_cmds)
2089 qdepth = h->nr_cmds; 2089 qdepth = h->nr_cmds;
2090 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2090 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2091 return sdev->queue_depth; 2091 return sdev->queue_depth;
2092 } 2092 }
2093 2093
2094 static void hpsa_unregister_scsi(struct ctlr_info *h) 2094 static void hpsa_unregister_scsi(struct ctlr_info *h)
2095 { 2095 {
2096 /* we are being forcibly unloaded, and may not refuse. */ 2096 /* we are being forcibly unloaded, and may not refuse. */
2097 scsi_remove_host(h->scsi_host); 2097 scsi_remove_host(h->scsi_host);
2098 scsi_host_put(h->scsi_host); 2098 scsi_host_put(h->scsi_host);
2099 h->scsi_host = NULL; 2099 h->scsi_host = NULL;
2100 } 2100 }
2101 2101
2102 static int hpsa_register_scsi(struct ctlr_info *h) 2102 static int hpsa_register_scsi(struct ctlr_info *h)
2103 { 2103 {
2104 int rc; 2104 int rc;
2105 2105
2106 rc = hpsa_scsi_detect(h); 2106 rc = hpsa_scsi_detect(h);
2107 if (rc != 0) 2107 if (rc != 0)
2108 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" 2108 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2109 " hpsa_scsi_detect(), rc is %d\n", rc); 2109 " hpsa_scsi_detect(), rc is %d\n", rc);
2110 return rc; 2110 return rc;
2111 } 2111 }
2112 2112
2113 static int wait_for_device_to_become_ready(struct ctlr_info *h, 2113 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2114 unsigned char lunaddr[]) 2114 unsigned char lunaddr[])
2115 { 2115 {
2116 int rc = 0; 2116 int rc = 0;
2117 int count = 0; 2117 int count = 0;
2118 int waittime = 1; /* seconds */ 2118 int waittime = 1; /* seconds */
2119 struct CommandList *c; 2119 struct CommandList *c;
2120 2120
2121 c = cmd_special_alloc(h); 2121 c = cmd_special_alloc(h);
2122 if (!c) { 2122 if (!c) {
2123 dev_warn(&h->pdev->dev, "out of memory in " 2123 dev_warn(&h->pdev->dev, "out of memory in "
2124 "wait_for_device_to_become_ready.\n"); 2124 "wait_for_device_to_become_ready.\n");
2125 return IO_ERROR; 2125 return IO_ERROR;
2126 } 2126 }
2127 2127
2128 /* Send test unit ready until device ready, or give up. */ 2128 /* Send test unit ready until device ready, or give up. */
2129 while (count < HPSA_TUR_RETRY_LIMIT) { 2129 while (count < HPSA_TUR_RETRY_LIMIT) {
2130 2130
2131 /* Wait for a bit. do this first, because if we send 2131 /* Wait for a bit. do this first, because if we send
2132 * the TUR right away, the reset will just abort it. 2132 * the TUR right away, the reset will just abort it.
2133 */ 2133 */
2134 msleep(1000 * waittime); 2134 msleep(1000 * waittime);
2135 count++; 2135 count++;
2136 2136
2137 /* Increase wait time with each try, up to a point. */ 2137 /* Increase wait time with each try, up to a point. */
2138 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 2138 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2139 waittime = waittime * 2; 2139 waittime = waittime * 2;
2140 2140
2141 /* Send the Test Unit Ready */ 2141 /* Send the Test Unit Ready */
2142 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); 2142 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2143 hpsa_scsi_do_simple_cmd_core(h, c); 2143 hpsa_scsi_do_simple_cmd_core(h, c);
2144 /* no unmap needed here because no data xfer. */ 2144 /* no unmap needed here because no data xfer. */
2145 2145
2146 if (c->err_info->CommandStatus == CMD_SUCCESS) 2146 if (c->err_info->CommandStatus == CMD_SUCCESS)
2147 break; 2147 break;
2148 2148
2149 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2149 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2150 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 2150 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2151 (c->err_info->SenseInfo[2] == NO_SENSE || 2151 (c->err_info->SenseInfo[2] == NO_SENSE ||
2152 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 2152 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2153 break; 2153 break;
2154 2154
2155 dev_warn(&h->pdev->dev, "waiting %d secs " 2155 dev_warn(&h->pdev->dev, "waiting %d secs "
2156 "for device to become ready.\n", waittime); 2156 "for device to become ready.\n", waittime);
2157 rc = 1; /* device not ready. */ 2157 rc = 1; /* device not ready. */
2158 } 2158 }
2159 2159
2160 if (rc) 2160 if (rc)
2161 dev_warn(&h->pdev->dev, "giving up on device.\n"); 2161 dev_warn(&h->pdev->dev, "giving up on device.\n");
2162 else 2162 else
2163 dev_warn(&h->pdev->dev, "device is ready.\n"); 2163 dev_warn(&h->pdev->dev, "device is ready.\n");
2164 2164
2165 cmd_special_free(h, c); 2165 cmd_special_free(h, c);
2166 return rc; 2166 return rc;
2167 } 2167 }
2168 2168
2169 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 2169 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2170 * complaining. Doing a host- or bus-reset can't do anything good here. 2170 * complaining. Doing a host- or bus-reset can't do anything good here.
2171 */ 2171 */
2172 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 2172 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2173 { 2173 {
2174 int rc; 2174 int rc;
2175 struct ctlr_info *h; 2175 struct ctlr_info *h;
2176 struct hpsa_scsi_dev_t *dev; 2176 struct hpsa_scsi_dev_t *dev;
2177 2177
2178 /* find the controller to which the command to be aborted was sent */ 2178 /* find the controller to which the command to be aborted was sent */
2179 h = sdev_to_hba(scsicmd->device); 2179 h = sdev_to_hba(scsicmd->device);
2180 if (h == NULL) /* paranoia */ 2180 if (h == NULL) /* paranoia */
2181 return FAILED; 2181 return FAILED;
2182 dev = scsicmd->device->hostdata; 2182 dev = scsicmd->device->hostdata;
2183 if (!dev) { 2183 if (!dev) {
2184 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2184 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2185 "device lookup failed.\n"); 2185 "device lookup failed.\n");
2186 return FAILED; 2186 return FAILED;
2187 } 2187 }
2188 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 2188 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2189 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 2189 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2190 /* send a reset to the SCSI LUN which the command was sent to */ 2190 /* send a reset to the SCSI LUN which the command was sent to */
2191 rc = hpsa_send_reset(h, dev->scsi3addr); 2191 rc = hpsa_send_reset(h, dev->scsi3addr);
2192 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2192 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2193 return SUCCESS; 2193 return SUCCESS;
2194 2194
2195 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 2195 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2196 return FAILED; 2196 return FAILED;
2197 } 2197 }
2198 2198
2199 /* 2199 /*
2200 * For operations that cannot sleep, a command block is allocated at init, 2200 * For operations that cannot sleep, a command block is allocated at init,
2201 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 2201 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2202 * which ones are free or in use. Lock must be held when calling this. 2202 * which ones are free or in use. Lock must be held when calling this.
2203 * cmd_free() is the complement. 2203 * cmd_free() is the complement.
2204 */ 2204 */
2205 static struct CommandList *cmd_alloc(struct ctlr_info *h) 2205 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2206 { 2206 {
2207 struct CommandList *c; 2207 struct CommandList *c;
2208 int i; 2208 int i;
2209 union u64bit temp64; 2209 union u64bit temp64;
2210 dma_addr_t cmd_dma_handle, err_dma_handle; 2210 dma_addr_t cmd_dma_handle, err_dma_handle;
2211 2211
2212 do { 2212 do {
2213 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 2213 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2214 if (i == h->nr_cmds) 2214 if (i == h->nr_cmds)
2215 return NULL; 2215 return NULL;
2216 } while (test_and_set_bit 2216 } while (test_and_set_bit
2217 (i & (BITS_PER_LONG - 1), 2217 (i & (BITS_PER_LONG - 1),
2218 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2218 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2219 c = h->cmd_pool + i; 2219 c = h->cmd_pool + i;
2220 memset(c, 0, sizeof(*c)); 2220 memset(c, 0, sizeof(*c));
2221 cmd_dma_handle = h->cmd_pool_dhandle 2221 cmd_dma_handle = h->cmd_pool_dhandle
2222 + i * sizeof(*c); 2222 + i * sizeof(*c);
2223 c->err_info = h->errinfo_pool + i; 2223 c->err_info = h->errinfo_pool + i;
2224 memset(c->err_info, 0, sizeof(*c->err_info)); 2224 memset(c->err_info, 0, sizeof(*c->err_info));
2225 err_dma_handle = h->errinfo_pool_dhandle 2225 err_dma_handle = h->errinfo_pool_dhandle
2226 + i * sizeof(*c->err_info); 2226 + i * sizeof(*c->err_info);
2227 h->nr_allocs++; 2227 h->nr_allocs++;
2228 2228
2229 c->cmdindex = i; 2229 c->cmdindex = i;
2230 2230
2231 INIT_HLIST_NODE(&c->list); 2231 INIT_LIST_HEAD(&c->list);
2232 c->busaddr = (u32) cmd_dma_handle; 2232 c->busaddr = (u32) cmd_dma_handle;
2233 temp64.val = (u64) err_dma_handle; 2233 temp64.val = (u64) err_dma_handle;
2234 c->ErrDesc.Addr.lower = temp64.val32.lower; 2234 c->ErrDesc.Addr.lower = temp64.val32.lower;
2235 c->ErrDesc.Addr.upper = temp64.val32.upper; 2235 c->ErrDesc.Addr.upper = temp64.val32.upper;
2236 c->ErrDesc.Len = sizeof(*c->err_info); 2236 c->ErrDesc.Len = sizeof(*c->err_info);
2237 2237
2238 c->h = h; 2238 c->h = h;
2239 return c; 2239 return c;
2240 } 2240 }
2241 2241
2242 /* For operations that can wait for kmalloc to possibly sleep, 2242 /* For operations that can wait for kmalloc to possibly sleep,
2243 * this routine can be called. Lock need not be held to call 2243 * this routine can be called. Lock need not be held to call
2244 * cmd_special_alloc. cmd_special_free() is the complement. 2244 * cmd_special_alloc. cmd_special_free() is the complement.
2245 */ 2245 */
2246 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 2246 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2247 { 2247 {
2248 struct CommandList *c; 2248 struct CommandList *c;
2249 union u64bit temp64; 2249 union u64bit temp64;
2250 dma_addr_t cmd_dma_handle, err_dma_handle; 2250 dma_addr_t cmd_dma_handle, err_dma_handle;
2251 2251
2252 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 2252 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2253 if (c == NULL) 2253 if (c == NULL)
2254 return NULL; 2254 return NULL;
2255 memset(c, 0, sizeof(*c)); 2255 memset(c, 0, sizeof(*c));
2256 2256
2257 c->cmdindex = -1; 2257 c->cmdindex = -1;
2258 2258
2259 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 2259 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2260 &err_dma_handle); 2260 &err_dma_handle);
2261 2261
2262 if (c->err_info == NULL) { 2262 if (c->err_info == NULL) {
2263 pci_free_consistent(h->pdev, 2263 pci_free_consistent(h->pdev,
2264 sizeof(*c), c, cmd_dma_handle); 2264 sizeof(*c), c, cmd_dma_handle);
2265 return NULL; 2265 return NULL;
2266 } 2266 }
2267 memset(c->err_info, 0, sizeof(*c->err_info)); 2267 memset(c->err_info, 0, sizeof(*c->err_info));
2268 2268
2269 INIT_HLIST_NODE(&c->list); 2269 INIT_LIST_HEAD(&c->list);
2270 c->busaddr = (u32) cmd_dma_handle; 2270 c->busaddr = (u32) cmd_dma_handle;
2271 temp64.val = (u64) err_dma_handle; 2271 temp64.val = (u64) err_dma_handle;
2272 c->ErrDesc.Addr.lower = temp64.val32.lower; 2272 c->ErrDesc.Addr.lower = temp64.val32.lower;
2273 c->ErrDesc.Addr.upper = temp64.val32.upper; 2273 c->ErrDesc.Addr.upper = temp64.val32.upper;
2274 c->ErrDesc.Len = sizeof(*c->err_info); 2274 c->ErrDesc.Len = sizeof(*c->err_info);
2275 2275
2276 c->h = h; 2276 c->h = h;
2277 return c; 2277 return c;
2278 } 2278 }
2279 2279
2280 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 2280 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2281 { 2281 {
2282 int i; 2282 int i;
2283 2283
2284 i = c - h->cmd_pool; 2284 i = c - h->cmd_pool;
2285 clear_bit(i & (BITS_PER_LONG - 1), 2285 clear_bit(i & (BITS_PER_LONG - 1),
2286 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2286 h->cmd_pool_bits + (i / BITS_PER_LONG));
2287 h->nr_frees++; 2287 h->nr_frees++;
2288 } 2288 }
2289 2289
2290 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 2290 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2291 { 2291 {
2292 union u64bit temp64; 2292 union u64bit temp64;
2293 2293
2294 temp64.val32.lower = c->ErrDesc.Addr.lower; 2294 temp64.val32.lower = c->ErrDesc.Addr.lower;
2295 temp64.val32.upper = c->ErrDesc.Addr.upper; 2295 temp64.val32.upper = c->ErrDesc.Addr.upper;
2296 pci_free_consistent(h->pdev, sizeof(*c->err_info), 2296 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2297 c->err_info, (dma_addr_t) temp64.val); 2297 c->err_info, (dma_addr_t) temp64.val);
2298 pci_free_consistent(h->pdev, sizeof(*c), 2298 pci_free_consistent(h->pdev, sizeof(*c),
2299 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 2299 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2300 } 2300 }
2301 2301
2302 #ifdef CONFIG_COMPAT 2302 #ifdef CONFIG_COMPAT
2303 2303
2304 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2304 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2305 { 2305 {
2306 IOCTL32_Command_struct __user *arg32 = 2306 IOCTL32_Command_struct __user *arg32 =
2307 (IOCTL32_Command_struct __user *) arg; 2307 (IOCTL32_Command_struct __user *) arg;
2308 IOCTL_Command_struct arg64; 2308 IOCTL_Command_struct arg64;
2309 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 2309 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2310 int err; 2310 int err;
2311 u32 cp; 2311 u32 cp;
2312 2312
2313 memset(&arg64, 0, sizeof(arg64)); 2313 memset(&arg64, 0, sizeof(arg64));
2314 err = 0; 2314 err = 0;
2315 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2315 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2316 sizeof(arg64.LUN_info)); 2316 sizeof(arg64.LUN_info));
2317 err |= copy_from_user(&arg64.Request, &arg32->Request, 2317 err |= copy_from_user(&arg64.Request, &arg32->Request,
2318 sizeof(arg64.Request)); 2318 sizeof(arg64.Request));
2319 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2319 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2320 sizeof(arg64.error_info)); 2320 sizeof(arg64.error_info));
2321 err |= get_user(arg64.buf_size, &arg32->buf_size); 2321 err |= get_user(arg64.buf_size, &arg32->buf_size);
2322 err |= get_user(cp, &arg32->buf); 2322 err |= get_user(cp, &arg32->buf);
2323 arg64.buf = compat_ptr(cp); 2323 arg64.buf = compat_ptr(cp);
2324 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2324 err |= copy_to_user(p, &arg64, sizeof(arg64));
2325 2325
2326 if (err) 2326 if (err)
2327 return -EFAULT; 2327 return -EFAULT;
2328 2328
2329 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2329 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2330 if (err) 2330 if (err)
2331 return err; 2331 return err;
2332 err |= copy_in_user(&arg32->error_info, &p->error_info, 2332 err |= copy_in_user(&arg32->error_info, &p->error_info,
2333 sizeof(arg32->error_info)); 2333 sizeof(arg32->error_info));
2334 if (err) 2334 if (err)
2335 return -EFAULT; 2335 return -EFAULT;
2336 return err; 2336 return err;
2337 } 2337 }
2338 2338
2339 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 2339 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2340 int cmd, void *arg) 2340 int cmd, void *arg)
2341 { 2341 {
2342 BIG_IOCTL32_Command_struct __user *arg32 = 2342 BIG_IOCTL32_Command_struct __user *arg32 =
2343 (BIG_IOCTL32_Command_struct __user *) arg; 2343 (BIG_IOCTL32_Command_struct __user *) arg;
2344 BIG_IOCTL_Command_struct arg64; 2344 BIG_IOCTL_Command_struct arg64;
2345 BIG_IOCTL_Command_struct __user *p = 2345 BIG_IOCTL_Command_struct __user *p =
2346 compat_alloc_user_space(sizeof(arg64)); 2346 compat_alloc_user_space(sizeof(arg64));
2347 int err; 2347 int err;
2348 u32 cp; 2348 u32 cp;
2349 2349
2350 memset(&arg64, 0, sizeof(arg64)); 2350 memset(&arg64, 0, sizeof(arg64));
2351 err = 0; 2351 err = 0;
2352 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2352 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2353 sizeof(arg64.LUN_info)); 2353 sizeof(arg64.LUN_info));
2354 err |= copy_from_user(&arg64.Request, &arg32->Request, 2354 err |= copy_from_user(&arg64.Request, &arg32->Request,
2355 sizeof(arg64.Request)); 2355 sizeof(arg64.Request));
2356 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2356 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2357 sizeof(arg64.error_info)); 2357 sizeof(arg64.error_info));
2358 err |= get_user(arg64.buf_size, &arg32->buf_size); 2358 err |= get_user(arg64.buf_size, &arg32->buf_size);
2359 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 2359 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2360 err |= get_user(cp, &arg32->buf); 2360 err |= get_user(cp, &arg32->buf);
2361 arg64.buf = compat_ptr(cp); 2361 arg64.buf = compat_ptr(cp);
2362 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2362 err |= copy_to_user(p, &arg64, sizeof(arg64));
2363 2363
2364 if (err) 2364 if (err)
2365 return -EFAULT; 2365 return -EFAULT;
2366 2366
2367 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 2367 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2368 if (err) 2368 if (err)
2369 return err; 2369 return err;
2370 err |= copy_in_user(&arg32->error_info, &p->error_info, 2370 err |= copy_in_user(&arg32->error_info, &p->error_info,
2371 sizeof(arg32->error_info)); 2371 sizeof(arg32->error_info));
2372 if (err) 2372 if (err)
2373 return -EFAULT; 2373 return -EFAULT;
2374 return err; 2374 return err;
2375 } 2375 }
2376 2376
2377 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 2377 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2378 { 2378 {
2379 switch (cmd) { 2379 switch (cmd) {
2380 case CCISS_GETPCIINFO: 2380 case CCISS_GETPCIINFO:
2381 case CCISS_GETINTINFO: 2381 case CCISS_GETINTINFO:
2382 case CCISS_SETINTINFO: 2382 case CCISS_SETINTINFO:
2383 case CCISS_GETNODENAME: 2383 case CCISS_GETNODENAME:
2384 case CCISS_SETNODENAME: 2384 case CCISS_SETNODENAME:
2385 case CCISS_GETHEARTBEAT: 2385 case CCISS_GETHEARTBEAT:
2386 case CCISS_GETBUSTYPES: 2386 case CCISS_GETBUSTYPES:
2387 case CCISS_GETFIRMVER: 2387 case CCISS_GETFIRMVER:
2388 case CCISS_GETDRIVVER: 2388 case CCISS_GETDRIVVER:
2389 case CCISS_REVALIDVOLS: 2389 case CCISS_REVALIDVOLS:
2390 case CCISS_DEREGDISK: 2390 case CCISS_DEREGDISK:
2391 case CCISS_REGNEWDISK: 2391 case CCISS_REGNEWDISK:
2392 case CCISS_REGNEWD: 2392 case CCISS_REGNEWD:
2393 case CCISS_RESCANDISK: 2393 case CCISS_RESCANDISK:
2394 case CCISS_GETLUNINFO: 2394 case CCISS_GETLUNINFO:
2395 return hpsa_ioctl(dev, cmd, arg); 2395 return hpsa_ioctl(dev, cmd, arg);
2396 2396
2397 case CCISS_PASSTHRU32: 2397 case CCISS_PASSTHRU32:
2398 return hpsa_ioctl32_passthru(dev, cmd, arg); 2398 return hpsa_ioctl32_passthru(dev, cmd, arg);
2399 case CCISS_BIG_PASSTHRU32: 2399 case CCISS_BIG_PASSTHRU32:
2400 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 2400 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2401 2401
2402 default: 2402 default:
2403 return -ENOIOCTLCMD; 2403 return -ENOIOCTLCMD;
2404 } 2404 }
2405 } 2405 }
2406 #endif 2406 #endif
2407 2407
2408 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 2408 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2409 { 2409 {
2410 struct hpsa_pci_info pciinfo; 2410 struct hpsa_pci_info pciinfo;
2411 2411
2412 if (!argp) 2412 if (!argp)
2413 return -EINVAL; 2413 return -EINVAL;
2414 pciinfo.domain = pci_domain_nr(h->pdev->bus); 2414 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2415 pciinfo.bus = h->pdev->bus->number; 2415 pciinfo.bus = h->pdev->bus->number;
2416 pciinfo.dev_fn = h->pdev->devfn; 2416 pciinfo.dev_fn = h->pdev->devfn;
2417 pciinfo.board_id = h->board_id; 2417 pciinfo.board_id = h->board_id;
2418 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 2418 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2419 return -EFAULT; 2419 return -EFAULT;
2420 return 0; 2420 return 0;
2421 } 2421 }
2422 2422
2423 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 2423 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2424 { 2424 {
2425 DriverVer_type DriverVer; 2425 DriverVer_type DriverVer;
2426 unsigned char vmaj, vmin, vsubmin; 2426 unsigned char vmaj, vmin, vsubmin;
2427 int rc; 2427 int rc;
2428 2428
2429 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 2429 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2430 &vmaj, &vmin, &vsubmin); 2430 &vmaj, &vmin, &vsubmin);
2431 if (rc != 3) { 2431 if (rc != 3) {
2432 dev_info(&h->pdev->dev, "driver version string '%s' " 2432 dev_info(&h->pdev->dev, "driver version string '%s' "
2433 "unrecognized.", HPSA_DRIVER_VERSION); 2433 "unrecognized.", HPSA_DRIVER_VERSION);
2434 vmaj = 0; 2434 vmaj = 0;
2435 vmin = 0; 2435 vmin = 0;
2436 vsubmin = 0; 2436 vsubmin = 0;
2437 } 2437 }
2438 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 2438 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2439 if (!argp) 2439 if (!argp)
2440 return -EINVAL; 2440 return -EINVAL;
2441 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 2441 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2442 return -EFAULT; 2442 return -EFAULT;
2443 return 0; 2443 return 0;
2444 } 2444 }
2445 2445
2446 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2446 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2447 { 2447 {
2448 IOCTL_Command_struct iocommand; 2448 IOCTL_Command_struct iocommand;
2449 struct CommandList *c; 2449 struct CommandList *c;
2450 char *buff = NULL; 2450 char *buff = NULL;
2451 union u64bit temp64; 2451 union u64bit temp64;
2452 2452
2453 if (!argp) 2453 if (!argp)
2454 return -EINVAL; 2454 return -EINVAL;
2455 if (!capable(CAP_SYS_RAWIO)) 2455 if (!capable(CAP_SYS_RAWIO))
2456 return -EPERM; 2456 return -EPERM;
2457 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 2457 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2458 return -EFAULT; 2458 return -EFAULT;
2459 if ((iocommand.buf_size < 1) && 2459 if ((iocommand.buf_size < 1) &&
2460 (iocommand.Request.Type.Direction != XFER_NONE)) { 2460 (iocommand.Request.Type.Direction != XFER_NONE)) {
2461 return -EINVAL; 2461 return -EINVAL;
2462 } 2462 }
2463 if (iocommand.buf_size > 0) { 2463 if (iocommand.buf_size > 0) {
2464 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 2464 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2465 if (buff == NULL) 2465 if (buff == NULL)
2466 return -EFAULT; 2466 return -EFAULT;
2467 if (iocommand.Request.Type.Direction == XFER_WRITE) { 2467 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2468 /* Copy the data into the buffer we created */ 2468 /* Copy the data into the buffer we created */
2469 if (copy_from_user(buff, iocommand.buf, 2469 if (copy_from_user(buff, iocommand.buf,
2470 iocommand.buf_size)) { 2470 iocommand.buf_size)) {
2471 kfree(buff); 2471 kfree(buff);
2472 return -EFAULT; 2472 return -EFAULT;
2473 } 2473 }
2474 } else { 2474 } else {
2475 memset(buff, 0, iocommand.buf_size); 2475 memset(buff, 0, iocommand.buf_size);
2476 } 2476 }
2477 } 2477 }
2478 c = cmd_special_alloc(h); 2478 c = cmd_special_alloc(h);
2479 if (c == NULL) { 2479 if (c == NULL) {
2480 kfree(buff); 2480 kfree(buff);
2481 return -ENOMEM; 2481 return -ENOMEM;
2482 } 2482 }
2483 /* Fill in the command type */ 2483 /* Fill in the command type */
2484 c->cmd_type = CMD_IOCTL_PEND; 2484 c->cmd_type = CMD_IOCTL_PEND;
2485 /* Fill in Command Header */ 2485 /* Fill in Command Header */
2486 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2486 c->Header.ReplyQueue = 0; /* unused in simple mode */
2487 if (iocommand.buf_size > 0) { /* buffer to fill */ 2487 if (iocommand.buf_size > 0) { /* buffer to fill */
2488 c->Header.SGList = 1; 2488 c->Header.SGList = 1;
2489 c->Header.SGTotal = 1; 2489 c->Header.SGTotal = 1;
2490 } else { /* no buffers to fill */ 2490 } else { /* no buffers to fill */
2491 c->Header.SGList = 0; 2491 c->Header.SGList = 0;
2492 c->Header.SGTotal = 0; 2492 c->Header.SGTotal = 0;
2493 } 2493 }
2494 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 2494 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2495 /* use the kernel address the cmd block for tag */ 2495 /* use the kernel address the cmd block for tag */
2496 c->Header.Tag.lower = c->busaddr; 2496 c->Header.Tag.lower = c->busaddr;
2497 2497
2498 /* Fill in Request block */ 2498 /* Fill in Request block */
2499 memcpy(&c->Request, &iocommand.Request, 2499 memcpy(&c->Request, &iocommand.Request,
2500 sizeof(c->Request)); 2500 sizeof(c->Request));
2501 2501
2502 /* Fill in the scatter gather information */ 2502 /* Fill in the scatter gather information */
2503 if (iocommand.buf_size > 0) { 2503 if (iocommand.buf_size > 0) {
2504 temp64.val = pci_map_single(h->pdev, buff, 2504 temp64.val = pci_map_single(h->pdev, buff,
2505 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 2505 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2506 c->SG[0].Addr.lower = temp64.val32.lower; 2506 c->SG[0].Addr.lower = temp64.val32.lower;
2507 c->SG[0].Addr.upper = temp64.val32.upper; 2507 c->SG[0].Addr.upper = temp64.val32.upper;
2508 c->SG[0].Len = iocommand.buf_size; 2508 c->SG[0].Len = iocommand.buf_size;
2509 c->SG[0].Ext = 0; /* we are not chaining*/ 2509 c->SG[0].Ext = 0; /* we are not chaining*/
2510 } 2510 }
2511 hpsa_scsi_do_simple_cmd_core(h, c); 2511 hpsa_scsi_do_simple_cmd_core(h, c);
2512 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 2512 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2513 check_ioctl_unit_attention(h, c); 2513 check_ioctl_unit_attention(h, c);
2514 2514
2515 /* Copy the error information out */ 2515 /* Copy the error information out */
2516 memcpy(&iocommand.error_info, c->err_info, 2516 memcpy(&iocommand.error_info, c->err_info,
2517 sizeof(iocommand.error_info)); 2517 sizeof(iocommand.error_info));
2518 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 2518 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2519 kfree(buff); 2519 kfree(buff);
2520 cmd_special_free(h, c); 2520 cmd_special_free(h, c);
2521 return -EFAULT; 2521 return -EFAULT;
2522 } 2522 }
2523 if (iocommand.Request.Type.Direction == XFER_READ && 2523 if (iocommand.Request.Type.Direction == XFER_READ &&
2524 iocommand.buf_size > 0) { 2524 iocommand.buf_size > 0) {
2525 /* Copy the data out of the buffer we created */ 2525 /* Copy the data out of the buffer we created */
2526 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 2526 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2527 kfree(buff); 2527 kfree(buff);
2528 cmd_special_free(h, c); 2528 cmd_special_free(h, c);
2529 return -EFAULT; 2529 return -EFAULT;
2530 } 2530 }
2531 } 2531 }
2532 kfree(buff); 2532 kfree(buff);
2533 cmd_special_free(h, c); 2533 cmd_special_free(h, c);
2534 return 0; 2534 return 0;
2535 } 2535 }
2536 2536
2537 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2537 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2538 { 2538 {
2539 BIG_IOCTL_Command_struct *ioc; 2539 BIG_IOCTL_Command_struct *ioc;
2540 struct CommandList *c; 2540 struct CommandList *c;
2541 unsigned char **buff = NULL; 2541 unsigned char **buff = NULL;
2542 int *buff_size = NULL; 2542 int *buff_size = NULL;
2543 union u64bit temp64; 2543 union u64bit temp64;
2544 BYTE sg_used = 0; 2544 BYTE sg_used = 0;
2545 int status = 0; 2545 int status = 0;
2546 int i; 2546 int i;
2547 u32 left; 2547 u32 left;
2548 u32 sz; 2548 u32 sz;
2549 BYTE __user *data_ptr; 2549 BYTE __user *data_ptr;
2550 2550
2551 if (!argp) 2551 if (!argp)
2552 return -EINVAL; 2552 return -EINVAL;
2553 if (!capable(CAP_SYS_RAWIO)) 2553 if (!capable(CAP_SYS_RAWIO))
2554 return -EPERM; 2554 return -EPERM;
2555 ioc = (BIG_IOCTL_Command_struct *) 2555 ioc = (BIG_IOCTL_Command_struct *)
2556 kmalloc(sizeof(*ioc), GFP_KERNEL); 2556 kmalloc(sizeof(*ioc), GFP_KERNEL);
2557 if (!ioc) { 2557 if (!ioc) {
2558 status = -ENOMEM; 2558 status = -ENOMEM;
2559 goto cleanup1; 2559 goto cleanup1;
2560 } 2560 }
2561 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 2561 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2562 status = -EFAULT; 2562 status = -EFAULT;
2563 goto cleanup1; 2563 goto cleanup1;
2564 } 2564 }
2565 if ((ioc->buf_size < 1) && 2565 if ((ioc->buf_size < 1) &&
2566 (ioc->Request.Type.Direction != XFER_NONE)) { 2566 (ioc->Request.Type.Direction != XFER_NONE)) {
2567 status = -EINVAL; 2567 status = -EINVAL;
2568 goto cleanup1; 2568 goto cleanup1;
2569 } 2569 }
2570 /* Check kmalloc limits using all SGs */ 2570 /* Check kmalloc limits using all SGs */
2571 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 2571 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2572 status = -EINVAL; 2572 status = -EINVAL;
2573 goto cleanup1; 2573 goto cleanup1;
2574 } 2574 }
2575 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 2575 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2576 status = -EINVAL; 2576 status = -EINVAL;
2577 goto cleanup1; 2577 goto cleanup1;
2578 } 2578 }
2579 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 2579 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2580 if (!buff) { 2580 if (!buff) {
2581 status = -ENOMEM; 2581 status = -ENOMEM;
2582 goto cleanup1; 2582 goto cleanup1;
2583 } 2583 }
2584 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); 2584 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2585 if (!buff_size) { 2585 if (!buff_size) {
2586 status = -ENOMEM; 2586 status = -ENOMEM;
2587 goto cleanup1; 2587 goto cleanup1;
2588 } 2588 }
2589 left = ioc->buf_size; 2589 left = ioc->buf_size;
2590 data_ptr = ioc->buf; 2590 data_ptr = ioc->buf;
2591 while (left) { 2591 while (left) {
2592 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; 2592 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2593 buff_size[sg_used] = sz; 2593 buff_size[sg_used] = sz;
2594 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 2594 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2595 if (buff[sg_used] == NULL) { 2595 if (buff[sg_used] == NULL) {
2596 status = -ENOMEM; 2596 status = -ENOMEM;
2597 goto cleanup1; 2597 goto cleanup1;
2598 } 2598 }
2599 if (ioc->Request.Type.Direction == XFER_WRITE) { 2599 if (ioc->Request.Type.Direction == XFER_WRITE) {
2600 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 2600 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2601 status = -ENOMEM; 2601 status = -ENOMEM;
2602 goto cleanup1; 2602 goto cleanup1;
2603 } 2603 }
2604 } else 2604 } else
2605 memset(buff[sg_used], 0, sz); 2605 memset(buff[sg_used], 0, sz);
2606 left -= sz; 2606 left -= sz;
2607 data_ptr += sz; 2607 data_ptr += sz;
2608 sg_used++; 2608 sg_used++;
2609 } 2609 }
2610 c = cmd_special_alloc(h); 2610 c = cmd_special_alloc(h);
2611 if (c == NULL) { 2611 if (c == NULL) {
2612 status = -ENOMEM; 2612 status = -ENOMEM;
2613 goto cleanup1; 2613 goto cleanup1;
2614 } 2614 }
2615 c->cmd_type = CMD_IOCTL_PEND; 2615 c->cmd_type = CMD_IOCTL_PEND;
2616 c->Header.ReplyQueue = 0; 2616 c->Header.ReplyQueue = 0;
2617 c->Header.SGList = c->Header.SGTotal = sg_used; 2617 c->Header.SGList = c->Header.SGTotal = sg_used;
2618 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 2618 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2619 c->Header.Tag.lower = c->busaddr; 2619 c->Header.Tag.lower = c->busaddr;
2620 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 2620 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2621 if (ioc->buf_size > 0) { 2621 if (ioc->buf_size > 0) {
2622 int i; 2622 int i;
2623 for (i = 0; i < sg_used; i++) { 2623 for (i = 0; i < sg_used; i++) {
2624 temp64.val = pci_map_single(h->pdev, buff[i], 2624 temp64.val = pci_map_single(h->pdev, buff[i],
2625 buff_size[i], PCI_DMA_BIDIRECTIONAL); 2625 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2626 c->SG[i].Addr.lower = temp64.val32.lower; 2626 c->SG[i].Addr.lower = temp64.val32.lower;
2627 c->SG[i].Addr.upper = temp64.val32.upper; 2627 c->SG[i].Addr.upper = temp64.val32.upper;
2628 c->SG[i].Len = buff_size[i]; 2628 c->SG[i].Len = buff_size[i];
2629 /* we are not chaining */ 2629 /* we are not chaining */
2630 c->SG[i].Ext = 0; 2630 c->SG[i].Ext = 0;
2631 } 2631 }
2632 } 2632 }
2633 hpsa_scsi_do_simple_cmd_core(h, c); 2633 hpsa_scsi_do_simple_cmd_core(h, c);
2634 if (sg_used) 2634 if (sg_used)
2635 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 2635 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2636 check_ioctl_unit_attention(h, c); 2636 check_ioctl_unit_attention(h, c);
2637 /* Copy the error information out */ 2637 /* Copy the error information out */
2638 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 2638 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2639 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 2639 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2640 cmd_special_free(h, c); 2640 cmd_special_free(h, c);
2641 status = -EFAULT; 2641 status = -EFAULT;
2642 goto cleanup1; 2642 goto cleanup1;
2643 } 2643 }
2644 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 2644 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2645 /* Copy the data out of the buffer we created */ 2645 /* Copy the data out of the buffer we created */
2646 BYTE __user *ptr = ioc->buf; 2646 BYTE __user *ptr = ioc->buf;
2647 for (i = 0; i < sg_used; i++) { 2647 for (i = 0; i < sg_used; i++) {
2648 if (copy_to_user(ptr, buff[i], buff_size[i])) { 2648 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2649 cmd_special_free(h, c); 2649 cmd_special_free(h, c);
2650 status = -EFAULT; 2650 status = -EFAULT;
2651 goto cleanup1; 2651 goto cleanup1;
2652 } 2652 }
2653 ptr += buff_size[i]; 2653 ptr += buff_size[i];
2654 } 2654 }
2655 } 2655 }
2656 cmd_special_free(h, c); 2656 cmd_special_free(h, c);
2657 status = 0; 2657 status = 0;
2658 cleanup1: 2658 cleanup1:
2659 if (buff) { 2659 if (buff) {
2660 for (i = 0; i < sg_used; i++) 2660 for (i = 0; i < sg_used; i++)
2661 kfree(buff[i]); 2661 kfree(buff[i]);
2662 kfree(buff); 2662 kfree(buff);
2663 } 2663 }
2664 kfree(buff_size); 2664 kfree(buff_size);
2665 kfree(ioc); 2665 kfree(ioc);
2666 return status; 2666 return status;
2667 } 2667 }
2668 2668
2669 static void check_ioctl_unit_attention(struct ctlr_info *h, 2669 static void check_ioctl_unit_attention(struct ctlr_info *h,
2670 struct CommandList *c) 2670 struct CommandList *c)
2671 { 2671 {
2672 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2672 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2673 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 2673 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2674 (void) check_for_unit_attention(h, c); 2674 (void) check_for_unit_attention(h, c);
2675 } 2675 }
2676 /* 2676 /*
2677 * ioctl 2677 * ioctl
2678 */ 2678 */
2679 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 2679 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2680 { 2680 {
2681 struct ctlr_info *h; 2681 struct ctlr_info *h;
2682 void __user *argp = (void __user *)arg; 2682 void __user *argp = (void __user *)arg;
2683 2683
2684 h = sdev_to_hba(dev); 2684 h = sdev_to_hba(dev);
2685 2685
2686 switch (cmd) { 2686 switch (cmd) {
2687 case CCISS_DEREGDISK: 2687 case CCISS_DEREGDISK:
2688 case CCISS_REGNEWDISK: 2688 case CCISS_REGNEWDISK:
2689 case CCISS_REGNEWD: 2689 case CCISS_REGNEWD:
2690 hpsa_scan_start(h->scsi_host); 2690 hpsa_scan_start(h->scsi_host);
2691 return 0; 2691 return 0;
2692 case CCISS_GETPCIINFO: 2692 case CCISS_GETPCIINFO:
2693 return hpsa_getpciinfo_ioctl(h, argp); 2693 return hpsa_getpciinfo_ioctl(h, argp);
2694 case CCISS_GETDRIVVER: 2694 case CCISS_GETDRIVVER:
2695 return hpsa_getdrivver_ioctl(h, argp); 2695 return hpsa_getdrivver_ioctl(h, argp);
2696 case CCISS_PASSTHRU: 2696 case CCISS_PASSTHRU:
2697 return hpsa_passthru_ioctl(h, argp); 2697 return hpsa_passthru_ioctl(h, argp);
2698 case CCISS_BIG_PASSTHRU: 2698 case CCISS_BIG_PASSTHRU:
2699 return hpsa_big_passthru_ioctl(h, argp); 2699 return hpsa_big_passthru_ioctl(h, argp);
2700 default: 2700 default:
2701 return -ENOTTY; 2701 return -ENOTTY;
2702 } 2702 }
2703 } 2703 }
2704 2704
2705 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 2705 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2706 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, 2706 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2707 int cmd_type) 2707 int cmd_type)
2708 { 2708 {
2709 int pci_dir = XFER_NONE; 2709 int pci_dir = XFER_NONE;
2710 2710
2711 c->cmd_type = CMD_IOCTL_PEND; 2711 c->cmd_type = CMD_IOCTL_PEND;
2712 c->Header.ReplyQueue = 0; 2712 c->Header.ReplyQueue = 0;
2713 if (buff != NULL && size > 0) { 2713 if (buff != NULL && size > 0) {
2714 c->Header.SGList = 1; 2714 c->Header.SGList = 1;
2715 c->Header.SGTotal = 1; 2715 c->Header.SGTotal = 1;
2716 } else { 2716 } else {
2717 c->Header.SGList = 0; 2717 c->Header.SGList = 0;
2718 c->Header.SGTotal = 0; 2718 c->Header.SGTotal = 0;
2719 } 2719 }
2720 c->Header.Tag.lower = c->busaddr; 2720 c->Header.Tag.lower = c->busaddr;
2721 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 2721 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2722 2722
2723 c->Request.Type.Type = cmd_type; 2723 c->Request.Type.Type = cmd_type;
2724 if (cmd_type == TYPE_CMD) { 2724 if (cmd_type == TYPE_CMD) {
2725 switch (cmd) { 2725 switch (cmd) {
2726 case HPSA_INQUIRY: 2726 case HPSA_INQUIRY:
2727 /* are we trying to read a vital product data page? */ 2727 /* are we trying to read a vital product data page? */
2728 if (page_code != 0) { 2728 if (page_code != 0) {
2729 c->Request.CDB[1] = 0x01; 2729 c->Request.CDB[1] = 0x01;
2730 c->Request.CDB[2] = page_code; 2730 c->Request.CDB[2] = page_code;
2731 } 2731 }
2732 c->Request.CDBLen = 6; 2732 c->Request.CDBLen = 6;
2733 c->Request.Type.Attribute = ATTR_SIMPLE; 2733 c->Request.Type.Attribute = ATTR_SIMPLE;
2734 c->Request.Type.Direction = XFER_READ; 2734 c->Request.Type.Direction = XFER_READ;
2735 c->Request.Timeout = 0; 2735 c->Request.Timeout = 0;
2736 c->Request.CDB[0] = HPSA_INQUIRY; 2736 c->Request.CDB[0] = HPSA_INQUIRY;
2737 c->Request.CDB[4] = size & 0xFF; 2737 c->Request.CDB[4] = size & 0xFF;
2738 break; 2738 break;
2739 case HPSA_REPORT_LOG: 2739 case HPSA_REPORT_LOG:
2740 case HPSA_REPORT_PHYS: 2740 case HPSA_REPORT_PHYS:
2741 /* Talking to the controller, so it's a physical command: 2741 /* Talking to the controller, so it's a physical command:
2742 mode = 00, target = 0. Nothing to write. 2742 mode = 00, target = 0. Nothing to write.
2743 */ 2743 */
2744 c->Request.CDBLen = 12; 2744 c->Request.CDBLen = 12;
2745 c->Request.Type.Attribute = ATTR_SIMPLE; 2745 c->Request.Type.Attribute = ATTR_SIMPLE;
2746 c->Request.Type.Direction = XFER_READ; 2746 c->Request.Type.Direction = XFER_READ;
2747 c->Request.Timeout = 0; 2747 c->Request.Timeout = 0;
2748 c->Request.CDB[0] = cmd; 2748 c->Request.CDB[0] = cmd;
2749 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 2749 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2750 c->Request.CDB[7] = (size >> 16) & 0xFF; 2750 c->Request.CDB[7] = (size >> 16) & 0xFF;
2751 c->Request.CDB[8] = (size >> 8) & 0xFF; 2751 c->Request.CDB[8] = (size >> 8) & 0xFF;
2752 c->Request.CDB[9] = size & 0xFF; 2752 c->Request.CDB[9] = size & 0xFF;
2753 break; 2753 break;
2754 case HPSA_CACHE_FLUSH: 2754 case HPSA_CACHE_FLUSH:
2755 c->Request.CDBLen = 12; 2755 c->Request.CDBLen = 12;
2756 c->Request.Type.Attribute = ATTR_SIMPLE; 2756 c->Request.Type.Attribute = ATTR_SIMPLE;
2757 c->Request.Type.Direction = XFER_WRITE; 2757 c->Request.Type.Direction = XFER_WRITE;
2758 c->Request.Timeout = 0; 2758 c->Request.Timeout = 0;
2759 c->Request.CDB[0] = BMIC_WRITE; 2759 c->Request.CDB[0] = BMIC_WRITE;
2760 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2760 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2761 break; 2761 break;
2762 case TEST_UNIT_READY: 2762 case TEST_UNIT_READY:
2763 c->Request.CDBLen = 6; 2763 c->Request.CDBLen = 6;
2764 c->Request.Type.Attribute = ATTR_SIMPLE; 2764 c->Request.Type.Attribute = ATTR_SIMPLE;
2765 c->Request.Type.Direction = XFER_NONE; 2765 c->Request.Type.Direction = XFER_NONE;
2766 c->Request.Timeout = 0; 2766 c->Request.Timeout = 0;
2767 break; 2767 break;
2768 default: 2768 default:
2769 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); 2769 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
2770 BUG(); 2770 BUG();
2771 return; 2771 return;
2772 } 2772 }
2773 } else if (cmd_type == TYPE_MSG) { 2773 } else if (cmd_type == TYPE_MSG) {
2774 switch (cmd) { 2774 switch (cmd) {
2775 2775
2776 case HPSA_DEVICE_RESET_MSG: 2776 case HPSA_DEVICE_RESET_MSG:
2777 c->Request.CDBLen = 16; 2777 c->Request.CDBLen = 16;
2778 c->Request.Type.Type = 1; /* It is a MSG not a CMD */ 2778 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2779 c->Request.Type.Attribute = ATTR_SIMPLE; 2779 c->Request.Type.Attribute = ATTR_SIMPLE;
2780 c->Request.Type.Direction = XFER_NONE; 2780 c->Request.Type.Direction = XFER_NONE;
2781 c->Request.Timeout = 0; /* Don't time out */ 2781 c->Request.Timeout = 0; /* Don't time out */
2782 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ 2782 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2783 c->Request.CDB[1] = 0x03; /* Reset target above */ 2783 c->Request.CDB[1] = 0x03; /* Reset target above */
2784 /* If bytes 4-7 are zero, it means reset the */ 2784 /* If bytes 4-7 are zero, it means reset the */
2785 /* LunID device */ 2785 /* LunID device */
2786 c->Request.CDB[4] = 0x00; 2786 c->Request.CDB[4] = 0x00;
2787 c->Request.CDB[5] = 0x00; 2787 c->Request.CDB[5] = 0x00;
2788 c->Request.CDB[6] = 0x00; 2788 c->Request.CDB[6] = 0x00;
2789 c->Request.CDB[7] = 0x00; 2789 c->Request.CDB[7] = 0x00;
2790 break; 2790 break;
2791 2791
2792 default: 2792 default:
2793 dev_warn(&h->pdev->dev, "unknown message type %d\n", 2793 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2794 cmd); 2794 cmd);
2795 BUG(); 2795 BUG();
2796 } 2796 }
2797 } else { 2797 } else {
2798 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 2798 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2799 BUG(); 2799 BUG();
2800 } 2800 }
2801 2801
2802 switch (c->Request.Type.Direction) { 2802 switch (c->Request.Type.Direction) {
2803 case XFER_READ: 2803 case XFER_READ:
2804 pci_dir = PCI_DMA_FROMDEVICE; 2804 pci_dir = PCI_DMA_FROMDEVICE;
2805 break; 2805 break;
2806 case XFER_WRITE: 2806 case XFER_WRITE:
2807 pci_dir = PCI_DMA_TODEVICE; 2807 pci_dir = PCI_DMA_TODEVICE;
2808 break; 2808 break;
2809 case XFER_NONE: 2809 case XFER_NONE:
2810 pci_dir = PCI_DMA_NONE; 2810 pci_dir = PCI_DMA_NONE;
2811 break; 2811 break;
2812 default: 2812 default:
2813 pci_dir = PCI_DMA_BIDIRECTIONAL; 2813 pci_dir = PCI_DMA_BIDIRECTIONAL;
2814 } 2814 }
2815 2815
2816 hpsa_map_one(h->pdev, c, buff, size, pci_dir); 2816 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2817 2817
2818 return; 2818 return;
2819 } 2819 }
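
A caller sketch for fill_cmd(), mirroring the TEST_UNIT_READY case above; the surrounding setup (c, h, scsi3addr) is assumed and not shown in this hunk:

	/* buff == NULL and size == 0, so SGList/SGTotal stay 0 and the
	 * direction switch at the end of fill_cmd() resolves to
	 * XFER_NONE / PCI_DMA_NONE.
	 */
	fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
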
2820 2820
2821 /* 2821 /*
2822 * Map (physical) PCI mem into (virtual) kernel space 2822 * Map (physical) PCI mem into (virtual) kernel space
2823 */ 2823 */
2824 static void __iomem *remap_pci_mem(ulong base, ulong size) 2824 static void __iomem *remap_pci_mem(ulong base, ulong size)
2825 { 2825 {
2826 ulong page_base = ((ulong) base) & PAGE_MASK; 2826 ulong page_base = ((ulong) base) & PAGE_MASK;
2827 ulong page_offs = ((ulong) base) - page_base; 2827 ulong page_offs = ((ulong) base) - page_base;
2828 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 2828 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2829 2829
2830 return page_remapped ? (page_remapped + page_offs) : NULL; 2830 return page_remapped ? (page_remapped + page_offs) : NULL;
2831 } 2831 }
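
The helper above is plain page-alignment arithmetic: ioremap() is handed a page-aligned base and a length padded by the in-page offset, and the caller gets the offset added back. A worked example with hypothetical addresses, assuming 4 KiB pages:

	/* base      = 0xfebf1040
	 * page_base = base & PAGE_MASK = 0xfebf1000
	 * page_offs = base - page_base = 0x40
	 * ioremap(0xfebf1000, 0x40 + size) -> vaddr
	 * caller receives vaddr + 0x40, which corresponds to 'base'.
	 */
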
2832 2832
2833 /* Takes cmds off the submission queue and sends them to the hardware, 2833 /* Takes cmds off the submission queue and sends them to the hardware,
2834 * then puts them on the queue of cmds waiting for completion. 2834 * then puts them on the queue of cmds waiting for completion.
2835 */ 2835 */
2836 static void start_io(struct ctlr_info *h) 2836 static void start_io(struct ctlr_info *h)
2837 { 2837 {
2838 struct CommandList *c; 2838 struct CommandList *c;
2839 2839
2840 while (!hlist_empty(&h->reqQ)) { 2840 while (!list_empty(&h->reqQ)) {
2841 c = hlist_entry(h->reqQ.first, struct CommandList, list); 2841 c = list_entry(h->reqQ.next, struct CommandList, list);
2842 /* can't do anything if fifo is full */ 2842 /* can't do anything if fifo is full */
2843 if ((h->access.fifo_full(h))) { 2843 if ((h->access.fifo_full(h))) {
2844 dev_warn(&h->pdev->dev, "fifo full\n"); 2844 dev_warn(&h->pdev->dev, "fifo full\n");
2845 break; 2845 break;
2846 } 2846 }
2847 2847
2848 /* Get the first entry from the Request Q */ 2848 /* Get the first entry from the Request Q */
2849 removeQ(c); 2849 removeQ(c);
2850 h->Qdepth--; 2850 h->Qdepth--;
2851 2851
2852 /* Tell the controller to execute the command */ 2852 /* Tell the controller to execute the command */
2853 h->access.submit_command(h, c); 2853 h->access.submit_command(h, c);
2854 2854
2855 /* Put job onto the completed Q */ 2855 /* Put job onto the completed Q */
2856 addQ(&h->cmpQ, c); 2856 addQ(&h->cmpQ, c);
2857 } 2857 }
2858 } 2858 }
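
start_io() is where this commit's FIFO guarantee becomes visible: dispatch now pops reqQ.next, the oldest entry. A minimal sketch of the ordering difference, assuming (as the rest of the patch implies) that addQ() now appends with list_add_tail():

	#include <linux/list.h>

	/* Old scheme: hlist has no tail pointer, so addQ() could only
	 * hlist_add_head(), and popping reqQ.first drained newest-first
	 * (LIFO). A list_head keeps a tail, so submission appends and
	 * dispatch pops from the front (FIFO):
	 */
	static void demo_enqueue(struct list_head *q, struct CommandList *c)
	{
		list_add_tail(&c->list, q);	/* oldest stays at q->next */
	}

	static struct CommandList *demo_dequeue(struct list_head *q)
	{
		return list_entry(q->next, struct CommandList, list);
	}
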
2859 2859
2860 static inline unsigned long get_next_completion(struct ctlr_info *h) 2860 static inline unsigned long get_next_completion(struct ctlr_info *h)
2861 { 2861 {
2862 return h->access.command_completed(h); 2862 return h->access.command_completed(h);
2863 } 2863 }
2864 2864
2865 static inline bool interrupt_pending(struct ctlr_info *h) 2865 static inline bool interrupt_pending(struct ctlr_info *h)
2866 { 2866 {
2867 return h->access.intr_pending(h); 2867 return h->access.intr_pending(h);
2868 } 2868 }
2869 2869
2870 static inline long interrupt_not_for_us(struct ctlr_info *h) 2870 static inline long interrupt_not_for_us(struct ctlr_info *h)
2871 { 2871 {
2872 return (h->access.intr_pending(h) == 0) || 2872 return (h->access.intr_pending(h) == 0) ||
2873 (h->interrupts_enabled == 0); 2873 (h->interrupts_enabled == 0);
2874 } 2874 }
2875 2875
2876 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 2876 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2877 u32 raw_tag) 2877 u32 raw_tag)
2878 { 2878 {
2879 if (unlikely(tag_index >= h->nr_cmds)) { 2879 if (unlikely(tag_index >= h->nr_cmds)) {
2880 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 2880 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2881 return 1; 2881 return 1;
2882 } 2882 }
2883 return 0; 2883 return 0;
2884 } 2884 }
2885 2885
2886 static inline void finish_cmd(struct CommandList *c, u32 raw_tag) 2886 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2887 { 2887 {
2888 removeQ(c); 2888 removeQ(c);
2889 if (likely(c->cmd_type == CMD_SCSI)) 2889 if (likely(c->cmd_type == CMD_SCSI))
2890 complete_scsi_command(c, 0, raw_tag); 2890 complete_scsi_command(c, 0, raw_tag);
2891 else if (c->cmd_type == CMD_IOCTL_PEND) 2891 else if (c->cmd_type == CMD_IOCTL_PEND)
2892 complete(c->waiting); 2892 complete(c->waiting);
2893 } 2893 }
2894 2894
2895 static inline u32 hpsa_tag_contains_index(u32 tag) 2895 static inline u32 hpsa_tag_contains_index(u32 tag)
2896 { 2896 {
2897 return tag & DIRECT_LOOKUP_BIT; 2897 return tag & DIRECT_LOOKUP_BIT;
2898 } 2898 }
2899 2899
2900 static inline u32 hpsa_tag_to_index(u32 tag) 2900 static inline u32 hpsa_tag_to_index(u32 tag)
2901 { 2901 {
2902 return tag >> DIRECT_LOOKUP_SHIFT; 2902 return tag >> DIRECT_LOOKUP_SHIFT;
2903 } 2903 }
2904 2904
2905 static inline u32 hpsa_tag_discard_error_bits(u32 tag) 2905 static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2906 { 2906 {
2907 #define HPSA_ERROR_BITS 0x03 2907 #define HPSA_ERROR_BITS 0x03
2908 return tag & ~HPSA_ERROR_BITS; 2908 return tag & ~HPSA_ERROR_BITS;
2909 } 2909 }
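
These three helpers assume a single tag layout; the exact bit positions come from DIRECT_LOOKUP_BIT / DIRECT_LOOKUP_SHIFT in hpsa.h, so the diagram below is illustrative only:

	/*   [ command-pool index ] [ direct-lookup flag ] [ error bits 1:0 ]
	 *
	 * process_indexed_cmd() recovers the index with
	 * tag >> DIRECT_LOOKUP_SHIFT and dereferences h->cmd_pool directly;
	 * tags without the flag carry a bus address instead and must be
	 * matched by walking h->cmpQ (see process_nonindexed_cmd() below).
	 */
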
2910 2910
2911 /* process completion of an indexed ("direct lookup") command */ 2911 /* process completion of an indexed ("direct lookup") command */
2912 static inline u32 process_indexed_cmd(struct ctlr_info *h, 2912 static inline u32 process_indexed_cmd(struct ctlr_info *h,
2913 u32 raw_tag) 2913 u32 raw_tag)
2914 { 2914 {
2915 u32 tag_index; 2915 u32 tag_index;
2916 struct CommandList *c; 2916 struct CommandList *c;
2917 2917
2918 tag_index = hpsa_tag_to_index(raw_tag); 2918 tag_index = hpsa_tag_to_index(raw_tag);
2919 if (bad_tag(h, tag_index, raw_tag)) 2919 if (bad_tag(h, tag_index, raw_tag))
2920 return next_command(h); 2920 return next_command(h);
2921 c = h->cmd_pool + tag_index; 2921 c = h->cmd_pool + tag_index;
2922 finish_cmd(c, raw_tag); 2922 finish_cmd(c, raw_tag);
2923 return next_command(h); 2923 return next_command(h);
2924 } 2924 }
2925 2925
2926 /* process completion of a non-indexed command */ 2926 /* process completion of a non-indexed command */
2927 static inline u32 process_nonindexed_cmd(struct ctlr_info *h, 2927 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2928 u32 raw_tag) 2928 u32 raw_tag)
2929 { 2929 {
2930 u32 tag; 2930 u32 tag;
2931 struct CommandList *c = NULL; 2931 struct CommandList *c = NULL;
2932 struct hlist_node *tmp;
2933 2932
2934 tag = hpsa_tag_discard_error_bits(raw_tag); 2933 tag = hpsa_tag_discard_error_bits(raw_tag);
2935 hlist_for_each_entry(c, tmp, &h->cmpQ, list) { 2934 list_for_each_entry(c, &h->cmpQ, list) {
2936 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 2935 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2937 finish_cmd(c, raw_tag); 2936 finish_cmd(c, raw_tag);
2938 return next_command(h); 2937 return next_command(h);
2939 } 2938 }
2940 } 2939 }
2941 bad_tag(h, h->nr_cmds + 1, raw_tag); 2940 bad_tag(h, h->nr_cmds + 1, raw_tag);
2942 return next_command(h); 2941 return next_command(h);
2943 } 2942 }
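
A note on the 0xFFFFFFE0 mask: applying it to both sides makes the comparison insensitive to the low five bits, which on the tag side may carry controller status flags (cf. hpsa_tag_discard_error_bits() above) and on the busaddr side are expected to be zero by command-pool alignment; the alignment part is an assumption, not shown in this hunk.

	/* (c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)
	 *        ^ low bits zero by alignment   ^ status bits masked off */
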
2944 2943
2945 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 2944 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2946 { 2945 {
2947 struct ctlr_info *h = dev_id; 2946 struct ctlr_info *h = dev_id;
2948 unsigned long flags; 2947 unsigned long flags;
2949 u32 raw_tag; 2948 u32 raw_tag;
2950 2949
2951 if (interrupt_not_for_us(h)) 2950 if (interrupt_not_for_us(h))
2952 return IRQ_NONE; 2951 return IRQ_NONE;
2953 spin_lock_irqsave(&h->lock, flags); 2952 spin_lock_irqsave(&h->lock, flags);
2954 while (interrupt_pending(h)) { 2953 while (interrupt_pending(h)) {
2955 raw_tag = get_next_completion(h); 2954 raw_tag = get_next_completion(h);
2956 while (raw_tag != FIFO_EMPTY) { 2955 while (raw_tag != FIFO_EMPTY) {
2957 if (hpsa_tag_contains_index(raw_tag)) 2956 if (hpsa_tag_contains_index(raw_tag))
2958 raw_tag = process_indexed_cmd(h, raw_tag); 2957 raw_tag = process_indexed_cmd(h, raw_tag);
2959 else 2958 else
2960 raw_tag = process_nonindexed_cmd(h, raw_tag); 2959 raw_tag = process_nonindexed_cmd(h, raw_tag);
2961 } 2960 }
2962 } 2961 }
2963 spin_unlock_irqrestore(&h->lock, flags); 2962 spin_unlock_irqrestore(&h->lock, flags);
2964 return IRQ_HANDLED; 2963 return IRQ_HANDLED;
2965 } 2964 }
2966 2965
2967 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) 2966 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
2968 { 2967 {
2969 struct ctlr_info *h = dev_id; 2968 struct ctlr_info *h = dev_id;
2970 unsigned long flags; 2969 unsigned long flags;
2971 u32 raw_tag; 2970 u32 raw_tag;
2972 2971
2973 spin_lock_irqsave(&h->lock, flags); 2972 spin_lock_irqsave(&h->lock, flags);
2974 raw_tag = get_next_completion(h); 2973 raw_tag = get_next_completion(h);
2975 while (raw_tag != FIFO_EMPTY) { 2974 while (raw_tag != FIFO_EMPTY) {
2976 if (hpsa_tag_contains_index(raw_tag)) 2975 if (hpsa_tag_contains_index(raw_tag))
2977 raw_tag = process_indexed_cmd(h, raw_tag); 2976 raw_tag = process_indexed_cmd(h, raw_tag);
2978 else 2977 else
2979 raw_tag = process_nonindexed_cmd(h, raw_tag); 2978 raw_tag = process_nonindexed_cmd(h, raw_tag);
2980 } 2979 }
2981 spin_unlock_irqrestore(&h->lock, flags); 2980 spin_unlock_irqrestore(&h->lock, flags);
2982 return IRQ_HANDLED; 2981 return IRQ_HANDLED;
2983 } 2982 }
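
The asymmetry between the two handlers is deliberate: a pin-based INTx line can be shared, so that path must first ask the hardware whether the interrupt is actually ours; MSI/MSI-X vectors are exclusive to the device, so the MSI handler drains completions unconditionally.

	/* do_hpsa_intr_intx: interrupt_not_for_us() gate, then drain.
	 * do_hpsa_intr_msi:  no gate needed; the vector is never shared.
	 */
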
2984 2983
2985 /* Send a message CDB to the firmware. */ 2984 /* Send a message CDB to the firmware. */
2986 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 2985 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2987 unsigned char type) 2986 unsigned char type)
2988 { 2987 {
2989 struct Command { 2988 struct Command {
2990 struct CommandListHeader CommandHeader; 2989 struct CommandListHeader CommandHeader;
2991 struct RequestBlock Request; 2990 struct RequestBlock Request;
2992 struct ErrDescriptor ErrorDescriptor; 2991 struct ErrDescriptor ErrorDescriptor;
2993 }; 2992 };
2994 struct Command *cmd; 2993 struct Command *cmd;
2995 static const size_t cmd_sz = sizeof(*cmd) + 2994 static const size_t cmd_sz = sizeof(*cmd) +
2996 sizeof(cmd->ErrorDescriptor); 2995 sizeof(cmd->ErrorDescriptor);
2997 dma_addr_t paddr64; 2996 dma_addr_t paddr64;
2998 uint32_t paddr32, tag; 2997 uint32_t paddr32, tag;
2999 void __iomem *vaddr; 2998 void __iomem *vaddr;
3000 int i, err; 2999 int i, err;
3001 3000
3002 vaddr = pci_ioremap_bar(pdev, 0); 3001 vaddr = pci_ioremap_bar(pdev, 0);
3003 if (vaddr == NULL) 3002 if (vaddr == NULL)
3004 return -ENOMEM; 3003 return -ENOMEM;
3005 3004
3006 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 3005 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3007 * CCISS commands, so they must be allocated from the lower 4GiB of 3006 * CCISS commands, so they must be allocated from the lower 4GiB of
3008 * memory. 3007 * memory.
3009 */ 3008 */
3010 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3009 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3011 if (err) { 3010 if (err) {
3012 iounmap(vaddr); 3011 iounmap(vaddr);
3013 return -ENOMEM; 3012 return -ENOMEM;
3014 } 3013 }
3015 3014
3016 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 3015 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3017 if (cmd == NULL) { 3016 if (cmd == NULL) {
3018 iounmap(vaddr); 3017 iounmap(vaddr);
3019 return -ENOMEM; 3018 return -ENOMEM;
3020 } 3019 }
3021 3020
3022 /* This must fit, because of the 32-bit consistent DMA mask. Also, 3021 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3023 * although there's no guarantee, we assume that the address is at 3022 * although there's no guarantee, we assume that the address is at
3024 * least 4-byte aligned (most likely, it's page-aligned). 3023 * least 4-byte aligned (most likely, it's page-aligned).
3025 */ 3024 */
3026 paddr32 = paddr64; 3025 paddr32 = paddr64;
3027 3026
3028 cmd->CommandHeader.ReplyQueue = 0; 3027 cmd->CommandHeader.ReplyQueue = 0;
3029 cmd->CommandHeader.SGList = 0; 3028 cmd->CommandHeader.SGList = 0;
3030 cmd->CommandHeader.SGTotal = 0; 3029 cmd->CommandHeader.SGTotal = 0;
3031 cmd->CommandHeader.Tag.lower = paddr32; 3030 cmd->CommandHeader.Tag.lower = paddr32;
3032 cmd->CommandHeader.Tag.upper = 0; 3031 cmd->CommandHeader.Tag.upper = 0;
3033 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 3032 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3034 3033
3035 cmd->Request.CDBLen = 16; 3034 cmd->Request.CDBLen = 16;
3036 cmd->Request.Type.Type = TYPE_MSG; 3035 cmd->Request.Type.Type = TYPE_MSG;
3037 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 3036 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3038 cmd->Request.Type.Direction = XFER_NONE; 3037 cmd->Request.Type.Direction = XFER_NONE;
3039 cmd->Request.Timeout = 0; /* Don't time out */ 3038 cmd->Request.Timeout = 0; /* Don't time out */
3040 cmd->Request.CDB[0] = opcode; 3039 cmd->Request.CDB[0] = opcode;
3041 cmd->Request.CDB[1] = type; 3040 cmd->Request.CDB[1] = type;
3042 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 3041 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3043 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 3042 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3044 cmd->ErrorDescriptor.Addr.upper = 0; 3043 cmd->ErrorDescriptor.Addr.upper = 0;
3045 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 3044 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3046 3045
3047 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 3046 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3048 3047
3049 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 3048 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3050 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 3049 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3051 if (hpsa_tag_discard_error_bits(tag) == paddr32) 3050 if (hpsa_tag_discard_error_bits(tag) == paddr32)
3052 break; 3051 break;
3053 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 3052 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3054 } 3053 }
3055 3054
3056 iounmap(vaddr); 3055 iounmap(vaddr);
3057 3056
3058 /* we leak the DMA buffer here ... no choice since the controller could 3057 /* we leak the DMA buffer here ... no choice since the controller could
3059 * still complete the command. 3058 * still complete the command.
3060 */ 3059 */
3061 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 3060 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3062 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 3061 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3063 opcode, type); 3062 opcode, type);
3064 return -ETIMEDOUT; 3063 return -ETIMEDOUT;
3065 } 3064 }
3066 3065
3067 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 3066 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3068 3067
3069 if (tag & HPSA_ERROR_BIT) { 3068 if (tag & HPSA_ERROR_BIT) {
3070 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 3069 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3071 opcode, type); 3070 opcode, type);
3072 return -EIO; 3071 return -EIO;
3073 } 3072 }
3074 3073
3075 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 3074 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3076 opcode, type); 3075 opcode, type);
3077 return 0; 3076 return 0;
3078 } 3077 }
3079 3078
3080 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) 3079 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3081 #define hpsa_noop(p) hpsa_message(p, 3, 0) 3080 #define hpsa_noop(p) hpsa_message(p, 3, 0)
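
Tying the macros to the "%02x:%02x" log format above:

	/* hpsa_soft_reset_controller(pdev) == hpsa_message(pdev, 1, 0);
	 * on timeout the log reads "controller message 01:00 timed out".
	 * hpsa_noop(pdev) would likewise be reported as 03:00.
	 */
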
3082 3081
3083 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3082 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3084 void __iomem *vaddr, bool use_doorbell) 3083 void __iomem *vaddr, bool use_doorbell)
3085 { 3084 {
3086 u16 pmcsr; 3085 u16 pmcsr;
3087 int pos; 3086 int pos;
3088 3087
3089 if (use_doorbell) { 3088 if (use_doorbell) {
3090 /* For everything after the P600, the PCI power state method 3089 /* For everything after the P600, the PCI power state method
3091 * of resetting the controller doesn't work, so we have this 3090 * of resetting the controller doesn't work, so we have this
3092 * other way using the doorbell register. 3091 * other way using the doorbell register.
3093 */ 3092 */
3094 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3093 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3095 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 3094 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
3096 msleep(1000); 3095 msleep(1000);
3097 } else { /* Try to do it the PCI power state way */ 3096 } else { /* Try to do it the PCI power state way */
3098 3097
3099 /* Quoting from the Open CISS Specification: "The Power 3098 /* Quoting from the Open CISS Specification: "The Power
3100 * Management Control/Status Register (CSR) controls the power 3099 * Management Control/Status Register (CSR) controls the power
3101 * state of the device. The normal operating state is D0, 3100 * state of the device. The normal operating state is D0,
3102 * CSR=00h. The software off state is D3, CSR=03h. To reset 3101 * CSR=00h. The software off state is D3, CSR=03h. To reset
3103 * the controller, place the interface device in D3 then to D0, 3102 * the controller, place the interface device in D3 then to D0,
3104 * this causes a secondary PCI reset which will reset the 3103 * this causes a secondary PCI reset which will reset the
3105 * controller." */ 3104 * controller." */
3106 3105
3107 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 3106 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3108 if (pos == 0) { 3107 if (pos == 0) {
3109 dev_err(&pdev->dev, 3108 dev_err(&pdev->dev,
3110 "hpsa_reset_controller: " 3109 "hpsa_reset_controller: "
3111 "PCI PM not supported\n"); 3110 "PCI PM not supported\n");
3112 return -ENODEV; 3111 return -ENODEV;
3113 } 3112 }
3114 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3113 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3115 /* enter the D3hot power management state */ 3114 /* enter the D3hot power management state */
3116 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3115 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3117 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3116 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3118 pmcsr |= PCI_D3hot; 3117 pmcsr |= PCI_D3hot;
3119 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3118 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3120 3119
3121 msleep(500); 3120 msleep(500);
3122 3121
3123 /* enter the D0 power management state */ 3122 /* enter the D0 power management state */
3124 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3123 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3125 pmcsr |= PCI_D0; 3124 pmcsr |= PCI_D0;
3126 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3125 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3127 3126
3128 msleep(500); 3127 msleep(500);
3129 } 3128 }
3130 return 0; 3129 return 0;
3131 } 3130 }
3132 3131
3133 /* This does a hard reset of the controller using PCI power management 3132 /* This does a hard reset of the controller using PCI power management
3134 * states or by using the doorbell register. 3133 * states or by using the doorbell register.
3135 */ 3134 */
3136 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 3135 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3137 { 3136 {
3138 u64 cfg_offset; 3137 u64 cfg_offset;
3139 u32 cfg_base_addr; 3138 u32 cfg_base_addr;
3140 u64 cfg_base_addr_index; 3139 u64 cfg_base_addr_index;
3141 void __iomem *vaddr; 3140 void __iomem *vaddr;
3142 unsigned long paddr; 3141 unsigned long paddr;
3143 u32 misc_fw_support, active_transport; 3142 u32 misc_fw_support, active_transport;
3144 int rc; 3143 int rc;
3145 struct CfgTable __iomem *cfgtable; 3144 struct CfgTable __iomem *cfgtable;
3146 bool use_doorbell; 3145 bool use_doorbell;
3147 u32 board_id; 3146 u32 board_id;
3148 u16 command_register; 3147 u16 command_register;
3149 3148
3150 /* For controllers as old as the P600, this is very nearly 3149 /* For controllers as old as the P600, this is very nearly
3151 * the same thing as 3150 * the same thing as
3152 * 3151 *
3153 * pci_save_state(pci_dev); 3152 * pci_save_state(pci_dev);
3154 * pci_set_power_state(pci_dev, PCI_D3hot); 3153 * pci_set_power_state(pci_dev, PCI_D3hot);
3155 * pci_set_power_state(pci_dev, PCI_D0); 3154 * pci_set_power_state(pci_dev, PCI_D0);
3156 * pci_restore_state(pci_dev); 3155 * pci_restore_state(pci_dev);
3157 * 3156 *
3158 * For controllers newer than the P600, the pci power state 3157 * For controllers newer than the P600, the pci power state
3159 * method of resetting doesn't work so we have another way 3158 * method of resetting doesn't work so we have another way
3160 * using the doorbell register. 3159 * using the doorbell register.
3161 */ 3160 */
3162 3161
3163 /* Exclude 640x boards. These are two pci devices in one slot 3162 /* Exclude 640x boards. These are two pci devices in one slot
3164 * which share a battery backed cache module. One controls the 3163 * which share a battery backed cache module. One controls the
3165 * cache, the other accesses the cache through the one that controls 3164 * cache, the other accesses the cache through the one that controls
3166 * it. If we reset the one controlling the cache, the other will 3165 * it. If we reset the one controlling the cache, the other will
3167 * likely not be happy. Just forbid resetting this conjoined mess. 3166 * likely not be happy. Just forbid resetting this conjoined mess.
3168 * The 640x isn't really supported by hpsa anyway. 3167 * The 640x isn't really supported by hpsa anyway.
3169 */ 3168 */
3170 rc = hpsa_lookup_board_id(pdev, &board_id); 3169 rc = hpsa_lookup_board_id(pdev, &board_id);
3171 if (rc < 0) { 3170 if (rc < 0) {
3172 dev_warn(&pdev->dev, "Not resetting device.\n"); 3171 dev_warn(&pdev->dev, "Not resetting device.\n");
3173 return -ENODEV; 3172 return -ENODEV;
3174 } 3173 }
3175 if (board_id == 0x409C0E11 || board_id == 0x409D0E11) 3174 if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
3176 return -ENOTSUPP; 3175 return -ENOTSUPP;
3177 3176
3178 /* Save the PCI command register */ 3177 /* Save the PCI command register */
3179 pci_read_config_word(pdev, 4, &command_register); 3178 pci_read_config_word(pdev, 4, &command_register);
3180 /* Turn the board off. This is so that later pci_restore_state() 3179 /* Turn the board off. This is so that later pci_restore_state()
3181 * won't turn the board on before the rest of config space is ready. 3180 * won't turn the board on before the rest of config space is ready.
3182 */ 3181 */
3183 pci_disable_device(pdev); 3182 pci_disable_device(pdev);
3184 pci_save_state(pdev); 3183 pci_save_state(pdev);
3185 3184
3186 /* find the first memory BAR, so we can find the cfg table */ 3185 /* find the first memory BAR, so we can find the cfg table */
3187 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 3186 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3188 if (rc) 3187 if (rc)
3189 return rc; 3188 return rc;
3190 vaddr = remap_pci_mem(paddr, 0x250); 3189 vaddr = remap_pci_mem(paddr, 0x250);
3191 if (!vaddr) 3190 if (!vaddr)
3192 return -ENOMEM; 3191 return -ENOMEM;
3193 3192
3194 /* find cfgtable in order to check if reset via doorbell is supported */ 3193 /* find cfgtable in order to check if reset via doorbell is supported */
3195 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 3194 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3196 &cfg_base_addr_index, &cfg_offset); 3195 &cfg_base_addr_index, &cfg_offset);
3197 if (rc) 3196 if (rc)
3198 goto unmap_vaddr; 3197 goto unmap_vaddr;
3199 cfgtable = remap_pci_mem(pci_resource_start(pdev, 3198 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3200 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 3199 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3201 if (!cfgtable) { 3200 if (!cfgtable) {
3202 rc = -ENOMEM; 3201 rc = -ENOMEM;
3203 goto unmap_vaddr; 3202 goto unmap_vaddr;
3204 } 3203 }
3205 3204
3206 /* If reset via doorbell register is supported, use that. */ 3205 /* If reset via doorbell register is supported, use that. */
3207 misc_fw_support = readl(&cfgtable->misc_fw_support); 3206 misc_fw_support = readl(&cfgtable->misc_fw_support);
3208 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3207 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3209 3208
3210 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3209 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3211 if (rc) 3210 if (rc)
3212 goto unmap_cfgtable; 3211 goto unmap_cfgtable;
3213 3212
3214 pci_restore_state(pdev); 3213 pci_restore_state(pdev);
3215 rc = pci_enable_device(pdev); 3214 rc = pci_enable_device(pdev);
3216 if (rc) { 3215 if (rc) {
3217 dev_warn(&pdev->dev, "failed to enable device.\n"); 3216 dev_warn(&pdev->dev, "failed to enable device.\n");
3218 goto unmap_cfgtable; 3217 goto unmap_cfgtable;
3219 } 3218 }
3220 pci_write_config_word(pdev, 4, command_register); 3219 pci_write_config_word(pdev, 4, command_register);
3221 3220
3222 /* Some devices (notably the HP Smart Array 5i Controller) 3221 /* Some devices (notably the HP Smart Array 5i Controller)
3223 need a little pause here */ 3222 need a little pause here */
3224 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3223 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3225 3224
3226 /* Wait for board to become not ready, then ready. */ 3225 /* Wait for board to become not ready, then ready. */
3227 dev_info(&pdev->dev, "Waiting for board to become ready.\n"); 3226 dev_info(&pdev->dev, "Waiting for board to become ready.\n");
3228 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3227 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3229 if (rc) 3228 if (rc)
3230 dev_warn(&pdev->dev, 3229 dev_warn(&pdev->dev,
3231 "failed waiting for board to become not ready\n"); 3230 "failed waiting for board to become not ready\n");
3232 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 3231 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3233 if (rc) { 3232 if (rc) {
3234 dev_warn(&pdev->dev, 3233 dev_warn(&pdev->dev,
3235 "failed waiting for board to become ready\n"); 3234 "failed waiting for board to become ready\n");
3236 goto unmap_cfgtable; 3235 goto unmap_cfgtable;
3237 } 3236 }
3238 dev_info(&pdev->dev, "board ready.\n"); 3237 dev_info(&pdev->dev, "board ready.\n");
3239 3238
3240 /* Controller should be in simple mode at this point. If it's not, 3239 /* Controller should be in simple mode at this point. If it's not,
3241 * it means we're on one of those controllers which doesn't support 3240 * it means we're on one of those controllers which doesn't support
3242 * the doorbell reset method and on which the PCI power management reset 3241 * the doorbell reset method and on which the PCI power management reset
3243 * method doesn't work (P800, for example). 3242 * method doesn't work (P800, for example).
3244 * In those cases, pretend the reset worked and hope for the best. 3243 * In those cases, pretend the reset worked and hope for the best.
3245 */ 3244 */
3246 active_transport = readl(&cfgtable->TransportActive); 3245 active_transport = readl(&cfgtable->TransportActive);
3247 if (active_transport & PERFORMANT_MODE) { 3246 if (active_transport & PERFORMANT_MODE) {
3248 dev_warn(&pdev->dev, "Unable to successfully reset controller," 3247 dev_warn(&pdev->dev, "Unable to successfully reset controller,"
3249 " proceeding anyway.\n"); 3248 " proceeding anyway.\n");
3250 rc = -ENOTSUPP; 3249 rc = -ENOTSUPP;
3251 } 3250 }
3252 3251
3253 unmap_cfgtable: 3252 unmap_cfgtable:
3254 iounmap(cfgtable); 3253 iounmap(cfgtable);
3255 3254
3256 unmap_vaddr: 3255 unmap_vaddr:
3257 iounmap(vaddr); 3256 iounmap(vaddr);
3258 return rc; 3257 return rc;
3259 } 3258 }
3260 3259
3261 /* 3260 /*
3262 * We cannot read the structure directly; for portability we must use 3261 * We cannot read the structure directly; for portability we must use
3263 * the I/O functions. 3262 * the I/O functions.
3264 * This is for debug only. 3263 * This is for debug only.
3265 */ 3264 */
3266 static void print_cfg_table(struct device *dev, struct CfgTable *tb) 3265 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3267 { 3266 {
3268 #ifdef HPSA_DEBUG 3267 #ifdef HPSA_DEBUG
3269 int i; 3268 int i;
3270 char temp_name[17]; 3269 char temp_name[17];
3271 3270
3272 dev_info(dev, "Controller Configuration information\n"); 3271 dev_info(dev, "Controller Configuration information\n");
3273 dev_info(dev, "------------------------------------\n"); 3272 dev_info(dev, "------------------------------------\n");
3274 for (i = 0; i < 4; i++) 3273 for (i = 0; i < 4; i++)
3275 temp_name[i] = readb(&(tb->Signature[i])); 3274 temp_name[i] = readb(&(tb->Signature[i]));
3276 temp_name[4] = '\0'; 3275 temp_name[4] = '\0';
3277 dev_info(dev, " Signature = %s\n", temp_name); 3276 dev_info(dev, " Signature = %s\n", temp_name);
3278 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 3277 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3279 dev_info(dev, " Transport methods supported = 0x%x\n", 3278 dev_info(dev, " Transport methods supported = 0x%x\n",
3280 readl(&(tb->TransportSupport))); 3279 readl(&(tb->TransportSupport)));
3281 dev_info(dev, " Transport methods active = 0x%x\n", 3280 dev_info(dev, " Transport methods active = 0x%x\n",
3282 readl(&(tb->TransportActive))); 3281 readl(&(tb->TransportActive)));
3283 dev_info(dev, " Requested transport Method = 0x%x\n", 3282 dev_info(dev, " Requested transport Method = 0x%x\n",
3284 readl(&(tb->HostWrite.TransportRequest))); 3283 readl(&(tb->HostWrite.TransportRequest)));
3285 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 3284 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3286 readl(&(tb->HostWrite.CoalIntDelay))); 3285 readl(&(tb->HostWrite.CoalIntDelay)));
3287 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 3286 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3288 readl(&(tb->HostWrite.CoalIntCount))); 3287 readl(&(tb->HostWrite.CoalIntCount)));
3289 dev_info(dev, " Max outstanding commands = 0x%d\n", 3288 dev_info(dev, " Max outstanding commands = 0x%d\n",
3290 readl(&(tb->CmdsOutMax))); 3289 readl(&(tb->CmdsOutMax)));
3291 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 3290 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3292 for (i = 0; i < 16; i++) 3291 for (i = 0; i < 16; i++)
3293 temp_name[i] = readb(&(tb->ServerName[i])); 3292 temp_name[i] = readb(&(tb->ServerName[i]));
3294 temp_name[16] = '\0'; 3293 temp_name[16] = '\0';
3295 dev_info(dev, " Server Name = %s\n", temp_name); 3294 dev_info(dev, " Server Name = %s\n", temp_name);
3296 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 3295 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3297 readl(&(tb->HeartBeat))); 3296 readl(&(tb->HeartBeat)));
3298 #endif /* HPSA_DEBUG */ 3297 #endif /* HPSA_DEBUG */
3299 } 3298 }
3300 3299
3301 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3300 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3302 { 3301 {
3303 int i, offset, mem_type, bar_type; 3302 int i, offset, mem_type, bar_type;
3304 3303
3305 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 3304 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3306 return 0; 3305 return 0;
3307 offset = 0; 3306 offset = 0;
3308 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 3307 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3309 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 3308 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3310 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 3309 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3311 offset += 4; 3310 offset += 4;
3312 else { 3311 else {
3313 mem_type = pci_resource_flags(pdev, i) & 3312 mem_type = pci_resource_flags(pdev, i) &
3314 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 3313 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3315 switch (mem_type) { 3314 switch (mem_type) {
3316 case PCI_BASE_ADDRESS_MEM_TYPE_32: 3315 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3317 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 3316 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3318 offset += 4; /* 32 bit */ 3317 offset += 4; /* 32 bit */
3319 break; 3318 break;
3320 case PCI_BASE_ADDRESS_MEM_TYPE_64: 3319 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3321 offset += 8; 3320 offset += 8;
3322 break; 3321 break;
3323 default: /* reserved in PCI 2.2 */ 3322 default: /* reserved in PCI 2.2 */
3324 dev_warn(&pdev->dev, 3323 dev_warn(&pdev->dev,
3325 "base address is invalid\n"); 3324 "base address is invalid\n");
3326 return -1; 3325 return -1;
3327 break; 3326 break;
3328 } 3327 }
3329 } 3328 }
3330 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 3329 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3331 return i + 1; 3330 return i + 1;
3332 } 3331 }
3333 return -1; 3332 return -1;
3334 } 3333 }
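
The loop above translates a config-space BAR offset into a resource index: each 32-bit BAR advances the running offset by 4 bytes, a 64-bit memory BAR by 8 (two config dwords, but a single resource slot). A worked example with a hypothetical layout:

	/* Controller reports its config table behind config offset 0x18,
	 * and resource 0 is a 64-bit memory BAR:
	 *   i = 0: 64-bit mem -> offset += 8   (BAR spans 0x10-0x17)
	 *          offset (8) == 0x18 - PCI_BASE_ADDRESS_0 -> return 1
	 * so the table lives in pci_resource_start(pdev, 1).
	 */
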
3335 3334
3336 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 3335 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3337 * controllers that are capable. If not, we use IO-APIC mode. 3336 * controllers that are capable. If not, we use IO-APIC mode.
3338 */ 3337 */
3339 3338
3340 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) 3339 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3341 { 3340 {
3342 #ifdef CONFIG_PCI_MSI 3341 #ifdef CONFIG_PCI_MSI
3343 int err; 3342 int err;
3344 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, 3343 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3345 {0, 2}, {0, 3} 3344 {0, 2}, {0, 3}
3346 }; 3345 };
3347 3346
3348 /* Some boards advertise MSI but don't really support it */ 3347 /* Some boards advertise MSI but don't really support it */
3349 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 3348 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3350 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 3349 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3351 goto default_int_mode; 3350 goto default_int_mode;
3352 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 3351 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3353 dev_info(&h->pdev->dev, "MSIX\n"); 3352 dev_info(&h->pdev->dev, "MSIX\n");
3354 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); 3353 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3355 if (!err) { 3354 if (!err) {
3356 h->intr[0] = hpsa_msix_entries[0].vector; 3355 h->intr[0] = hpsa_msix_entries[0].vector;
3357 h->intr[1] = hpsa_msix_entries[1].vector; 3356 h->intr[1] = hpsa_msix_entries[1].vector;
3358 h->intr[2] = hpsa_msix_entries[2].vector; 3357 h->intr[2] = hpsa_msix_entries[2].vector;
3359 h->intr[3] = hpsa_msix_entries[3].vector; 3358 h->intr[3] = hpsa_msix_entries[3].vector;
3360 h->msix_vector = 1; 3359 h->msix_vector = 1;
3361 return; 3360 return;
3362 } 3361 }
3363 if (err > 0) { 3362 if (err > 0) {
3364 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 3363 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3365 "available\n", err); 3364 "available\n", err);
3366 goto default_int_mode; 3365 goto default_int_mode;
3367 } else { 3366 } else {
3368 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 3367 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3369 err); 3368 err);
3370 goto default_int_mode; 3369 goto default_int_mode;
3371 } 3370 }
3372 } 3371 }
3373 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 3372 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3374 dev_info(&h->pdev->dev, "MSI\n"); 3373 dev_info(&h->pdev->dev, "MSI\n");
3375 if (!pci_enable_msi(h->pdev)) 3374 if (!pci_enable_msi(h->pdev))
3376 h->msi_vector = 1; 3375 h->msi_vector = 1;
3377 else 3376 else
3378 dev_warn(&h->pdev->dev, "MSI init failed\n"); 3377 dev_warn(&h->pdev->dev, "MSI init failed\n");
3379 } 3378 }
3380 default_int_mode: 3379 default_int_mode:
3381 #endif /* CONFIG_PCI_MSI */ 3380 #endif /* CONFIG_PCI_MSI */
3382 /* if we get here we're going to use the default interrupt mode */ 3381 /* if we get here we're going to use the default interrupt mode */
3383 h->intr[PERF_MODE_INT] = h->pdev->irq; 3382 h->intr[PERF_MODE_INT] = h->pdev->irq;
3384 } 3383 }
3385 3384
3386 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 3385 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3387 { 3386 {
3388 int i; 3387 int i;
3389 u32 subsystem_vendor_id, subsystem_device_id; 3388 u32 subsystem_vendor_id, subsystem_device_id;
3390 3389
3391 subsystem_vendor_id = pdev->subsystem_vendor; 3390 subsystem_vendor_id = pdev->subsystem_vendor;
3392 subsystem_device_id = pdev->subsystem_device; 3391 subsystem_device_id = pdev->subsystem_device;
3393 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3392 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3394 subsystem_vendor_id; 3393 subsystem_vendor_id;
3395 3394
3396 for (i = 0; i < ARRAY_SIZE(products); i++) 3395 for (i = 0; i < ARRAY_SIZE(products); i++)
3397 if (*board_id == products[i].board_id) 3396 if (*board_id == products[i].board_id)
3398 return i; 3397 return i;
3399 3398
3400 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 3399 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3401 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 3400 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3402 !hpsa_allow_any) { 3401 !hpsa_allow_any) {
3403 dev_warn(&pdev->dev, "unrecognized board ID: " 3402 dev_warn(&pdev->dev, "unrecognized board ID: "
3404 "0x%08x, ignoring.\n", *board_id); 3403 "0x%08x, ignoring.\n", *board_id);
3405 return -ENODEV; 3404 return -ENODEV;
3406 } 3405 }
3407 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 3406 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3408 } 3407 }
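
The packing is subsystem device ID in the high 16 bits and subsystem vendor ID in the low 16. A worked example, taken from the P600 quirk later in this file:

	/* (0x3225 << 16) | 0x103C == 0x3225103C
	 *  ^ subsystem device       ^ PCI_VENDOR_ID_HP */
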
3409 3408
3410 static inline bool hpsa_board_disabled(struct pci_dev *pdev) 3409 static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3411 { 3410 {
3412 u16 command; 3411 u16 command;
3413 3412
3414 (void) pci_read_config_word(pdev, PCI_COMMAND, &command); 3413 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3415 return ((command & PCI_COMMAND_MEMORY) == 0); 3414 return ((command & PCI_COMMAND_MEMORY) == 0);
3416 } 3415 }
3417 3416
3418 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 3417 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3419 unsigned long *memory_bar) 3418 unsigned long *memory_bar)
3420 { 3419 {
3421 int i; 3420 int i;
3422 3421
3423 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 3422 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3424 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3423 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3425 /* addressing mode bits already removed */ 3424 /* addressing mode bits already removed */
3426 *memory_bar = pci_resource_start(pdev, i); 3425 *memory_bar = pci_resource_start(pdev, i);
3427 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 3426 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3428 *memory_bar); 3427 *memory_bar);
3429 return 0; 3428 return 0;
3430 } 3429 }
3431 dev_warn(&pdev->dev, "no memory BAR found\n"); 3430 dev_warn(&pdev->dev, "no memory BAR found\n");
3432 return -ENODEV; 3431 return -ENODEV;
3433 } 3432 }
3434 3433
3435 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, 3434 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
3436 void __iomem *vaddr, int wait_for_ready) 3435 void __iomem *vaddr, int wait_for_ready)
3437 { 3436 {
3438 int i, iterations; 3437 int i, iterations;
3439 u32 scratchpad; 3438 u32 scratchpad;
3440 if (wait_for_ready) 3439 if (wait_for_ready)
3441 iterations = HPSA_BOARD_READY_ITERATIONS; 3440 iterations = HPSA_BOARD_READY_ITERATIONS;
3442 else 3441 else
3443 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 3442 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
3444 3443
3445 for (i = 0; i < iterations; i++) { 3444 for (i = 0; i < iterations; i++) {
3446 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 3445 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3447 if (wait_for_ready) { 3446 if (wait_for_ready) {
3448 if (scratchpad == HPSA_FIRMWARE_READY) 3447 if (scratchpad == HPSA_FIRMWARE_READY)
3449 return 0; 3448 return 0;
3450 } else { 3449 } else {
3451 if (scratchpad != HPSA_FIRMWARE_READY) 3450 if (scratchpad != HPSA_FIRMWARE_READY)
3452 return 0; 3451 return 0;
3453 } 3452 }
3454 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 3453 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3455 } 3454 }
3456 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 3455 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3457 return -ENODEV; 3456 return -ENODEV;
3458 } 3457 }
3459 3458
3460 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, 3459 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3461 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, 3460 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3462 u64 *cfg_offset) 3461 u64 *cfg_offset)
3463 { 3462 {
3464 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 3463 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3465 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 3464 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3466 *cfg_base_addr &= (u32) 0x0000ffff; 3465 *cfg_base_addr &= (u32) 0x0000ffff;
3467 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 3466 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3468 if (*cfg_base_addr_index == -1) { 3467 if (*cfg_base_addr_index == -1) {
3469 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 3468 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3470 return -ENODEV; 3469 return -ENODEV;
3471 } 3470 }
3472 return 0; 3471 return 0;
3473 } 3472 }
3474 3473
3475 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) 3474 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3476 { 3475 {
3477 u64 cfg_offset; 3476 u64 cfg_offset;
3478 u32 cfg_base_addr; 3477 u32 cfg_base_addr;
3479 u64 cfg_base_addr_index; 3478 u64 cfg_base_addr_index;
3480 u32 trans_offset; 3479 u32 trans_offset;
3481 int rc; 3480 int rc;
3482 3481
3483 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 3482 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3484 &cfg_base_addr_index, &cfg_offset); 3483 &cfg_base_addr_index, &cfg_offset);
3485 if (rc) 3484 if (rc)
3486 return rc; 3485 return rc;
3487 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 3486 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3488 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 3487 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3489 if (!h->cfgtable) 3488 if (!h->cfgtable)
3490 return -ENOMEM; 3489 return -ENOMEM;
3491 /* Find performant mode table. */ 3490 /* Find performant mode table. */
3492 trans_offset = readl(&h->cfgtable->TransMethodOffset); 3491 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3493 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 3492 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3494 cfg_base_addr_index)+cfg_offset+trans_offset, 3493 cfg_base_addr_index)+cfg_offset+trans_offset,
3495 sizeof(*h->transtable)); 3494 sizeof(*h->transtable));
3496 if (!h->transtable) 3495 if (!h->transtable)
3497 return -ENOMEM; 3496 return -ENOMEM;
3498 return 0; 3497 return 0;
3499 } 3498 }
3500 3499
3501 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 3500 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3502 { 3501 {
3503 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3502 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3504 3503
3505 /* Limit commands in memory limited kdump scenario. */ 3504 /* Limit commands in memory limited kdump scenario. */
3506 if (reset_devices && h->max_commands > 32) 3505 if (reset_devices && h->max_commands > 32)
3507 h->max_commands = 32; 3506 h->max_commands = 32;
3508 3507
3509 if (h->max_commands < 16) { 3508 if (h->max_commands < 16) {
3510 dev_warn(&h->pdev->dev, "Controller reports " 3509 dev_warn(&h->pdev->dev, "Controller reports "
3511 "max supported commands of %d, an obvious lie. " 3510 "max supported commands of %d, an obvious lie. "
3512 "Using 16. Ensure that firmware is up to date.\n", 3511 "Using 16. Ensure that firmware is up to date.\n",
3513 h->max_commands); 3512 h->max_commands);
3514 h->max_commands = 16; 3513 h->max_commands = 16;
3515 } 3514 }
3516 } 3515 }
3517 3516
3518 /* Interrogate the hardware for some limits: 3517 /* Interrogate the hardware for some limits:
3519 * max commands, max SG elements without chaining, and with chaining, 3518 * max commands, max SG elements without chaining, and with chaining,
3520 * SG chain block size, etc. 3519 * SG chain block size, etc.
3521 */ 3520 */
3522 static void __devinit hpsa_find_board_params(struct ctlr_info *h) 3521 static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3523 { 3522 {
3524 hpsa_get_max_perf_mode_cmds(h); 3523 hpsa_get_max_perf_mode_cmds(h);
3525 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 3524 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3526 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 3525 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3527 /* 3526 /*
3528 * Limit in-command s/g elements to 32 to save DMA'able memory. 3527 * Limit in-command s/g elements to 32 to save DMA'able memory.
3529 * However, the spec says if 0, use 31. 3528 * However, the spec says if 0, use 31.
3530 */ 3529 */
3531 h->max_cmd_sg_entries = 31; 3530 h->max_cmd_sg_entries = 31;
3532 if (h->maxsgentries > 512) { 3531 if (h->maxsgentries > 512) {
3533 h->max_cmd_sg_entries = 32; 3532 h->max_cmd_sg_entries = 32;
3534 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; 3533 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3535 h->maxsgentries--; /* save one for chain pointer */ 3534 h->maxsgentries--; /* save one for chain pointer */
3536 } else { 3535 } else {
3537 h->maxsgentries = 31; /* default to traditional values */ 3536 h->maxsgentries = 31; /* default to traditional values */
3538 h->chainsize = 0; 3537 h->chainsize = 0;
3539 } 3538 }
3540 } 3539 }
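
A worked example of the chaining arithmetic above, for a controller that reports MaxScatterGatherElements = 1024:

	/* maxsgentries (1024) > 512, so:
	 *   max_cmd_sg_entries = 32          (in-command s/g slots)
	 *   chainsize = 1024 - 32 + 1 = 993  (entries per chain block)
	 *   maxsgentries-- -> 1023           (one in-command slot feeds
	 *                                     the chain pointer)
	 */
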
3541 3540
3542 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 3541 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3543 { 3542 {
3544 if ((readb(&h->cfgtable->Signature[0]) != 'C') || 3543 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3545 (readb(&h->cfgtable->Signature[1]) != 'I') || 3544 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3546 (readb(&h->cfgtable->Signature[2]) != 'S') || 3545 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3547 (readb(&h->cfgtable->Signature[3]) != 'S')) { 3546 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3548 dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 3547 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3549 return false; 3548 return false;
3550 } 3549 }
3551 return true; 3550 return true;
3552 } 3551 }
3553 3552
3554 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3553 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3555 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) 3554 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3556 { 3555 {
3557 #ifdef CONFIG_X86 3556 #ifdef CONFIG_X86
3558 u32 prefetch; 3557 u32 prefetch;
3559 3558
3560 prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); 3559 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3561 prefetch |= 0x100; 3560 prefetch |= 0x100;
3562 writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); 3561 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3563 #endif 3562 #endif
3564 } 3563 }
3565 3564
3566 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 3565 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3567 * in a prefetch beyond physical memory. 3566 * in a prefetch beyond physical memory.
3568 */ 3567 */
3569 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 3568 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3570 { 3569 {
3571 u32 dma_prefetch; 3570 u32 dma_prefetch;
3572 3571
3573 if (h->board_id != 0x3225103C) 3572 if (h->board_id != 0x3225103C)
3574 return; 3573 return;
3575 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 3574 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3576 dma_prefetch |= 0x8000; 3575 dma_prefetch |= 0x8000;
3577 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 3576 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3578 } 3577 }
3579 3578
3580 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 3579 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3581 { 3580 {
3582 int i; 3581 int i;
3583 u32 doorbell_value; 3582 u32 doorbell_value;
3584 unsigned long flags; 3583 unsigned long flags;
3585 3584
3586 /* under certain very rare conditions, this can take a while. 3585 /* under certain very rare conditions, this can take a while.
3587 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 3586 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3588 * as we enter this code.) 3587 * as we enter this code.)
3589 */ 3588 */
3590 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3589 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3591 spin_lock_irqsave(&h->lock, flags); 3590 spin_lock_irqsave(&h->lock, flags);
3592 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 3591 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
3593 spin_unlock_irqrestore(&h->lock, flags); 3592 spin_unlock_irqrestore(&h->lock, flags);
3594 if (!(doorbell_value & CFGTBL_ChangeReq)) 3593 if (!(doorbell_value & CFGTBL_ChangeReq))
3595 break; 3594 break;
3596 /* delay and try again */ 3595 /* delay and try again */
3597 usleep_range(10000, 20000); 3596 usleep_range(10000, 20000);
3598 } 3597 }
3599 } 3598 }
3600 3599
3601 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) 3600 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3602 { 3601 {
3603 u32 trans_support; 3602 u32 trans_support;
3604 3603
3605 trans_support = readl(&(h->cfgtable->TransportSupport)); 3604 trans_support = readl(&(h->cfgtable->TransportSupport));
3606 if (!(trans_support & SIMPLE_MODE)) 3605 if (!(trans_support & SIMPLE_MODE))
3607 return -ENOTSUPP; 3606 return -ENOTSUPP;
3608 3607
3609 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 3608 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3610 /* Update the field, and then ring the doorbell */ 3609 /* Update the field, and then ring the doorbell */
3611 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 3610 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3612 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 3611 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3613 hpsa_wait_for_mode_change_ack(h); 3612 hpsa_wait_for_mode_change_ack(h);
3614 print_cfg_table(&h->pdev->dev, h->cfgtable); 3613 print_cfg_table(&h->pdev->dev, h->cfgtable);
3615 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { 3614 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3616 dev_warn(&h->pdev->dev, 3615 dev_warn(&h->pdev->dev,
3617 "unable to get board into simple mode\n"); 3616 "unable to get board into simple mode\n");
3618 return -ENODEV; 3617 return -ENODEV;
3619 } 3618 }
3620 return 0; 3619 return 0;
3621 } 3620 }
3622 3621
3623 static int __devinit hpsa_pci_init(struct ctlr_info *h) 3622 static int __devinit hpsa_pci_init(struct ctlr_info *h)
3624 { 3623 {
3625 int prod_index, err; 3624 int prod_index, err;
3626 3625
3627 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 3626 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3628 if (prod_index < 0) 3627 if (prod_index < 0)
3629 return -ENODEV; 3628 return -ENODEV;
3630 h->product_name = products[prod_index].product_name; 3629 h->product_name = products[prod_index].product_name;
3631 h->access = *(products[prod_index].access); 3630 h->access = *(products[prod_index].access);
3632 3631
3633 if (hpsa_board_disabled(h->pdev)) { 3632 if (hpsa_board_disabled(h->pdev)) {
3634 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3633 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3635 return -ENODEV; 3634 return -ENODEV;
3636 } 3635 }
3637 err = pci_enable_device(h->pdev); 3636 err = pci_enable_device(h->pdev);
3638 if (err) { 3637 if (err) {
3639 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3638 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3640 return err; 3639 return err;
3641 } 3640 }
3642 3641
3643 err = pci_request_regions(h->pdev, "hpsa"); 3642 err = pci_request_regions(h->pdev, "hpsa");
3644 if (err) { 3643 if (err) {
3645 dev_err(&h->pdev->dev, 3644 dev_err(&h->pdev->dev,
3646 "cannot obtain PCI resources, aborting\n"); 3645 "cannot obtain PCI resources, aborting\n");
3647 return err; 3646 return err;
3648 } 3647 }
3649 hpsa_interrupt_mode(h); 3648 hpsa_interrupt_mode(h);
3650 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 3649 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3651 if (err) 3650 if (err)
3652 goto err_out_free_res; 3651 goto err_out_free_res;
3653 h->vaddr = remap_pci_mem(h->paddr, 0x250); 3652 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3654 if (!h->vaddr) { 3653 if (!h->vaddr) {
3655 err = -ENOMEM; 3654 err = -ENOMEM;
3656 goto err_out_free_res; 3655 goto err_out_free_res;
3657 } 3656 }
3658 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 3657 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
3659 if (err) 3658 if (err)
3660 goto err_out_free_res; 3659 goto err_out_free_res;
3661 err = hpsa_find_cfgtables(h); 3660 err = hpsa_find_cfgtables(h);
3662 if (err) 3661 if (err)
3663 goto err_out_free_res; 3662 goto err_out_free_res;
3664 hpsa_find_board_params(h); 3663 hpsa_find_board_params(h);
3665 3664
3666 if (!hpsa_CISS_signature_present(h)) { 3665 if (!hpsa_CISS_signature_present(h)) {
3667 err = -ENODEV; 3666 err = -ENODEV;
3668 goto err_out_free_res; 3667 goto err_out_free_res;
3669 } 3668 }
3670 hpsa_enable_scsi_prefetch(h); 3669 hpsa_enable_scsi_prefetch(h);
3671 hpsa_p600_dma_prefetch_quirk(h); 3670 hpsa_p600_dma_prefetch_quirk(h);
3672 err = hpsa_enter_simple_mode(h); 3671 err = hpsa_enter_simple_mode(h);
3673 if (err) 3672 if (err)
3674 goto err_out_free_res; 3673 goto err_out_free_res;
3675 return 0; 3674 return 0;
3676 3675
3677 err_out_free_res: 3676 err_out_free_res:
3678 if (h->transtable) 3677 if (h->transtable)
3679 iounmap(h->transtable); 3678 iounmap(h->transtable);
3680 if (h->cfgtable) 3679 if (h->cfgtable)
3681 iounmap(h->cfgtable); 3680 iounmap(h->cfgtable);
3682 if (h->vaddr) 3681 if (h->vaddr)
3683 iounmap(h->vaddr); 3682 iounmap(h->vaddr);
3684 /* 3683 /*
3685 * Deliberately omit pci_disable_device(): it does something nasty to 3684 * Deliberately omit pci_disable_device(): it does something nasty to
3686 * Smart Array controllers that pci_enable_device does not undo 3685 * Smart Array controllers that pci_enable_device does not undo
3687 */ 3686 */
3688 pci_release_regions(h->pdev); 3687 pci_release_regions(h->pdev);
3689 return err; 3688 return err;
3690 } 3689 }
3691 3690
3692 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) 3691 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3693 { 3692 {
3694 int rc; 3693 int rc;
3695 3694
3696 #define HBA_INQUIRY_BYTE_COUNT 64 3695 #define HBA_INQUIRY_BYTE_COUNT 64
3697 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 3696 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3698 if (!h->hba_inquiry_data) 3697 if (!h->hba_inquiry_data)
3699 return; 3698 return;
3700 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 3699 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3701 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 3700 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3702 if (rc != 0) { 3701 if (rc != 0) {
3703 kfree(h->hba_inquiry_data); 3702 kfree(h->hba_inquiry_data);
3704 h->hba_inquiry_data = NULL; 3703 h->hba_inquiry_data = NULL;
3705 } 3704 }
3706 } 3705 }
3707 3706
3708 static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) 3707 static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3709 { 3708 {
3710 int rc, i; 3709 int rc, i;
3711 3710
3712 if (!reset_devices) 3711 if (!reset_devices)
3713 return 0; 3712 return 0;
3714 3713
3715 /* Reset the controller with a PCI power-cycle or via doorbell */ 3714 /* Reset the controller with a PCI power-cycle or via doorbell */
3716 rc = hpsa_kdump_hard_reset_controller(pdev); 3715 rc = hpsa_kdump_hard_reset_controller(pdev);
3717 3716
3718 /* -ENOTSUPP here means we cannot reset the controller 3717 /* -ENOTSUPP here means we cannot reset the controller
3719 * but it's already (and still) up and running in 3718 * but it's already (and still) up and running in
3720 * "performant mode". Or, it might be 640x, which can't reset 3719 * "performant mode". Or, it might be 640x, which can't reset
3721 * due to concerns about shared bbwc between 6402/6404 pair. 3720 * due to concerns about shared bbwc between 6402/6404 pair.
3722 */ 3721 */
3723 if (rc == -ENOTSUPP) 3722 if (rc == -ENOTSUPP)
3724 return 0; /* just try to do the kdump anyhow. */ 3723 return 0; /* just try to do the kdump anyhow. */
3725 if (rc) 3724 if (rc)
3726 return -ENODEV; 3725 return -ENODEV;
3727 3726
3728 /* Now try to get the controller to respond to a no-op */ 3727 /* Now try to get the controller to respond to a no-op */
3729 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 3728 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3730 if (hpsa_noop(pdev) == 0) 3729 if (hpsa_noop(pdev) == 0)
3731 break; 3730 break;
3732 else 3731 else
3733 dev_warn(&pdev->dev, "no-op failed%s\n", 3732 dev_warn(&pdev->dev, "no-op failed%s\n",
3734 (i < 11 ? "; re-trying" : "")); 3733 (i < 11 ? "; re-trying" : ""));
3735 } 3734 }
3736 return 0; 3735 return 0;
3737 } 3736 }
3738 3737
3739 static int __devinit hpsa_init_one(struct pci_dev *pdev, 3738 static int __devinit hpsa_init_one(struct pci_dev *pdev,
3740 const struct pci_device_id *ent) 3739 const struct pci_device_id *ent)
3741 { 3740 {
3742 int dac, rc; 3741 int dac, rc;
3743 struct ctlr_info *h; 3742 struct ctlr_info *h;
3744 3743
3745 if (number_of_controllers == 0) 3744 if (number_of_controllers == 0)
3746 printk(KERN_INFO DRIVER_NAME "\n"); 3745 printk(KERN_INFO DRIVER_NAME "\n");
3747 3746
3748 rc = hpsa_init_reset_devices(pdev); 3747 rc = hpsa_init_reset_devices(pdev);
3749 if (rc) 3748 if (rc)
3750 return rc; 3749 return rc;
3751 3750
3752 /* Command structures must be aligned on a 32-byte boundary because 3751 /* Command structures must be aligned on a 32-byte boundary because
3753 	 * the 5 lower bits of the address are used by the hardware and by 3752 	 * the 5 lower bits of the address are used by the hardware and by
3754 * the driver. See comments in hpsa.h for more info. 3753 * the driver. See comments in hpsa.h for more info.
3755 */ 3754 */
3756 #define COMMANDLIST_ALIGNMENT 32 3755 #define COMMANDLIST_ALIGNMENT 32
3757 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 3756 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3758 h = kzalloc(sizeof(*h), GFP_KERNEL); 3757 h = kzalloc(sizeof(*h), GFP_KERNEL);
3759 if (!h) 3758 if (!h)
3760 return -ENOMEM; 3759 return -ENOMEM;
3761 3760
3762 h->pdev = pdev; 3761 h->pdev = pdev;
3763 h->busy_initializing = 1; 3762 h->busy_initializing = 1;
3764 INIT_HLIST_HEAD(&h->cmpQ); 3763 INIT_LIST_HEAD(&h->cmpQ);
3765 INIT_HLIST_HEAD(&h->reqQ); 3764 INIT_LIST_HEAD(&h->reqQ);
3766 spin_lock_init(&h->lock); 3765 spin_lock_init(&h->lock);
3767 spin_lock_init(&h->scan_lock); 3766 spin_lock_init(&h->scan_lock);
3768 rc = hpsa_pci_init(h); 3767 rc = hpsa_pci_init(h);
3769 if (rc != 0) 3768 if (rc != 0)
3770 goto clean1; 3769 goto clean1;
3771 3770
3772 sprintf(h->devname, "hpsa%d", number_of_controllers); 3771 sprintf(h->devname, "hpsa%d", number_of_controllers);
3773 h->ctlr = number_of_controllers; 3772 h->ctlr = number_of_controllers;
3774 number_of_controllers++; 3773 number_of_controllers++;
3775 3774
3776 /* configure PCI DMA stuff */ 3775 /* configure PCI DMA stuff */
3777 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3776 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3778 if (rc == 0) { 3777 if (rc == 0) {
3779 dac = 1; 3778 dac = 1;
3780 } else { 3779 } else {
3781 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3780 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3782 if (rc == 0) { 3781 if (rc == 0) {
3783 dac = 0; 3782 dac = 0;
3784 } else { 3783 } else {
3785 dev_err(&pdev->dev, "no suitable DMA available\n"); 3784 dev_err(&pdev->dev, "no suitable DMA available\n");
3786 goto clean1; 3785 goto clean1;
3787 } 3786 }
3788 } 3787 }
3789 3788
3790 /* make sure the board interrupts are off */ 3789 /* make sure the board interrupts are off */
3791 h->access.set_intr_mask(h, HPSA_INTR_OFF); 3790 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3792 3791
3793 if (h->msix_vector || h->msi_vector) 3792 if (h->msix_vector || h->msi_vector)
3794 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi, 3793 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
3795 IRQF_DISABLED, h->devname, h); 3794 IRQF_DISABLED, h->devname, h);
3796 else 3795 else
3797 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx, 3796 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
3798 IRQF_DISABLED, h->devname, h); 3797 IRQF_DISABLED, h->devname, h);
3799 if (rc) { 3798 if (rc) {
3800 dev_err(&pdev->dev, "unable to get irq %d for %s\n", 3799 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3801 h->intr[PERF_MODE_INT], h->devname); 3800 h->intr[PERF_MODE_INT], h->devname);
3802 goto clean2; 3801 goto clean2;
3803 } 3802 }
3804 3803
3805 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 3804 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3806 h->devname, pdev->device, 3805 h->devname, pdev->device,
3807 h->intr[PERF_MODE_INT], dac ? "" : " not"); 3806 h->intr[PERF_MODE_INT], dac ? "" : " not");
3808 3807
3809 h->cmd_pool_bits = 3808 h->cmd_pool_bits =
3810 kmalloc(((h->nr_cmds + BITS_PER_LONG - 3809 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3811 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); 3810 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3812 h->cmd_pool = pci_alloc_consistent(h->pdev, 3811 h->cmd_pool = pci_alloc_consistent(h->pdev,
3813 h->nr_cmds * sizeof(*h->cmd_pool), 3812 h->nr_cmds * sizeof(*h->cmd_pool),
3814 &(h->cmd_pool_dhandle)); 3813 &(h->cmd_pool_dhandle));
3815 h->errinfo_pool = pci_alloc_consistent(h->pdev, 3814 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3816 h->nr_cmds * sizeof(*h->errinfo_pool), 3815 h->nr_cmds * sizeof(*h->errinfo_pool),
3817 &(h->errinfo_pool_dhandle)); 3816 &(h->errinfo_pool_dhandle));
3818 if ((h->cmd_pool_bits == NULL) 3817 if ((h->cmd_pool_bits == NULL)
3819 || (h->cmd_pool == NULL) 3818 || (h->cmd_pool == NULL)
3820 || (h->errinfo_pool == NULL)) { 3819 || (h->errinfo_pool == NULL)) {
3821 		dev_err(&pdev->dev, "out of memory\n"); 3820 		dev_err(&pdev->dev, "out of memory\n");
3822 rc = -ENOMEM; 3821 rc = -ENOMEM;
3823 goto clean4; 3822 goto clean4;
3824 } 3823 }
3825 if (hpsa_allocate_sg_chain_blocks(h)) 3824 if (hpsa_allocate_sg_chain_blocks(h))
3826 goto clean4; 3825 goto clean4;
3827 init_waitqueue_head(&h->scan_wait_queue); 3826 init_waitqueue_head(&h->scan_wait_queue);
3828 h->scan_finished = 1; /* no scan currently in progress */ 3827 h->scan_finished = 1; /* no scan currently in progress */
3829 3828
3830 pci_set_drvdata(pdev, h); 3829 pci_set_drvdata(pdev, h);
3831 memset(h->cmd_pool_bits, 0, 3830 memset(h->cmd_pool_bits, 0,
3832 ((h->nr_cmds + BITS_PER_LONG - 3831 ((h->nr_cmds + BITS_PER_LONG -
3833 1) / BITS_PER_LONG) * sizeof(unsigned long)); 3832 1) / BITS_PER_LONG) * sizeof(unsigned long));
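The size expression used for both the kmalloc() and the memset() above is the standard round-up-to-longs bitmap sizing; in-tree code usually spells it BITS_TO_LONGS(nr_cmds) * sizeof(unsigned long). A small userspace sketch of the arithmetic, assuming 8-byte longs (names and values here are illustrative only):

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))
	#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		unsigned long nr;

		/* One bit per command: 1..64 commands fit in one 8-byte long,
		 * 65 commands need two, and so on.
		 */
		for (nr = 63; nr <= 66; nr++)
			printf("nr_cmds=%lu -> %zu bytes of bitmap\n",
			       nr, BITS_TO_LONGS(nr) * sizeof(unsigned long));
		return 0;
	}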
3834 3833
3835 hpsa_scsi_setup(h); 3834 hpsa_scsi_setup(h);
3836 3835
3837 /* Turn the interrupts on so we can service requests */ 3836 /* Turn the interrupts on so we can service requests */
3838 h->access.set_intr_mask(h, HPSA_INTR_ON); 3837 h->access.set_intr_mask(h, HPSA_INTR_ON);
3839 3838
3840 hpsa_put_ctlr_into_performant_mode(h); 3839 hpsa_put_ctlr_into_performant_mode(h);
3841 hpsa_hba_inquiry(h); 3840 hpsa_hba_inquiry(h);
3842 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 3841 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3843 h->busy_initializing = 0; 3842 h->busy_initializing = 0;
3844 	return 0; 3843 	return 0;
3845 3844
3846 clean4: 3845 clean4:
3847 hpsa_free_sg_chain_blocks(h); 3846 hpsa_free_sg_chain_blocks(h);
3848 kfree(h->cmd_pool_bits); 3847 kfree(h->cmd_pool_bits);
3849 if (h->cmd_pool) 3848 if (h->cmd_pool)
3850 pci_free_consistent(h->pdev, 3849 pci_free_consistent(h->pdev,
3851 h->nr_cmds * sizeof(struct CommandList), 3850 h->nr_cmds * sizeof(struct CommandList),
3852 h->cmd_pool, h->cmd_pool_dhandle); 3851 h->cmd_pool, h->cmd_pool_dhandle);
3853 if (h->errinfo_pool) 3852 if (h->errinfo_pool)
3854 pci_free_consistent(h->pdev, 3853 pci_free_consistent(h->pdev,
3855 h->nr_cmds * sizeof(struct ErrorInfo), 3854 h->nr_cmds * sizeof(struct ErrorInfo),
3856 h->errinfo_pool, 3855 h->errinfo_pool,
3857 h->errinfo_pool_dhandle); 3856 h->errinfo_pool_dhandle);
3858 free_irq(h->intr[PERF_MODE_INT], h); 3857 free_irq(h->intr[PERF_MODE_INT], h);
3859 clean2: 3858 clean2:
3860 clean1: 3859 clean1:
3861 h->busy_initializing = 0; 3860 h->busy_initializing = 0;
3862 kfree(h); 3861 kfree(h);
3863 return rc; 3862 return rc;
3864 } 3863 }
3865 3864
3866 static void hpsa_flush_cache(struct ctlr_info *h) 3865 static void hpsa_flush_cache(struct ctlr_info *h)
3867 { 3866 {
3868 char *flush_buf; 3867 char *flush_buf;
3869 struct CommandList *c; 3868 struct CommandList *c;
3870 3869
3871 flush_buf = kzalloc(4, GFP_KERNEL); 3870 flush_buf = kzalloc(4, GFP_KERNEL);
3872 if (!flush_buf) 3871 if (!flush_buf)
3873 return; 3872 return;
3874 3873
3875 c = cmd_special_alloc(h); 3874 c = cmd_special_alloc(h);
3876 if (!c) { 3875 if (!c) {
3877 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 3876 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3878 goto out_of_memory; 3877 goto out_of_memory;
3879 } 3878 }
3880 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 3879 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3881 RAID_CTLR_LUNID, TYPE_CMD); 3880 RAID_CTLR_LUNID, TYPE_CMD);
3882 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 3881 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3883 if (c->err_info->CommandStatus != 0) 3882 if (c->err_info->CommandStatus != 0)
3884 dev_warn(&h->pdev->dev, 3883 dev_warn(&h->pdev->dev,
3885 "error flushing cache on controller\n"); 3884 "error flushing cache on controller\n");
3886 cmd_special_free(h, c); 3885 cmd_special_free(h, c);
3887 out_of_memory: 3886 out_of_memory:
3888 kfree(flush_buf); 3887 kfree(flush_buf);
3889 } 3888 }
3890 3889
3891 static void hpsa_shutdown(struct pci_dev *pdev) 3890 static void hpsa_shutdown(struct pci_dev *pdev)
3892 { 3891 {
3893 struct ctlr_info *h; 3892 struct ctlr_info *h;
3894 3893
3895 h = pci_get_drvdata(pdev); 3894 h = pci_get_drvdata(pdev);
3896 /* Turn board interrupts off and send the flush cache command 3895 /* Turn board interrupts off and send the flush cache command
3897 	 * sendcmd will turn off interrupts and send the flush 3896 	 * sendcmd will turn off interrupts and send the flush
3898 	 * to write all data in the battery-backed cache to disk. 3897 	 * to write all data in the battery-backed cache to disk.
3899 */ 3898 */
3900 hpsa_flush_cache(h); 3899 hpsa_flush_cache(h);
3901 h->access.set_intr_mask(h, HPSA_INTR_OFF); 3900 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3902 free_irq(h->intr[PERF_MODE_INT], h); 3901 free_irq(h->intr[PERF_MODE_INT], h);
3903 #ifdef CONFIG_PCI_MSI 3902 #ifdef CONFIG_PCI_MSI
3904 if (h->msix_vector) 3903 if (h->msix_vector)
3905 pci_disable_msix(h->pdev); 3904 pci_disable_msix(h->pdev);
3906 else if (h->msi_vector) 3905 else if (h->msi_vector)
3907 pci_disable_msi(h->pdev); 3906 pci_disable_msi(h->pdev);
3908 #endif /* CONFIG_PCI_MSI */ 3907 #endif /* CONFIG_PCI_MSI */
3909 } 3908 }
3910 3909
3911 static void __devexit hpsa_remove_one(struct pci_dev *pdev) 3910 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3912 { 3911 {
3913 struct ctlr_info *h; 3912 struct ctlr_info *h;
3914 3913
3915 if (pci_get_drvdata(pdev) == NULL) { 3914 if (pci_get_drvdata(pdev) == NULL) {
3916 		dev_err(&pdev->dev, "unable to remove device\n"); 3915 		dev_err(&pdev->dev, "unable to remove device\n");
3917 return; 3916 return;
3918 } 3917 }
3919 h = pci_get_drvdata(pdev); 3918 h = pci_get_drvdata(pdev);
3920 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 3919 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3921 hpsa_shutdown(pdev); 3920 hpsa_shutdown(pdev);
3922 iounmap(h->vaddr); 3921 iounmap(h->vaddr);
3923 iounmap(h->transtable); 3922 iounmap(h->transtable);
3924 iounmap(h->cfgtable); 3923 iounmap(h->cfgtable);
3925 hpsa_free_sg_chain_blocks(h); 3924 hpsa_free_sg_chain_blocks(h);
3926 pci_free_consistent(h->pdev, 3925 pci_free_consistent(h->pdev,
3927 h->nr_cmds * sizeof(struct CommandList), 3926 h->nr_cmds * sizeof(struct CommandList),
3928 h->cmd_pool, h->cmd_pool_dhandle); 3927 h->cmd_pool, h->cmd_pool_dhandle);
3929 pci_free_consistent(h->pdev, 3928 pci_free_consistent(h->pdev,
3930 h->nr_cmds * sizeof(struct ErrorInfo), 3929 h->nr_cmds * sizeof(struct ErrorInfo),
3931 h->errinfo_pool, h->errinfo_pool_dhandle); 3930 h->errinfo_pool, h->errinfo_pool_dhandle);
3932 pci_free_consistent(h->pdev, h->reply_pool_size, 3931 pci_free_consistent(h->pdev, h->reply_pool_size,
3933 h->reply_pool, h->reply_pool_dhandle); 3932 h->reply_pool, h->reply_pool_dhandle);
3934 kfree(h->cmd_pool_bits); 3933 kfree(h->cmd_pool_bits);
3935 kfree(h->blockFetchTable); 3934 kfree(h->blockFetchTable);
3936 kfree(h->hba_inquiry_data); 3935 kfree(h->hba_inquiry_data);
3937 /* 3936 /*
3938 * Deliberately omit pci_disable_device(): it does something nasty to 3937 * Deliberately omit pci_disable_device(): it does something nasty to
3939 * Smart Array controllers that pci_enable_device does not undo 3938 * Smart Array controllers that pci_enable_device does not undo
3940 */ 3939 */
3941 pci_release_regions(pdev); 3940 pci_release_regions(pdev);
3942 pci_set_drvdata(pdev, NULL); 3941 pci_set_drvdata(pdev, NULL);
3943 kfree(h); 3942 kfree(h);
3944 } 3943 }
3945 3944
3946 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 3945 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3947 __attribute__((unused)) pm_message_t state) 3946 __attribute__((unused)) pm_message_t state)
3948 { 3947 {
3949 return -ENOSYS; 3948 return -ENOSYS;
3950 } 3949 }
3951 3950
3952 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 3951 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3953 { 3952 {
3954 return -ENOSYS; 3953 return -ENOSYS;
3955 } 3954 }
3956 3955
3957 static struct pci_driver hpsa_pci_driver = { 3956 static struct pci_driver hpsa_pci_driver = {
3958 .name = "hpsa", 3957 .name = "hpsa",
3959 .probe = hpsa_init_one, 3958 .probe = hpsa_init_one,
3960 .remove = __devexit_p(hpsa_remove_one), 3959 .remove = __devexit_p(hpsa_remove_one),
3961 .id_table = hpsa_pci_device_id, /* id_table */ 3960 .id_table = hpsa_pci_device_id, /* id_table */
3962 .shutdown = hpsa_shutdown, 3961 .shutdown = hpsa_shutdown,
3963 .suspend = hpsa_suspend, 3962 .suspend = hpsa_suspend,
3964 .resume = hpsa_resume, 3963 .resume = hpsa_resume,
3965 }; 3964 };
3966 3965
3967 /* Fill in bucket_map[], given nsgs (the max number of 3966 /* Fill in bucket_map[], given nsgs (the max number of
3968 * scatter gather elements supported) and bucket[], 3967 * scatter gather elements supported) and bucket[],
3969 * which is an array of 8 integers. The bucket[] array 3968 * which is an array of 8 integers. The bucket[] array
3970 * contains 8 different DMA transfer sizes (in 16 3969 * contains 8 different DMA transfer sizes (in 16
3971 * byte increments) which the controller uses to fetch 3970 * byte increments) which the controller uses to fetch
3972 * commands. This function fills in bucket_map[], which 3971 * commands. This function fills in bucket_map[], which
3973 * maps a given number of scatter gather elements to one of 3972 * maps a given number of scatter gather elements to one of
3974 * the 8 DMA transfer sizes. The point of it is to allow the 3973 * the 8 DMA transfer sizes. The point of it is to allow the
3975 * controller to only do as much DMA as needed to fetch the 3974 * controller to only do as much DMA as needed to fetch the
3976 * command, with the DMA transfer size encoded in the lower 3975 * command, with the DMA transfer size encoded in the lower
3977 * bits of the command address. 3976 * bits of the command address.
3978 */ 3977 */
3979 static void calc_bucket_map(int bucket[], int num_buckets, 3978 static void calc_bucket_map(int bucket[], int num_buckets,
3980 int nsgs, int *bucket_map) 3979 int nsgs, int *bucket_map)
3981 { 3980 {
3982 int i, j, b, size; 3981 int i, j, b, size;
3983 3982
3984 /* even a command with 0 SGs requires 4 blocks */ 3983 /* even a command with 0 SGs requires 4 blocks */
3985 #define MINIMUM_TRANSFER_BLOCKS 4 3984 #define MINIMUM_TRANSFER_BLOCKS 4
3986 #define NUM_BUCKETS 8 3985 #define NUM_BUCKETS 8
3987 /* Note, bucket_map must have nsgs+1 entries. */ 3986 /* Note, bucket_map must have nsgs+1 entries. */
3988 for (i = 0; i <= nsgs; i++) { 3987 for (i = 0; i <= nsgs; i++) {
3989 /* Compute size of a command with i SG entries */ 3988 /* Compute size of a command with i SG entries */
3990 size = i + MINIMUM_TRANSFER_BLOCKS; 3989 size = i + MINIMUM_TRANSFER_BLOCKS;
3991 		b = num_buckets - 1; /* Assume the biggest bucket */ 3990 		b = num_buckets - 1; /* Assume the biggest bucket */
3992 		/* Find the bucket that is just big enough */ 3991 		/* Find the bucket that is just big enough */
3993 		for (j = 0; j < num_buckets; j++) { 3992 		for (j = 0; j < num_buckets; j++) {
3994 if (bucket[j] >= size) { 3993 if (bucket[j] >= size) {
3995 b = j; 3994 b = j;
3996 break; 3995 break;
3997 } 3996 }
3998 } 3997 }
3999 /* for a command with i SG entries, use bucket b. */ 3998 /* for a command with i SG entries, use bucket b. */
4000 bucket_map[i] = b; 3999 bucket_map[i] = b;
4001 } 4000 }
4002 } 4001 }
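As a concrete check of the mapping, here is a minimal userspace re-implementation of the loop above, using the same bft[] values the driver programs below (MAXSGENTRIES is 32, so the last bucket is 36 blocks). This is only an illustration, not driver code:

	#include <stdio.h>

	int main(void)
	{
		/* Same bucket sizes the driver uses; bft[7] == 32 + 4 == 36. */
		static const int bft[8] = {5, 6, 8, 10, 12, 20, 28, 36};
		int bucket_map[33];	/* nsgs + 1 entries, for 0..32 SG elements */
		int i, j;

		for (i = 0; i <= 32; i++) {
			int size = i + 4;	/* MINIMUM_TRANSFER_BLOCKS */

			bucket_map[i] = 7;	/* fall back to the biggest bucket */
			for (j = 0; j < 8; j++) {
				if (bft[j] >= size) {
					bucket_map[i] = j;
					break;
				}
			}
			printf("%2d SG entries -> bucket %d (%2d blocks = %3d bytes of DMA)\n",
			       i, bucket_map[i], bft[bucket_map[i]],
			       16 * bft[bucket_map[i]]);
		}
		return 0;
	}

So a command with 3 SG entries needs 7 blocks and lands in bucket 2 (8 blocks, 128 bytes of DMA), while a maximal 32-entry command lands in bucket 7.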
4003 4002
4004 static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) 4003 static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
4005 { 4004 {
4006 int i; 4005 int i;
4007 unsigned long register_value; 4006 unsigned long register_value;
4008 4007
4009 /* This is a bit complicated. There are 8 registers on 4008 /* This is a bit complicated. There are 8 registers on
4010 	 * the controller to which we write to tell it 8 different 4009 	 * the controller to which we write to tell it 8 different
4011 * sizes of commands which there may be. It's a way of 4010 * sizes of commands which there may be. It's a way of
4012 * reducing the DMA done to fetch each command. Encoded into 4011 * reducing the DMA done to fetch each command. Encoded into
4013 * each command's tag are 3 bits which communicate to the controller 4012 * each command's tag are 3 bits which communicate to the controller
4014 * which of the eight sizes that command fits within. The size of 4013 * which of the eight sizes that command fits within. The size of
4015 * each command depends on how many scatter gather entries there are. 4014 * each command depends on how many scatter gather entries there are.
4016 * Each SG entry requires 16 bytes. The eight registers are programmed 4015 * Each SG entry requires 16 bytes. The eight registers are programmed
4017 * with the number of 16-byte blocks a command of that size requires. 4016 * with the number of 16-byte blocks a command of that size requires.
4018 * The smallest command possible requires 5 such 16 byte blocks. 4017 * The smallest command possible requires 5 such 16 byte blocks.
4019 	 * The largest command possible requires MAXSGENTRIES + 4 16-byte 4018 	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
4020 * blocks. Note, this only extends to the SG entries contained 4019 * blocks. Note, this only extends to the SG entries contained
4021 * within the command block, and does not extend to chained blocks 4020 * within the command block, and does not extend to chained blocks
4022 * of SG elements. bft[] contains the eight values we write to 4021 * of SG elements. bft[] contains the eight values we write to
4023 * the registers. They are not evenly distributed, but have more 4022 * the registers. They are not evenly distributed, but have more
4024 * sizes for small commands, and fewer sizes for larger commands. 4023 * sizes for small commands, and fewer sizes for larger commands.
4025 */ 4024 */
4026 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; 4025 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
4027 BUILD_BUG_ON(28 > MAXSGENTRIES + 4); 4026 BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
4028 /* 5 = 1 s/g entry or 4k 4027 /* 5 = 1 s/g entry or 4k
4029 * 6 = 2 s/g entry or 8k 4028 * 6 = 2 s/g entry or 8k
4030 * 8 = 4 s/g entry or 16k 4029 * 8 = 4 s/g entry or 16k
4031 * 10 = 6 s/g entry or 24k 4030 * 10 = 6 s/g entry or 24k
4032 */ 4031 */
4033 4032
4034 h->reply_pool_wraparound = 1; /* spec: init to 1 */ 4033 h->reply_pool_wraparound = 1; /* spec: init to 1 */
4035 4034
4036 /* Controller spec: zero out this buffer. */ 4035 /* Controller spec: zero out this buffer. */
4037 memset(h->reply_pool, 0, h->reply_pool_size); 4036 memset(h->reply_pool, 0, h->reply_pool_size);
4038 h->reply_pool_head = h->reply_pool; 4037 h->reply_pool_head = h->reply_pool;
4039 4038
4040 bft[7] = h->max_sg_entries + 4; 4039 bft[7] = h->max_sg_entries + 4;
4041 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); 4040 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
4042 for (i = 0; i < 8; i++) 4041 for (i = 0; i < 8; i++)
4043 writel(bft[i], &h->transtable->BlockFetch[i]); 4042 writel(bft[i], &h->transtable->BlockFetch[i]);
4044 4043
4045 /* size of controller ring buffer */ 4044 /* size of controller ring buffer */
4046 writel(h->max_commands, &h->transtable->RepQSize); 4045 writel(h->max_commands, &h->transtable->RepQSize);
4047 writel(1, &h->transtable->RepQCount); 4046 writel(1, &h->transtable->RepQCount);
4048 writel(0, &h->transtable->RepQCtrAddrLow32); 4047 writel(0, &h->transtable->RepQCtrAddrLow32);
4049 writel(0, &h->transtable->RepQCtrAddrHigh32); 4048 writel(0, &h->transtable->RepQCtrAddrHigh32);
4050 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); 4049 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
4051 writel(0, &h->transtable->RepQAddr0High32); 4050 writel(0, &h->transtable->RepQAddr0High32);
4052 writel(CFGTBL_Trans_Performant, 4051 writel(CFGTBL_Trans_Performant,
4053 &(h->cfgtable->HostWrite.TransportRequest)); 4052 &(h->cfgtable->HostWrite.TransportRequest));
4054 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 4053 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4055 hpsa_wait_for_mode_change_ack(h); 4054 hpsa_wait_for_mode_change_ack(h);
4056 register_value = readl(&(h->cfgtable->TransportActive)); 4055 register_value = readl(&(h->cfgtable->TransportActive));
4057 if (!(register_value & CFGTBL_Trans_Performant)) { 4056 if (!(register_value & CFGTBL_Trans_Performant)) {
4058 dev_warn(&h->pdev->dev, "unable to get board into" 4057 dev_warn(&h->pdev->dev, "unable to get board into"
4059 " performant mode\n"); 4058 " performant mode\n");
4060 return; 4059 return;
4061 } 4060 }
4062 } 4061 }
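The 3-bit encoding the comment above describes works because commands are 32-byte aligned, leaving the low 5 bits of a command's bus address free. The driver's real encoding lives in its command-submission path; the sketch below is a hypothetical, self-contained illustration of the idea only (encode_tag and its table are invented names):

	#include <stdio.h>
	#include <stdint.h>
	#include <assert.h>

	/* Fold a 3-bit block-fetch bucket index into the low bits of a
	 * 32-byte-aligned command address.
	 */
	static uint32_t encode_tag(uint32_t busaddr,
				   const uint32_t *block_fetch_table,
				   unsigned int nsgs)
	{
		assert((busaddr & 0x1f) == 0);	/* commands are 32-byte aligned */
		return busaddr | (block_fetch_table[nsgs] & 0x7);
	}

	int main(void)
	{
		/* First entries of the map calc_bucket_map() produces for the
		 * bft[] above: 0 or 1 SGs -> bucket 0; 2 -> 1; 3 or 4 -> 2.
		 */
		static const uint32_t table[] = {0, 0, 1, 2, 2};

		printf("tag = 0x%x\n", encode_tag(0x1000, table, 3)); /* 0x1002 */
		return 0;
	}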
4063 4062
4064 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 4063 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4065 { 4064 {
4066 u32 trans_support; 4065 u32 trans_support;
4067 4066
4068 if (hpsa_simple_mode) 4067 if (hpsa_simple_mode)
4069 return; 4068 return;
4070 4069
4071 trans_support = readl(&(h->cfgtable->TransportSupport)); 4070 trans_support = readl(&(h->cfgtable->TransportSupport));
4072 if (!(trans_support & PERFORMANT_MODE)) 4071 if (!(trans_support & PERFORMANT_MODE))
4073 return; 4072 return;
4074 4073
4075 hpsa_get_max_perf_mode_cmds(h); 4074 hpsa_get_max_perf_mode_cmds(h);
4076 h->max_sg_entries = 32; 4075 h->max_sg_entries = 32;
4077 /* Performant mode ring buffer and supporting data structures */ 4076 /* Performant mode ring buffer and supporting data structures */
4078 h->reply_pool_size = h->max_commands * sizeof(u64); 4077 h->reply_pool_size = h->max_commands * sizeof(u64);
4079 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, 4078 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4080 &(h->reply_pool_dhandle)); 4079 &(h->reply_pool_dhandle));
4081 4080
4082 /* Need a block fetch table for performant mode */ 4081 /* Need a block fetch table for performant mode */
4083 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * 4082 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
4084 sizeof(u32)), GFP_KERNEL); 4083 sizeof(u32)), GFP_KERNEL);
4085 4084
4086 if ((h->reply_pool == NULL) 4085 if ((h->reply_pool == NULL)
4087 || (h->blockFetchTable == NULL)) 4086 || (h->blockFetchTable == NULL))
4088 goto clean_up; 4087 goto clean_up;
4089 4088
4090 hpsa_enter_performant_mode(h); 4089 hpsa_enter_performant_mode(h);
4091 4090
4092 /* Change the access methods to the performant access methods */ 4091 /* Change the access methods to the performant access methods */
4093 h->access = SA5_performant_access; 4092 h->access = SA5_performant_access;
4094 h->transMethod = CFGTBL_Trans_Performant; 4093 h->transMethod = CFGTBL_Trans_Performant;
4095 4094
4096 return; 4095 return;
4097 4096
4098 clean_up: 4097 clean_up:
4099 if (h->reply_pool) 4098 if (h->reply_pool)
4100 pci_free_consistent(h->pdev, h->reply_pool_size, 4099 pci_free_consistent(h->pdev, h->reply_pool_size,
4101 h->reply_pool, h->reply_pool_dhandle); 4100 h->reply_pool, h->reply_pool_dhandle);
4102 kfree(h->blockFetchTable); 4101 kfree(h->blockFetchTable);
4103 } 4102 }
4104 4103
4105 /* 4104 /*
4106  * This is it. Register the PCI driver information for the cards we control; 4105  * This is it. Register the PCI driver information for the cards we control;
4107  * the OS will call our registered routines when it finds one of our cards. 4106  * the OS will call our registered routines when it finds one of our cards.
4108 */ 4107 */
4109 static int __init hpsa_init(void) 4108 static int __init hpsa_init(void)
4110 { 4109 {
4111 return pci_register_driver(&hpsa_pci_driver); 4110 return pci_register_driver(&hpsa_pci_driver);
4112 } 4111 }
4113 4112
4114 static void __exit hpsa_cleanup(void) 4113 static void __exit hpsa_cleanup(void)
4115 { 4114 {
4116 pci_unregister_driver(&hpsa_pci_driver); 4115 pci_unregister_driver(&hpsa_pci_driver);
4117 } 4116 }
4118 4117
4119 module_init(hpsa_init); 4118 module_init(hpsa_init);
4120 module_exit(hpsa_cleanup); 4119 module_exit(hpsa_cleanup);
4121 4120
1 /* 1 /*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License. 7 * the Free Software Foundation; version 2 of the License.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 * 17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com 18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 19 *
20 */ 20 */
21 #ifndef HPSA_H 21 #ifndef HPSA_H
22 #define HPSA_H 22 #define HPSA_H
23 23
24 #include <scsi/scsicam.h> 24 #include <scsi/scsicam.h>
25 25
26 #define IO_OK 0 26 #define IO_OK 0
27 #define IO_ERROR 1 27 #define IO_ERROR 1
28 28
29 struct ctlr_info; 29 struct ctlr_info;
30 30
31 struct access_method { 31 struct access_method {
32 void (*submit_command)(struct ctlr_info *h, 32 void (*submit_command)(struct ctlr_info *h,
33 struct CommandList *c); 33 struct CommandList *c);
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); 34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h); 35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 bool (*intr_pending)(struct ctlr_info *h); 36 bool (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h); 37 unsigned long (*command_completed)(struct ctlr_info *h);
38 }; 38 };
39 39
40 struct hpsa_scsi_dev_t { 40 struct hpsa_scsi_dev_t {
41 int devtype; 41 int devtype;
42 int bus, target, lun; /* as presented to the OS */ 42 int bus, target, lun; /* as presented to the OS */
43 unsigned char scsi3addr[8]; /* as presented to the HW */ 43 unsigned char scsi3addr[8]; /* as presented to the HW */
44 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" 44 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */ 45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char raid_level; /* from inquiry page 0xC1 */ 48 unsigned char raid_level; /* from inquiry page 0xC1 */
49 }; 49 };
50 50
51 struct ctlr_info { 51 struct ctlr_info {
52 int ctlr; 52 int ctlr;
53 char devname[8]; 53 char devname[8];
54 char *product_name; 54 char *product_name;
55 struct pci_dev *pdev; 55 struct pci_dev *pdev;
56 u32 board_id; 56 u32 board_id;
57 void __iomem *vaddr; 57 void __iomem *vaddr;
58 unsigned long paddr; 58 unsigned long paddr;
59 int nr_cmds; /* Number of commands allowed on this controller */ 59 int nr_cmds; /* Number of commands allowed on this controller */
60 struct CfgTable __iomem *cfgtable; 60 struct CfgTable __iomem *cfgtable;
61 int max_sg_entries; 61 int max_sg_entries;
62 int interrupts_enabled; 62 int interrupts_enabled;
63 int major; 63 int major;
64 int max_commands; 64 int max_commands;
65 int commands_outstanding; 65 int commands_outstanding;
66 int max_outstanding; /* Debug */ 66 int max_outstanding; /* Debug */
67 	int usage_count; /* number of opens on all minor devices */ 67 	int usage_count; /* number of opens on all minor devices */
68 # define PERF_MODE_INT 0 68 # define PERF_MODE_INT 0
69 # define DOORBELL_INT 1 69 # define DOORBELL_INT 1
70 # define SIMPLE_MODE_INT 2 70 # define SIMPLE_MODE_INT 2
71 # define MEMQ_MODE_INT 3 71 # define MEMQ_MODE_INT 3
72 unsigned int intr[4]; 72 unsigned int intr[4];
73 unsigned int msix_vector; 73 unsigned int msix_vector;
74 unsigned int msi_vector; 74 unsigned int msi_vector;
75 struct access_method access; 75 struct access_method access;
76 76
77 /* queue and queue Info */ 77 /* queue and queue Info */
78 struct hlist_head reqQ; 78 struct list_head reqQ;
79 struct hlist_head cmpQ; 79 struct list_head cmpQ;
80 unsigned int Qdepth; 80 unsigned int Qdepth;
81 unsigned int maxQsinceinit; 81 unsigned int maxQsinceinit;
82 unsigned int maxSG; 82 unsigned int maxSG;
83 spinlock_t lock; 83 spinlock_t lock;
84 int maxsgentries; 84 int maxsgentries;
85 u8 max_cmd_sg_entries; 85 u8 max_cmd_sg_entries;
86 int chainsize; 86 int chainsize;
87 struct SGDescriptor **cmd_sg_list; 87 struct SGDescriptor **cmd_sg_list;
88 88
89 /* pointers to command and error info pool */ 89 /* pointers to command and error info pool */
90 struct CommandList *cmd_pool; 90 struct CommandList *cmd_pool;
91 dma_addr_t cmd_pool_dhandle; 91 dma_addr_t cmd_pool_dhandle;
92 struct ErrorInfo *errinfo_pool; 92 struct ErrorInfo *errinfo_pool;
93 dma_addr_t errinfo_pool_dhandle; 93 dma_addr_t errinfo_pool_dhandle;
94 unsigned long *cmd_pool_bits; 94 unsigned long *cmd_pool_bits;
95 int nr_allocs; 95 int nr_allocs;
96 int nr_frees; 96 int nr_frees;
97 int busy_initializing; 97 int busy_initializing;
98 int busy_scanning; 98 int busy_scanning;
99 int scan_finished; 99 int scan_finished;
100 spinlock_t scan_lock; 100 spinlock_t scan_lock;
101 wait_queue_head_t scan_wait_queue; 101 wait_queue_head_t scan_wait_queue;
102 102
103 struct Scsi_Host *scsi_host; 103 struct Scsi_Host *scsi_host;
104 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */ 104 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
105 int ndevices; /* number of used elements in .dev[] array. */ 105 int ndevices; /* number of used elements in .dev[] array. */
106 #define HPSA_MAX_SCSI_DEVS_PER_HBA 256 106 #define HPSA_MAX_SCSI_DEVS_PER_HBA 256
107 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA]; 107 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
108 /* 108 /*
109 * Performant mode tables. 109 * Performant mode tables.
110 */ 110 */
111 u32 trans_support; 111 u32 trans_support;
112 u32 trans_offset; 112 u32 trans_offset;
113 struct TransTable_struct *transtable; 113 struct TransTable_struct *transtable;
114 unsigned long transMethod; 114 unsigned long transMethod;
115 115
116 /* 116 /*
117 * Performant mode completion buffer 117 * Performant mode completion buffer
118 */ 118 */
119 u64 *reply_pool; 119 u64 *reply_pool;
120 dma_addr_t reply_pool_dhandle; 120 dma_addr_t reply_pool_dhandle;
121 u64 *reply_pool_head; 121 u64 *reply_pool_head;
122 size_t reply_pool_size; 122 size_t reply_pool_size;
123 unsigned char reply_pool_wraparound; 123 unsigned char reply_pool_wraparound;
124 u32 *blockFetchTable; 124 u32 *blockFetchTable;
125 unsigned char *hba_inquiry_data; 125 unsigned char *hba_inquiry_data;
126 }; 126 };
127 #define HPSA_ABORT_MSG 0 127 #define HPSA_ABORT_MSG 0
128 #define HPSA_DEVICE_RESET_MSG 1 128 #define HPSA_DEVICE_RESET_MSG 1
129 #define HPSA_BUS_RESET_MSG 2 129 #define HPSA_BUS_RESET_MSG 2
130 #define HPSA_HOST_RESET_MSG 3 130 #define HPSA_HOST_RESET_MSG 3
131 #define HPSA_MSG_SEND_RETRY_LIMIT 10 131 #define HPSA_MSG_SEND_RETRY_LIMIT 10
132 #define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000 132 #define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
133 133
134 /* Maximum time in seconds driver will wait for command completions 134 /* Maximum time in seconds driver will wait for command completions
135 * when polling before giving up. 135 * when polling before giving up.
136 */ 136 */
137 #define HPSA_MAX_POLL_TIME_SECS (20) 137 #define HPSA_MAX_POLL_TIME_SECS (20)
138 138
139 /* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines 139 /* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
140 * how many times to retry TEST UNIT READY on a device 140 * how many times to retry TEST UNIT READY on a device
141 * while waiting for it to become ready before giving up. 141 * while waiting for it to become ready before giving up.
142 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval 142 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
143 * between sending TURs while waiting for a device 143 * between sending TURs while waiting for a device
144 * to become ready. 144 * to become ready.
145 */ 145 */
146 #define HPSA_TUR_RETRY_LIMIT (20) 146 #define HPSA_TUR_RETRY_LIMIT (20)
147 #define HPSA_MAX_WAIT_INTERVAL_SECS (30) 147 #define HPSA_MAX_WAIT_INTERVAL_SECS (30)
148 148
149 /* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board 149 /* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
150 * to become ready, in seconds, before giving up on it. 150 * to become ready, in seconds, before giving up on it.
151 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait 151 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
152 * between polling the board to see if it is ready, in 152 * between polling the board to see if it is ready, in
153 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and 153 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
154 * HPSA_BOARD_READY_ITERATIONS are derived from those. 154 * HPSA_BOARD_READY_ITERATIONS are derived from those.
155 */ 155 */
156 #define HPSA_BOARD_READY_WAIT_SECS (120) 156 #define HPSA_BOARD_READY_WAIT_SECS (120)
157 #define HPSA_BOARD_NOT_READY_WAIT_SECS (10) 157 #define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
158 #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) 158 #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
159 #define HPSA_BOARD_READY_POLL_INTERVAL \ 159 #define HPSA_BOARD_READY_POLL_INTERVAL \
160 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) 160 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
161 #define HPSA_BOARD_READY_ITERATIONS \ 161 #define HPSA_BOARD_READY_ITERATIONS \
162 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \ 162 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
163 HPSA_BOARD_READY_POLL_INTERVAL_MSECS) 163 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
164 #define HPSA_BOARD_NOT_READY_ITERATIONS \ 164 #define HPSA_BOARD_NOT_READY_ITERATIONS \
165 ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \ 165 ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
166 HPSA_BOARD_READY_POLL_INTERVAL_MSECS) 166 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
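With the defaults above these work out to: HPSA_BOARD_READY_POLL_INTERVAL = (100 * HZ) / 1000 jiffies (i.e. 100 ms), HPSA_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls, and HPSA_BOARD_NOT_READY_ITERATIONS = (10 * 1000) / 100 = 100 polls.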
167 #define HPSA_POST_RESET_PAUSE_MSECS (3000) 167 #define HPSA_POST_RESET_PAUSE_MSECS (3000)
168 #define HPSA_POST_RESET_NOOP_RETRIES (12) 168 #define HPSA_POST_RESET_NOOP_RETRIES (12)
169 169
170 /* Defining the different access methods */ 170 /* Defining the different access methods */
171 /* 171 /*
172 * Memory mapped FIFO interface (SMART 53xx cards) 172 * Memory mapped FIFO interface (SMART 53xx cards)
173 */ 173 */
174 #define SA5_DOORBELL 0x20 174 #define SA5_DOORBELL 0x20
175 #define SA5_REQUEST_PORT_OFFSET 0x40 175 #define SA5_REQUEST_PORT_OFFSET 0x40
176 #define SA5_REPLY_INTR_MASK_OFFSET 0x34 176 #define SA5_REPLY_INTR_MASK_OFFSET 0x34
177 #define SA5_REPLY_PORT_OFFSET 0x44 177 #define SA5_REPLY_PORT_OFFSET 0x44
178 #define SA5_INTR_STATUS 0x30 178 #define SA5_INTR_STATUS 0x30
179 #define SA5_SCRATCHPAD_OFFSET 0xB0 179 #define SA5_SCRATCHPAD_OFFSET 0xB0
180 180
181 #define SA5_CTCFG_OFFSET 0xB4 181 #define SA5_CTCFG_OFFSET 0xB4
182 #define SA5_CTMEM_OFFSET 0xB8 182 #define SA5_CTMEM_OFFSET 0xB8
183 183
184 #define SA5_INTR_OFF 0x08 184 #define SA5_INTR_OFF 0x08
185 #define SA5B_INTR_OFF 0x04 185 #define SA5B_INTR_OFF 0x04
186 #define SA5_INTR_PENDING 0x08 186 #define SA5_INTR_PENDING 0x08
187 #define SA5B_INTR_PENDING 0x04 187 #define SA5B_INTR_PENDING 0x04
188 #define FIFO_EMPTY 0xffffffff 188 #define FIFO_EMPTY 0xffffffff
189 #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ 189 #define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
190 190
191 #define HPSA_ERROR_BIT 0x02 191 #define HPSA_ERROR_BIT 0x02
192 192
193 /* Performant mode flags */ 193 /* Performant mode flags */
194 #define SA5_PERF_INTR_PENDING 0x04 194 #define SA5_PERF_INTR_PENDING 0x04
195 #define SA5_PERF_INTR_OFF 0x05 195 #define SA5_PERF_INTR_OFF 0x05
196 #define SA5_OUTDB_STATUS_PERF_BIT 0x01 196 #define SA5_OUTDB_STATUS_PERF_BIT 0x01
197 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 197 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01
198 #define SA5_OUTDB_CLEAR 0xA0 198 #define SA5_OUTDB_CLEAR 0xA0
199 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 199 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01
200 #define SA5_OUTDB_STATUS 0x9C 200 #define SA5_OUTDB_STATUS 0x9C
201 201
202 202
203 #define HPSA_INTR_ON 1 203 #define HPSA_INTR_ON 1
204 #define HPSA_INTR_OFF 0 204 #define HPSA_INTR_OFF 0
205 /* 205 /*
206 Send the command to the hardware 206 Send the command to the hardware
207 */ 207 */
208 static void SA5_submit_command(struct ctlr_info *h, 208 static void SA5_submit_command(struct ctlr_info *h,
209 struct CommandList *c) 209 struct CommandList *c)
210 { 210 {
211 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 211 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
212 c->Header.Tag.lower); 212 c->Header.Tag.lower);
213 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 213 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
214 h->commands_outstanding++; 214 h->commands_outstanding++;
215 if (h->commands_outstanding > h->max_outstanding) 215 if (h->commands_outstanding > h->max_outstanding)
216 h->max_outstanding = h->commands_outstanding; 216 h->max_outstanding = h->commands_outstanding;
217 } 217 }
218 218
219 /* 219 /*
220 * This card is the opposite of the other cards. 220 * This card is the opposite of the other cards.
221 * 0 turns interrupts on... 221 * 0 turns interrupts on...
222 * 0x08 turns them off... 222 * 0x08 turns them off...
223 */ 223 */
224 static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) 224 static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
225 { 225 {
226 if (val) { /* Turn interrupts on */ 226 if (val) { /* Turn interrupts on */
227 h->interrupts_enabled = 1; 227 h->interrupts_enabled = 1;
228 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 228 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
229 } else { /* Turn them off */ 229 } else { /* Turn them off */
230 h->interrupts_enabled = 0; 230 h->interrupts_enabled = 0;
231 writel(SA5_INTR_OFF, 231 writel(SA5_INTR_OFF,
232 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 232 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
233 } 233 }
234 } 234 }
235 235
236 static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) 236 static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
237 { 237 {
238 if (val) { /* turn on interrupts */ 238 if (val) { /* turn on interrupts */
239 h->interrupts_enabled = 1; 239 h->interrupts_enabled = 1;
240 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 240 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
241 } else { 241 } else {
242 h->interrupts_enabled = 0; 242 h->interrupts_enabled = 0;
243 writel(SA5_PERF_INTR_OFF, 243 writel(SA5_PERF_INTR_OFF,
244 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 244 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
245 } 245 }
246 } 246 }
247 247
248 static unsigned long SA5_performant_completed(struct ctlr_info *h) 248 static unsigned long SA5_performant_completed(struct ctlr_info *h)
249 { 249 {
250 unsigned long register_value = FIFO_EMPTY; 250 unsigned long register_value = FIFO_EMPTY;
251 251
252 /* flush the controller write of the reply queue by reading 252 /* flush the controller write of the reply queue by reading
253 * outbound doorbell status register. 253 * outbound doorbell status register.
254 */ 254 */
255 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 255 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
256 /* msi auto clears the interrupt pending bit. */ 256 /* msi auto clears the interrupt pending bit. */
257 if (!(h->msi_vector || h->msix_vector)) { 257 if (!(h->msi_vector || h->msix_vector)) {
258 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); 258 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
259 /* Do a read in order to flush the write to the controller 259 /* Do a read in order to flush the write to the controller
260 * (as per spec.) 260 * (as per spec.)
261 */ 261 */
262 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 262 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
263 } 263 }
264 264
265 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 265 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
266 register_value = *(h->reply_pool_head); 266 register_value = *(h->reply_pool_head);
267 (h->reply_pool_head)++; 267 (h->reply_pool_head)++;
268 h->commands_outstanding--; 268 h->commands_outstanding--;
269 } else { 269 } else {
270 register_value = FIFO_EMPTY; 270 register_value = FIFO_EMPTY;
271 } 271 }
272 /* Check for wraparound */ 272 /* Check for wraparound */
273 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 273 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
274 h->reply_pool_head = h->reply_pool; 274 h->reply_pool_head = h->reply_pool;
275 h->reply_pool_wraparound ^= 1; 275 h->reply_pool_wraparound ^= 1;
276 } 276 }
277 277
278 return register_value; 278 return register_value;
279 } 279 }
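The function above consumes a single-producer ring in which bit 0 of each 64-bit reply slot carries a per-lap parity: the consumer owns a slot only while the slot's parity matches reply_pool_wraparound, and the expected parity flips each time the head wraps. A minimal userspace model of that protocol, with invented names, might look like this:

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* A model of the parity-toggle reply ring, not driver code. */
	struct reply_ring {
		uint64_t *slots;
		size_t head, size;
		unsigned char lap;	/* expected parity; spec says start at 1 */
	};

	/* Return the next completed entry, or UINT64_MAX if the ring is empty. */
	static uint64_t ring_pop(struct reply_ring *r)
	{
		uint64_t v = r->slots[r->head];

		if ((v & 1) != r->lap)
			return UINT64_MAX; /* producer hasn't written this lap yet */
		if (++r->head == r->size) { /* wrapped: the next lap flips parity */
			r->head = 0;
			r->lap ^= 1;
		}
		return v;
	}

	int main(void)
	{
		uint64_t slots[4] = {0x11, 0x21, 0x31, 0}; /* 3 entries, lap bit 1 */
		struct reply_ring r = { slots, 0, 4, 1 };
		uint64_t v;

		while ((v = ring_pop(&r)) != UINT64_MAX)
			printf("completed tag 0x%llx\n",
			       (unsigned long long)(v >> 1));
		return 0;
	}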
280 280
281 /* 281 /*
282 * Returns true if fifo is full. 282 * Returns true if fifo is full.
283 * 283 *
284 */ 284 */
285 static unsigned long SA5_fifo_full(struct ctlr_info *h) 285 static unsigned long SA5_fifo_full(struct ctlr_info *h)
286 { 286 {
287 if (h->commands_outstanding >= h->max_commands) 287 if (h->commands_outstanding >= h->max_commands)
288 return 1; 288 return 1;
289 else 289 else
290 return 0; 290 return 0;
291 291
292 } 292 }
293 /* 293 /*
294 * returns value read from hardware. 294 * returns value read from hardware.
295 * returns FIFO_EMPTY if there is nothing to read 295 * returns FIFO_EMPTY if there is nothing to read
296 */ 296 */
297 static unsigned long SA5_completed(struct ctlr_info *h) 297 static unsigned long SA5_completed(struct ctlr_info *h)
298 { 298 {
299 unsigned long register_value 299 unsigned long register_value
300 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); 300 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
301 301
302 if (register_value != FIFO_EMPTY) 302 if (register_value != FIFO_EMPTY)
303 h->commands_outstanding--; 303 h->commands_outstanding--;
304 304
305 #ifdef HPSA_DEBUG 305 #ifdef HPSA_DEBUG
306 if (register_value != FIFO_EMPTY) 306 if (register_value != FIFO_EMPTY)
307 dev_dbg(&h->pdev->dev, "Read %lx back from board\n", 307 dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
308 register_value); 308 register_value);
309 else 309 else
310 dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n"); 310 dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
311 #endif 311 #endif
312 312
313 return register_value; 313 return register_value;
314 } 314 }
315 /* 315 /*
316  *	Returns true if an interrupt is pending. 316  *	Returns true if an interrupt is pending.
317 */ 317 */
318 static bool SA5_intr_pending(struct ctlr_info *h) 318 static bool SA5_intr_pending(struct ctlr_info *h)
319 { 319 {
320 unsigned long register_value = 320 unsigned long register_value =
321 readl(h->vaddr + SA5_INTR_STATUS); 321 readl(h->vaddr + SA5_INTR_STATUS);
322 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value); 322 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
323 return register_value & SA5_INTR_PENDING; 323 return register_value & SA5_INTR_PENDING;
324 } 324 }
325 325
326 static bool SA5_performant_intr_pending(struct ctlr_info *h) 326 static bool SA5_performant_intr_pending(struct ctlr_info *h)
327 { 327 {
328 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); 328 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
329 329
330 if (!register_value) 330 if (!register_value)
331 return false; 331 return false;
332 332
333 if (h->msi_vector || h->msix_vector) 333 if (h->msi_vector || h->msix_vector)
334 return true; 334 return true;
335 335
336 /* Read outbound doorbell to flush */ 336 /* Read outbound doorbell to flush */
337 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 337 register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
338 return register_value & SA5_OUTDB_STATUS_PERF_BIT; 338 return register_value & SA5_OUTDB_STATUS_PERF_BIT;
339 } 339 }
340 340
341 static struct access_method SA5_access = { 341 static struct access_method SA5_access = {
342 SA5_submit_command, 342 SA5_submit_command,
343 SA5_intr_mask, 343 SA5_intr_mask,
344 SA5_fifo_full, 344 SA5_fifo_full,
345 SA5_intr_pending, 345 SA5_intr_pending,
346 SA5_completed, 346 SA5_completed,
347 }; 347 };
348 348
349 static struct access_method SA5_performant_access = { 349 static struct access_method SA5_performant_access = {
350 SA5_submit_command, 350 SA5_submit_command,
351 SA5_performant_intr_mask, 351 SA5_performant_intr_mask,
352 SA5_fifo_full, 352 SA5_fifo_full,
353 SA5_performant_intr_pending, 353 SA5_performant_intr_pending,
354 SA5_performant_completed, 354 SA5_performant_completed,
355 }; 355 };
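SA5_access and SA5_performant_access are plain function-pointer tables; hpsa_put_ctlr_into_performant_mode() switches a controller between them by copying one table into h->access by value, so every h->access.submit_command(...) call site dispatches through whichever mode is active. A self-contained sketch of the pattern (all names below are illustrative):

	#include <stdio.h>

	struct ctlr;

	struct ops {
		void (*submit)(struct ctlr *c, int cmd);
	};

	struct ctlr {
		struct ops access;	/* held by value, like ctlr_info::access */
	};

	static void simple_submit(struct ctlr *c, int cmd)
	{
		(void)c;
		printf("simple mode: cmd %d\n", cmd);
	}

	static void performant_submit(struct ctlr *c, int cmd)
	{
		(void)c;
		printf("performant mode: cmd %d\n", cmd);
	}

	int main(void)
	{
		struct ctlr c = { .access = { .submit = simple_submit } };

		c.access.submit(&c, 1);
		/* Mode switch: copy a different ops table over the old one. */
		c.access = (struct ops){ .submit = performant_submit };
		c.access.submit(&c, 2);
		return 0;
	}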
356 356
357 struct board_type { 357 struct board_type {
358 u32 board_id; 358 u32 board_id;
359 char *product_name; 359 char *product_name;
360 struct access_method *access; 360 struct access_method *access;
361 }; 361 };
362 362
363 #endif /* HPSA_H */ 363 #endif /* HPSA_H */
364 364
365 365
drivers/scsi/hpsa_cmd.h
1 /* 1 /*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License. 7 * the Free Software Foundation; version 2 of the License.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 * 17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com 18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 19 *
20 */ 20 */
21 #ifndef HPSA_CMD_H 21 #ifndef HPSA_CMD_H
22 #define HPSA_CMD_H 22 #define HPSA_CMD_H
23 23
24 /* general boundary definitions */ 24 /* general boundary definitions */
25 #define SENSEINFOBYTES 32 /* may vary between hbas */ 25 #define SENSEINFOBYTES 32 /* may vary between hbas */
26 #define MAXSGENTRIES 32 26 #define MAXSGENTRIES 32
27 #define HPSA_SG_CHAIN 0x80000000 27 #define HPSA_SG_CHAIN 0x80000000
28 #define MAXREPLYQS 256 28 #define MAXREPLYQS 256
29 29
30 /* Command Status value */ 30 /* Command Status value */
31 #define CMD_SUCCESS 0x0000 31 #define CMD_SUCCESS 0x0000
32 #define CMD_TARGET_STATUS 0x0001 32 #define CMD_TARGET_STATUS 0x0001
33 #define CMD_DATA_UNDERRUN 0x0002 33 #define CMD_DATA_UNDERRUN 0x0002
34 #define CMD_DATA_OVERRUN 0x0003 34 #define CMD_DATA_OVERRUN 0x0003
35 #define CMD_INVALID 0x0004 35 #define CMD_INVALID 0x0004
36 #define CMD_PROTOCOL_ERR 0x0005 36 #define CMD_PROTOCOL_ERR 0x0005
37 #define CMD_HARDWARE_ERR 0x0006 37 #define CMD_HARDWARE_ERR 0x0006
38 #define CMD_CONNECTION_LOST 0x0007 38 #define CMD_CONNECTION_LOST 0x0007
39 #define CMD_ABORTED 0x0008 39 #define CMD_ABORTED 0x0008
40 #define CMD_ABORT_FAILED 0x0009 40 #define CMD_ABORT_FAILED 0x0009
41 #define CMD_UNSOLICITED_ABORT 0x000A 41 #define CMD_UNSOLICITED_ABORT 0x000A
42 #define CMD_TIMEOUT 0x000B 42 #define CMD_TIMEOUT 0x000B
43 #define CMD_UNABORTABLE 0x000C 43 #define CMD_UNABORTABLE 0x000C
44 44
45 /* Unit Attentions ASC's as defined for the MSA2012sa */ 45 /* Unit Attentions ASC's as defined for the MSA2012sa */
46 #define POWER_OR_RESET 0x29 46 #define POWER_OR_RESET 0x29
47 #define STATE_CHANGED 0x2a 47 #define STATE_CHANGED 0x2a
48 #define UNIT_ATTENTION_CLEARED 0x2f 48 #define UNIT_ATTENTION_CLEARED 0x2f
49 #define LUN_FAILED 0x3e 49 #define LUN_FAILED 0x3e
50 #define REPORT_LUNS_CHANGED 0x3f 50 #define REPORT_LUNS_CHANGED 0x3f
51 51
52 /* Unit Attentions ASCQ's as defined for the MSA2012sa */ 52 /* Unit Attentions ASCQ's as defined for the MSA2012sa */
53 53
54 /* These ASCQ's are defined for ASC = POWER_OR_RESET */ 54 /* These ASCQ's are defined for ASC = POWER_OR_RESET */
55 #define POWER_ON_RESET 0x00 55 #define POWER_ON_RESET 0x00
56 #define POWER_ON_REBOOT 0x01 56 #define POWER_ON_REBOOT 0x01
57 #define SCSI_BUS_RESET 0x02 57 #define SCSI_BUS_RESET 0x02
58 #define MSA_TARGET_RESET 0x03 58 #define MSA_TARGET_RESET 0x03
59 #define CONTROLLER_FAILOVER 0x04 59 #define CONTROLLER_FAILOVER 0x04
60 #define TRANSCEIVER_SE 0x05 60 #define TRANSCEIVER_SE 0x05
61 #define TRANSCEIVER_LVD 0x06 61 #define TRANSCEIVER_LVD 0x06
62 62
63 /* These ASCQ's are defined for ASC = STATE_CHANGED */ 63 /* These ASCQ's are defined for ASC = STATE_CHANGED */
64 #define RESERVATION_PREEMPTED 0x03 64 #define RESERVATION_PREEMPTED 0x03
65 #define ASYM_ACCESS_CHANGED 0x06 65 #define ASYM_ACCESS_CHANGED 0x06
66 #define LUN_CAPACITY_CHANGED 0x09 66 #define LUN_CAPACITY_CHANGED 0x09
67 67
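These ASC/ASCQ values are matched against the sense data a failed command returns. A minimal sketch, assuming fixed-format sense data (ASC at byte 12, ASCQ at byte 13); the helper name is hypothetical:

	static int is_report_luns_changed(const u8 *sense)
	{
		/* fixed-format sense: ASC lives at byte 12 */
		return sense[12] == REPORT_LUNS_CHANGED;
	}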
68 /* transfer direction */ 68 /* transfer direction */
69 #define XFER_NONE 0x00 69 #define XFER_NONE 0x00
70 #define XFER_WRITE 0x01 70 #define XFER_WRITE 0x01
71 #define XFER_READ 0x02 71 #define XFER_READ 0x02
72 #define XFER_RSVD 0x03 72 #define XFER_RSVD 0x03
73 73
74 /* task attribute */ 74 /* task attribute */
75 #define ATTR_UNTAGGED 0x00 75 #define ATTR_UNTAGGED 0x00
76 #define ATTR_SIMPLE 0x04 76 #define ATTR_SIMPLE 0x04
77 #define ATTR_HEADOFQUEUE 0x05 77 #define ATTR_HEADOFQUEUE 0x05
78 #define ATTR_ORDERED 0x06 78 #define ATTR_ORDERED 0x06
79 #define ATTR_ACA 0x07 79 #define ATTR_ACA 0x07
80 80
81 /* cdb type */ 81 /* cdb type */
82 #define TYPE_CMD 0x00 82 #define TYPE_CMD 0x00
83 #define TYPE_MSG 0x01 83 #define TYPE_MSG 0x01
84 84
85 /* config space register offsets */ 85 /* config space register offsets */
86 #define CFG_VENDORID 0x00 86 #define CFG_VENDORID 0x00
87 #define CFG_DEVICEID 0x02 87 #define CFG_DEVICEID 0x02
88 #define CFG_I2OBAR 0x10 88 #define CFG_I2OBAR 0x10
89 #define CFG_MEM1BAR 0x14 89 #define CFG_MEM1BAR 0x14
90 90
91 /* i2o space register offsets */ 91 /* i2o space register offsets */
92 #define I2O_IBDB_SET 0x20 92 #define I2O_IBDB_SET 0x20
93 #define I2O_IBDB_CLEAR 0x70 93 #define I2O_IBDB_CLEAR 0x70
94 #define I2O_INT_STATUS 0x30 94 #define I2O_INT_STATUS 0x30
95 #define I2O_INT_MASK 0x34 95 #define I2O_INT_MASK 0x34
96 #define I2O_IBPOST_Q 0x40 96 #define I2O_IBPOST_Q 0x40
97 #define I2O_OBPOST_Q 0x44 97 #define I2O_OBPOST_Q 0x44
98 #define I2O_DMA1_CFG 0x214 98 #define I2O_DMA1_CFG 0x214
99 99
100 /* Configuration Table */ 100 /* Configuration Table */
101 #define CFGTBL_ChangeReq 0x00000001l 101 #define CFGTBL_ChangeReq 0x00000001l
102 #define CFGTBL_AccCmds 0x00000001l 102 #define CFGTBL_AccCmds 0x00000001l
103 #define DOORBELL_CTLR_RESET 0x00000004l 103 #define DOORBELL_CTLR_RESET 0x00000004l
104 104
105 #define CFGTBL_Trans_Simple 0x00000002l 105 #define CFGTBL_Trans_Simple 0x00000002l
106 #define CFGTBL_Trans_Performant 0x00000004l 106 #define CFGTBL_Trans_Performant 0x00000004l
107 107
108 #define CFGTBL_BusType_Ultra2 0x00000001l 108 #define CFGTBL_BusType_Ultra2 0x00000001l
109 #define CFGTBL_BusType_Ultra3 0x00000002l 109 #define CFGTBL_BusType_Ultra3 0x00000002l
110 #define CFGTBL_BusType_Fibre1G 0x00000100l 110 #define CFGTBL_BusType_Fibre1G 0x00000100l
111 #define CFGTBL_BusType_Fibre2G 0x00000200l 111 #define CFGTBL_BusType_Fibre2G 0x00000200l
112 struct vals32 { 112 struct vals32 {
113 u32 lower; 113 u32 lower;
114 u32 upper; 114 u32 upper;
115 }; 115 };
116 116
117 union u64bit { 117 union u64bit {
118 struct vals32 val32; 118 struct vals32 val32;
119 u64 val; 119 u64 val;
120 }; 120 };
121 121
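The controller takes 64-bit bus addresses as two 32-bit writes, which is what vals32 models. A minimal sketch of filling one (the helper name is hypothetical):

	static void set_vals32(struct vals32 *v, u64 addr)
	{
		v->lower = (u32) addr;		/* low 32 bits */
		v->upper = (u32) (addr >> 32);	/* high 32 bits */
	}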
122 /* FIXME this is a per controller value (barf!) */ 122 /* FIXME this is a per controller value (barf!) */
123 #define HPSA_MAX_TARGETS_PER_CTLR 16 123 #define HPSA_MAX_TARGETS_PER_CTLR 16
124 #define HPSA_MAX_LUN 256 124 #define HPSA_MAX_LUN 256
125 #define HPSA_MAX_PHYS_LUN 1024 125 #define HPSA_MAX_PHYS_LUN 1024
126 126
127 /* SCSI-3 Commands */ 127 /* SCSI-3 Commands */
128 #pragma pack(1) 128 #pragma pack(1)
129 129
130 #define HPSA_INQUIRY 0x12 130 #define HPSA_INQUIRY 0x12
131 struct InquiryData { 131 struct InquiryData {
132 u8 data_byte[36]; 132 u8 data_byte[36];
133 }; 133 };
134 134
135 #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ 135 #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
136 #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ 136 #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
137 struct ReportLUNdata { 137 struct ReportLUNdata {
138 u8 LUNListLength[4]; 138 u8 LUNListLength[4];
139 u32 reserved; 139 u32 reserved;
140 u8 LUN[HPSA_MAX_LUN][8]; 140 u8 LUN[HPSA_MAX_LUN][8];
141 }; 141 };
142 142
143 struct ReportExtendedLUNdata { 143 struct ReportExtendedLUNdata {
144 u8 LUNListLength[4]; 144 u8 LUNListLength[4];
145 u8 extended_response_flag; 145 u8 extended_response_flag;
146 u8 reserved[3]; 146 u8 reserved[3];
147 u8 LUN[HPSA_MAX_LUN][24]; 147 u8 LUN[HPSA_MAX_LUN][24];
148 }; 148 };
149 149
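LUNListLength is carried as four big-endian bytes, per SCSI convention, so it has to be assembled before use. A sketch (helper name assumed, not from the driver):

	static u32 lun_list_length(const struct ReportLUNdata *d)
	{
		return ((u32) d->LUNListLength[0] << 24) |
		       ((u32) d->LUNListLength[1] << 16) |
		       ((u32) d->LUNListLength[2] << 8) |
			(u32) d->LUNListLength[3];
	}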
150 struct SenseSubsystem_info { 150 struct SenseSubsystem_info {
151 u8 reserved[36]; 151 u8 reserved[36];
152 u8 portname[8]; 152 u8 portname[8];
153 u8 reserved1[1108]; 153 u8 reserved1[1108];
154 }; 154 };
155 155
156 /* BMIC commands */ 156 /* BMIC commands */
157 #define BMIC_READ 0x26 157 #define BMIC_READ 0x26
158 #define BMIC_WRITE 0x27 158 #define BMIC_WRITE 0x27
159 #define BMIC_CACHE_FLUSH 0xc2 159 #define BMIC_CACHE_FLUSH 0xc2
160 #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ 160 #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
161 161
162 /* Command List Structure */ 162 /* Command List Structure */
163 union SCSI3Addr { 163 union SCSI3Addr {
164 struct { 164 struct {
165 u8 Dev; 165 u8 Dev;
166 u8 Bus:6; 166 u8 Bus:6;
167 u8 Mode:2; /* b00 */ 167 u8 Mode:2; /* b00 */
168 } PeripDev; 168 } PeripDev;
169 struct { 169 struct {
170 u8 DevLSB; 170 u8 DevLSB;
171 u8 DevMSB:6; 171 u8 DevMSB:6;
172 u8 Mode:2; /* b01 */ 172 u8 Mode:2; /* b01 */
173 } LogDev; 173 } LogDev;
174 struct { 174 struct {
175 u8 Dev:5; 175 u8 Dev:5;
176 u8 Bus:3; 176 u8 Bus:3;
177 u8 Targ:6; 177 u8 Targ:6;
178 u8 Mode:2; /* b10 */ 178 u8 Mode:2; /* b10 */
179 } LogUnit; 179 } LogUnit;
180 }; 180 };
181 181
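The two Mode bits (the b00/b01/b10 notes above) say which member of the union is in effect, so code must test Mode before trusting the other fields. A sketch, relying on all three packed layouts placing Mode in the same two bits, as the definitions above do:

	/* hypothetical helper: 0 = PeripDev, 1 = LogDev, 2 = LogUnit */
	static int scsi3addr_mode(const union SCSI3Addr *a)
	{
		return a->PeripDev.Mode;
	}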
182 struct PhysDevAddr { 182 struct PhysDevAddr {
183 u32 TargetId:24; 183 u32 TargetId:24;
184 u32 Bus:6; 184 u32 Bus:6;
185 u32 Mode:2; 185 u32 Mode:2;
186 /* 2 level target device addr */ 186 /* 2 level target device addr */
187 union SCSI3Addr Target[2]; 187 union SCSI3Addr Target[2];
188 }; 188 };
189 189
190 struct LogDevAddr { 190 struct LogDevAddr {
191 u32 VolId:30; 191 u32 VolId:30;
192 u32 Mode:2; 192 u32 Mode:2;
193 u8 reserved[4]; 193 u8 reserved[4];
194 }; 194 };
195 195
196 union LUNAddr { 196 union LUNAddr {
197 u8 LunAddrBytes[8]; 197 u8 LunAddrBytes[8];
198 union SCSI3Addr SCSI3Lun[4]; 198 union SCSI3Addr SCSI3Lun[4];
199 struct PhysDevAddr PhysDev; 199 struct PhysDevAddr PhysDev;
200 struct LogDevAddr LogDev; 200 struct LogDevAddr LogDev;
201 }; 201 };
202 202
203 struct CommandListHeader { 203 struct CommandListHeader {
204 u8 ReplyQueue; 204 u8 ReplyQueue;
205 u8 SGList; 205 u8 SGList;
206 u16 SGTotal; 206 u16 SGTotal;
207 struct vals32 Tag; 207 struct vals32 Tag;
208 union LUNAddr LUN; 208 union LUNAddr LUN;
209 }; 209 };
210 210
211 struct RequestBlock { 211 struct RequestBlock {
212 u8 CDBLen; 212 u8 CDBLen;
213 struct { 213 struct {
214 u8 Type:3; 214 u8 Type:3;
215 u8 Attribute:3; 215 u8 Attribute:3;
216 u8 Direction:2; 216 u8 Direction:2;
217 } Type; 217 } Type;
218 u16 Timeout; 218 u16 Timeout;
219 u8 CDB[16]; 219 u8 CDB[16];
220 }; 220 };
221 221
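The Type bitfields take the cdb type, task attribute, and transfer direction values defined earlier in this header. A sketch of filling one for a simple-tagged read (the helper name is illustrative):

	static void fill_request(struct RequestBlock *rb, u8 cdb_len)
	{
		rb->CDBLen = cdb_len;
		rb->Type.Type = TYPE_CMD;		/* SCSI command, not message */
		rb->Type.Attribute = ATTR_SIMPLE;	/* simple task attribute */
		rb->Type.Direction = XFER_READ;		/* data from device to host */
		rb->Timeout = 0;			/* no controller-side timeout */
	}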
222 struct ErrDescriptor { 222 struct ErrDescriptor {
223 struct vals32 Addr; 223 struct vals32 Addr;
224 u32 Len; 224 u32 Len;
225 }; 225 };
226 226
227 struct SGDescriptor { 227 struct SGDescriptor {
228 struct vals32 Addr; 228 struct vals32 Addr;
229 u32 Len; 229 u32 Len;
230 u32 Ext; 230 u32 Ext;
231 }; 231 };
232 232
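HPSA_SG_CHAIN (defined near the top of this header) goes in the Ext word to mark a descriptor that points at a further block of descriptors rather than at data. A sketch, with the helper name assumed:

	static void sg_make_chain(struct SGDescriptor *sg, u64 chain_addr, u32 len)
	{
		sg->Addr.lower = (u32) chain_addr;
		sg->Addr.upper = (u32) (chain_addr >> 32);
		sg->Len = len;
		sg->Ext = HPSA_SG_CHAIN;	/* entry chains, it is not data */
	}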
233 union MoreErrInfo { 233 union MoreErrInfo {
234 struct { 234 struct {
235 u8 Reserved[3]; 235 u8 Reserved[3];
236 u8 Type; 236 u8 Type;
237 u32 ErrorInfo; 237 u32 ErrorInfo;
238 } Common_Info; 238 } Common_Info;
239 struct { 239 struct {
240 u8 Reserved[2]; 240 u8 Reserved[2];
241 u8 offense_size; /* size of offending entry */ 241 u8 offense_size; /* size of offending entry */
242 u8 offense_num; /* byte # of offense, 0-based */ 242 u8 offense_num; /* byte # of offense, 0-based */
243 u32 offense_value; 243 u32 offense_value;
244 } Invalid_Cmd; 244 } Invalid_Cmd;
245 }; 245 };
246 struct ErrorInfo { 246 struct ErrorInfo {
247 u8 ScsiStatus; 247 u8 ScsiStatus;
248 u8 SenseLen; 248 u8 SenseLen;
249 u16 CommandStatus; 249 u16 CommandStatus;
250 u32 ResidualCnt; 250 u32 ResidualCnt;
251 union MoreErrInfo MoreErrInfo; 251 union MoreErrInfo MoreErrInfo;
252 u8 SenseInfo[SENSEINFOBYTES]; 252 u8 SenseInfo[SENSEINFOBYTES];
253 }; 253 };
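Completion code typically switches on CommandStatus and only consults ScsiStatus and SenseInfo when the status is CMD_TARGET_STATUS. A minimal sketch (helper name hypothetical):

	static int command_needs_sense_inspection(const struct ErrorInfo *ei)
	{
		return ei->CommandStatus == CMD_TARGET_STATUS &&
		       ei->SenseLen > 0;
	}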
254 /* Command types */ 254 /* Command types */
255 #define CMD_IOCTL_PEND 0x01 255 #define CMD_IOCTL_PEND 0x01
256 #define CMD_SCSI 0x03 256 #define CMD_SCSI 0x03
257 257
258 /* This structure needs to be divisible by 32 for the new 258 /* This structure needs to be divisible by 32 for the new
259 * indexing method and performant mode. 259 * indexing method and performant mode.
260 */ 260 */
261 #define PAD32 32 261 #define PAD32 32
262 #define PAD64DIFF 0 262 #define PAD64DIFF 0
263 #define USEEXTRA ((sizeof(void *) - 4)/4) 263 #define USEEXTRA ((sizeof(void *) - 4)/4)
264 #define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA) 264 #define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
265 265
266 #define DIRECT_LOOKUP_SHIFT 5 266 #define DIRECT_LOOKUP_SHIFT 5
267 #define DIRECT_LOOKUP_BIT 0x10 267 #define DIRECT_LOOKUP_BIT 0x10
268 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) 268 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
269 269
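Together these macros encode a command-pool index into a tag: the index is shifted clear of the low five bits and bit 4 marks the tag as an index rather than a bus address. A sketch of the round trip (helper names assumed):

	static u32 tag_from_index(u32 index)
	{
		return (index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	}

	static u32 index_from_tag(u32 tag)
	{
		return (tag & DIRECT_LOOKUP_MASK) >> DIRECT_LOOKUP_SHIFT;
	}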
270 #define HPSA_ERROR_BIT 0x02 270 #define HPSA_ERROR_BIT 0x02
271 struct ctlr_info; /* defined in hpsa.h */ 271 struct ctlr_info; /* defined in hpsa.h */
272 /* The size of this structure needs to be divisible by 32 272 /* The size of this structure needs to be divisible by 32
273 * on all architectures because the low 5 bits of the addresses 273 * on all architectures because the low 5 bits of the addresses
274 * are used as follows: 274 * are used as follows:
275 * 275 *
276 * bit 0: to device, used to indicate "performant mode" command 276 * bit 0: to device, used to indicate "performant mode" command
277 * from device, indicates error status. 277 * from device, indicates error status.
278 * bits 1-3: to device, indicates block fetch table entry for 278 * bits 1-3: to device, indicates block fetch table entry for
279 * reducing DMA in fetching commands from host memory. 279 * reducing DMA in fetching commands from host memory.
280 * bit 4: used to indicate whether tag is "direct lookup" (index), 280 * bit 4: used to indicate whether tag is "direct lookup" (index),
281 * or a bus address. 281 * or a bus address.
282 */ 282 */
283 283
284 struct CommandList { 284 struct CommandList {
285 struct CommandListHeader Header; 285 struct CommandListHeader Header;
286 struct RequestBlock Request; 286 struct RequestBlock Request;
287 struct ErrDescriptor ErrDesc; 287 struct ErrDescriptor ErrDesc;
288 struct SGDescriptor SG[MAXSGENTRIES]; 288 struct SGDescriptor SG[MAXSGENTRIES];
289 /* information associated with the command */ 289 /* information associated with the command */
290 u32 busaddr; /* physical addr of this record */ 290 u32 busaddr; /* physical addr of this record */
291 struct ErrorInfo *err_info; /* pointer to the allocated mem */ 291 struct ErrorInfo *err_info; /* pointer to the allocated mem */
292 struct ctlr_info *h; 292 struct ctlr_info *h;
293 int cmd_type; 293 int cmd_type;
294 long cmdindex; 294 long cmdindex;
295 struct hlist_node list; 295 struct list_head list;
296 struct request *rq; 296 struct request *rq;
297 struct completion *waiting; 297 struct completion *waiting;
298 void *scsi_cmd; 298 void *scsi_cmd;
299 299
300 /* on 64 bit architectures, to get this to be 32-byte-aligned 300 /* on 64 bit architectures, to get this to be 32-byte-aligned
301 * it so happens we need PAD_64 bytes of padding; on 32 bit systems, 301 * it so happens we need PAD_64 bytes of padding; on 32 bit systems,
302 * we need PAD_32 bytes of padding (see below). This does that. 302 * we need PAD_32 bytes of padding (see below). This does that.
303 * If it happens that 64 bit and 32 bit systems need different 303 * If it happens that 64 bit and 32 bit systems need different
304 * padding, PAD_32 and PAD_64 can be set independently, and 304 * padding, PAD_32 and PAD_64 can be set independently, and
305 * the code below will do the right thing. 305 * the code below will do the right thing.
306 */ 306 */
307 #define IS_32_BIT ((8 - sizeof(long))/4) 307 #define IS_32_BIT ((8 - sizeof(long))/4)
308 #define IS_64_BIT (!IS_32_BIT) 308 #define IS_64_BIT (!IS_32_BIT)
309 #define PAD_32 (4) 309 #define PAD_32 (4)
310 #define PAD_64 (4) 310 #define PAD_64 (4)
311 #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) 311 #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
312 u8 pad[COMMANDLIST_PAD]; 312 u8 pad[COMMANDLIST_PAD];
313 }; 313 };
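The list member above is the one substantive change to this header: struct hlist_node becomes struct list_head. An hlist has no tail pointer, so commands could only be pushed at the head (LIFO); list_head supports list_add_tail(), which is what lets the driver's internal queues stay FIFO, per the commit message. A sketch of the enqueue this enables (the function name follows the cciss convention but is assumed here):

	#include <linux/list.h>

	static inline void addQ(struct list_head *queue, struct CommandList *c)
	{
		list_add_tail(&c->list, queue);	/* append: FIFO, not LIFO */
	}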
314 314
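Because the tag scheme steals the low five bits of the command's bus address, each command must stay 32-byte aligned, which the PAD_32/PAD_64 arithmetic above is meant to guarantee. An illustrative compile-time check (not in the original header; BUILD_BUG_ON must sit inside a function, e.g. module init):

	BUILD_BUG_ON(sizeof(struct CommandList) % 32 != 0);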
315 /* Configuration Table Structure */ 315 /* Configuration Table Structure */
316 struct HostWrite { 316 struct HostWrite {
317 u32 TransportRequest; 317 u32 TransportRequest;
318 u32 Reserved; 318 u32 Reserved;
319 u32 CoalIntDelay; 319 u32 CoalIntDelay;
320 u32 CoalIntCount; 320 u32 CoalIntCount;
321 }; 321 };
322 322
323 #define SIMPLE_MODE 0x02 323 #define SIMPLE_MODE 0x02
324 #define PERFORMANT_MODE 0x04 324 #define PERFORMANT_MODE 0x04
325 #define MEMQ_MODE 0x08 325 #define MEMQ_MODE 0x08
326 326
327 struct CfgTable { 327 struct CfgTable {
328 u8 Signature[4]; 328 u8 Signature[4];
329 u32 SpecValence; 329 u32 SpecValence;
330 u32 TransportSupport; 330 u32 TransportSupport;
331 u32 TransportActive; 331 u32 TransportActive;
332 struct HostWrite HostWrite; 332 struct HostWrite HostWrite;
333 u32 CmdsOutMax; 333 u32 CmdsOutMax;
334 u32 BusTypes; 334 u32 BusTypes;
335 u32 TransMethodOffset; 335 u32 TransMethodOffset;
336 u8 ServerName[16]; 336 u8 ServerName[16];
337 u32 HeartBeat; 337 u32 HeartBeat;
338 u32 SCSI_Prefetch; 338 u32 SCSI_Prefetch;
339 u32 MaxScatterGatherElements; 339 u32 MaxScatterGatherElements;
340 u32 MaxLogicalUnits; 340 u32 MaxLogicalUnits;
341 u32 MaxPhysicalDevices; 341 u32 MaxPhysicalDevices;
342 u32 MaxPhysicalDrivesPerLogicalUnit; 342 u32 MaxPhysicalDrivesPerLogicalUnit;
343 u32 MaxPerformantModeCommands; 343 u32 MaxPerformantModeCommands;
344 u8 reserved[0x78 - 0x58]; 344 u8 reserved[0x78 - 0x58];
345 u32 misc_fw_support; /* offset 0x78 */ 345 u32 misc_fw_support; /* offset 0x78 */
346 #define MISC_FW_DOORBELL_RESET (0x02) 346 #define MISC_FW_DOORBELL_RESET (0x02)
347 }; 347 };
348 348
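To switch transport modes, a driver writes the desired CFGTBL_Trans_* value into HostWrite.TransportRequest, rings the config-table doorbell with CFGTBL_ChangeReq, and polls TransportActive. A sketch, assuming an ioremap'ed table and a doorbell register at SA5_DOORBELL (defined in hpsa.h):

	static int request_simple_mode(struct CfgTable __iomem *ct,
				       void __iomem *vaddr)
	{
		int i;

		writel(CFGTBL_Trans_Simple, &ct->HostWrite.TransportRequest);
		writel(CFGTBL_ChangeReq, vaddr + SA5_DOORBELL);
		for (i = 0; i < 10; i++) {	/* poll for the mode switch */
			if (readl(&ct->TransportActive) & CFGTBL_Trans_Simple)
				return 0;
			msleep(10);
		}
		return -ENODEV;
	}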
349 #define NUM_BLOCKFETCH_ENTRIES 8 349 #define NUM_BLOCKFETCH_ENTRIES 8
350 struct TransTable_struct { 350 struct TransTable_struct {
351 u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; 351 u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
352 u32 RepQSize; 352 u32 RepQSize;
353 u32 RepQCount; 353 u32 RepQCount;
354 u32 RepQCtrAddrLow32; 354 u32 RepQCtrAddrLow32;
355 u32 RepQCtrAddrHigh32; 355 u32 RepQCtrAddrHigh32;
356 u32 RepQAddr0Low32; 356 u32 RepQAddr0Low32;
357 u32 RepQAddr0High32; 357 u32 RepQAddr0High32;
358 }; 358 };
359 359
360 struct hpsa_pci_info { 360 struct hpsa_pci_info {
361 unsigned char bus; 361 unsigned char bus;
362 unsigned char dev_fn; 362 unsigned char dev_fn;
363 unsigned short domain; 363 unsigned short domain;
364 u32 board_id; 364 u32 board_id;
365 }; 365 };
366 366
367 #pragma pack() 367 #pragma pack()
368 #endif /* HPSA_CMD_H */ 368 #endif /* HPSA_CMD_H */
369 369