Commit 1bfff2f8696ea13fc3d55a977f50abbddee336b2

Authored by Brian King
Committed by James Bottomley
1 parent 89aad42831

[SCSI] ipr: Increase alignment boundary of command blocks

The latest generation of ipr hardware performs best when command blocks
are aligned to a boundary equal to the size of the command block. Ensure
512-byte alignment, since this is the largest command block size we can
send.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Showing 1 changed file with 1 addition and 1 deletion
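
The changed line itself falls below the context shown here, but the mechanism is worth spelling out: the driver carves its command blocks out of a PCI DMA pool, and it is the pool's alignment argument that enforces the boundary. A minimal sketch of that pattern, assuming the usual pci_pool_create() call; the helper name ipr_create_cmd_pool is illustrative, not the verbatim patch:

	/* Sketch only (hypothetical helper, not the literal diff):
	 * ask the DMA pool for naturally aligned command blocks.
	 * pci_pool_create(name, pdev, size, align, boundary) returns a
	 * pool whose objects each start on an "align"-byte boundary, so
	 * passing 512 guarantees the alignment the newest adapters want.
	 */
	static struct pci_pool *ipr_create_cmd_pool(struct ipr_ioa_cfg *ioa_cfg)
	{
		return pci_pool_create(IPR_NAME, ioa_cfg->pdev,
				       sizeof(struct ipr_cmnd),	/* object size */
				       512,			/* alignment */
				       0);			/* no crossing limit */
	}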

/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
567 567
568 /** 568 /**
569 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse 569 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
570 * @ipr_cmd: ipr command struct 570 * @ipr_cmd: ipr command struct
571 * 571 *
572 * Return value: 572 * Return value:
573 * none 573 * none
574 **/ 574 **/
575 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 575 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
576 { 576 {
577 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 577 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
578 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 578 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
579 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 579 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
580 dma_addr_t dma_addr = ipr_cmd->dma_addr; 580 dma_addr_t dma_addr = ipr_cmd->dma_addr;
581 581
582 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 582 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
583 ioarcb->data_transfer_length = 0; 583 ioarcb->data_transfer_length = 0;
584 ioarcb->read_data_transfer_length = 0; 584 ioarcb->read_data_transfer_length = 0;
585 ioarcb->ioadl_len = 0; 585 ioarcb->ioadl_len = 0;
586 ioarcb->read_ioadl_len = 0; 586 ioarcb->read_ioadl_len = 0;
587 587
588 if (ipr_cmd->ioa_cfg->sis64) { 588 if (ipr_cmd->ioa_cfg->sis64) {
589 ioarcb->u.sis64_addr_data.data_ioadl_addr = 589 ioarcb->u.sis64_addr_data.data_ioadl_addr =
590 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 590 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
591 ioasa64->u.gata.status = 0; 591 ioasa64->u.gata.status = 0;
592 } else { 592 } else {
593 ioarcb->write_ioadl_addr = 593 ioarcb->write_ioadl_addr =
594 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 594 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
595 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 595 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
596 ioasa->u.gata.status = 0; 596 ioasa->u.gata.status = 0;
597 } 597 }
598 598
599 ioasa->hdr.ioasc = 0; 599 ioasa->hdr.ioasc = 0;
600 ioasa->hdr.residual_data_len = 0; 600 ioasa->hdr.residual_data_len = 0;
601 ipr_cmd->scsi_cmd = NULL; 601 ipr_cmd->scsi_cmd = NULL;
602 ipr_cmd->qc = NULL; 602 ipr_cmd->qc = NULL;
603 ipr_cmd->sense_buffer[0] = 0; 603 ipr_cmd->sense_buffer[0] = 0;
604 ipr_cmd->dma_use_sg = 0; 604 ipr_cmd->dma_use_sg = 0;
605 } 605 }
606 606
607 /** 607 /**
608 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block 608 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
609 * @ipr_cmd: ipr command struct 609 * @ipr_cmd: ipr command struct
610 * 610 *
611 * Return value: 611 * Return value:
612 * none 612 * none
613 **/ 613 **/
614 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 614 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
615 { 615 {
616 ipr_reinit_ipr_cmnd(ipr_cmd); 616 ipr_reinit_ipr_cmnd(ipr_cmd);
617 ipr_cmd->u.scratch = 0; 617 ipr_cmd->u.scratch = 0;
618 ipr_cmd->sibling = NULL; 618 ipr_cmd->sibling = NULL;
619 init_timer(&ipr_cmd->timer); 619 init_timer(&ipr_cmd->timer);
620 } 620 }
621 621
622 /** 622 /**
623 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block 623 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
624 * @ioa_cfg: ioa config struct 624 * @ioa_cfg: ioa config struct
625 * 625 *
626 * Return value: 626 * Return value:
627 * pointer to ipr command struct 627 * pointer to ipr command struct
628 **/ 628 **/
629 static 629 static
630 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) 630 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
631 { 631 {
632 struct ipr_cmnd *ipr_cmd; 632 struct ipr_cmnd *ipr_cmd;
633 633
634 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue); 634 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
635 list_del(&ipr_cmd->queue); 635 list_del(&ipr_cmd->queue);
636 ipr_init_ipr_cmnd(ipr_cmd); 636 ipr_init_ipr_cmnd(ipr_cmd);
637 637
638 return ipr_cmd; 638 return ipr_cmd;
639 } 639 }
640 640
641 /** 641 /**
642 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts 642 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
643 * @ioa_cfg: ioa config struct 643 * @ioa_cfg: ioa config struct
644 * @clr_ints: interrupts to clear 644 * @clr_ints: interrupts to clear
645 * 645 *
646 * This function masks all interrupts on the adapter, then clears the 646 * This function masks all interrupts on the adapter, then clears the
647 * interrupts specified in the mask 647 * interrupts specified in the mask
648 * 648 *
649 * Return value: 649 * Return value:
650 * none 650 * none
651 **/ 651 **/
652 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, 652 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
653 u32 clr_ints) 653 u32 clr_ints)
654 { 654 {
655 volatile u32 int_reg; 655 volatile u32 int_reg;
656 656
657 /* Stop new interrupts */ 657 /* Stop new interrupts */
658 ioa_cfg->allow_interrupts = 0; 658 ioa_cfg->allow_interrupts = 0;
659 659
660 /* Set interrupt mask to stop all new interrupts */ 660 /* Set interrupt mask to stop all new interrupts */
661 if (ioa_cfg->sis64) 661 if (ioa_cfg->sis64)
662 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); 662 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
663 else 663 else
664 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); 664 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
665 665
666 /* Clear any pending interrupts */ 666 /* Clear any pending interrupts */
667 if (ioa_cfg->sis64) 667 if (ioa_cfg->sis64)
668 writel(~0, ioa_cfg->regs.clr_interrupt_reg); 668 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
669 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); 669 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
670 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 670 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
671 } 671 }
672 672
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then OR in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

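/*
 * Illustrative sketch (not part of the driver): how the sis64 path above
 * encodes the IOARCB size in the low-order bits of the DMA address written
 * to the IOARRIN register. Bit 0 selects the 256 byte default; bit 2 is
 * additionally set once the scatter/gather list no longer fits in the
 * 128 bytes of ioadl space, promoting the IOARCB to 512 bytes. The 16 byte
 * descriptor size below stands in for sizeof(struct ipr_ioadl64_desc) by
 * assumption, purely for illustration.
 */
#include <stdio.h>

#define IOADL64_DESC_SIZE	16	/* assumed descriptor size */

static unsigned long long ioarrin_value(unsigned long long dma_addr,
					int dma_use_sg)
{
	dma_addr |= 0x1;				/* 256 byte default */
	if (dma_use_sg * IOADL64_DESC_SIZE > 128)
		dma_addr |= 0x4;			/* promote to 512 bytes */
	return dma_addr;
}

int main(void)
{
	/* 8 descriptors fit in 128 bytes; 9 do not */
	printf("%llx\n", ioarrin_value(0x1000, 8));	/* prints 1001 */
	printf("%llx\n", ioarrin_value(0x1000, 9));	/* prints 1005 */
	return 0;
}
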
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

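/*
 * Illustrative sketch (not part of the driver): the 32-bit ioadl above packs
 * the descriptor flags and the data length into one big-endian word
 * (flags_and_data_len), while the 64-bit ioadl keeps them in separate
 * fields. The flag constant and length mask here are placeholders chosen
 * for the demo; the real IPR_IOADL_FLAGS_* values are defined in ipr.h.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FLAGS_READ_LAST	0x48000000u	/* hypothetical flag bits */
#define DEMO_LEN_MASK		0x00ffffffu	/* hypothetical length field */

int main(void)
{
	uint32_t packed = DEMO_FLAGS_READ_LAST | 512;	/* flags | len */

	printf("flags=%08x len=%u\n",
	       packed & ~DEMO_LEN_MASK, packed & DEMO_LEN_MASK);
	return 0;
}
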
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

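/*
 * Illustrative sketch (not part of the driver): the blocking send above
 * drops the host lock before sleeping on the completion and retakes it
 * afterwards, because the interrupt-side done path needs that same lock in
 * order to run and signal the waiter. A userspace analogue, with a mutex
 * standing in for host_lock and a POSIX semaphore for the completion:
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t completion;

static void *irq_side(void *arg)
{
	pthread_mutex_lock(&host_lock);		/* would deadlock if the  */
	sem_post(&completion);			/* waiter still held it   */
	pthread_mutex_unlock(&host_lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	sem_init(&completion, 0, 0);
	pthread_mutex_lock(&host_lock);		/* caller holds host_lock */
	pthread_create(&thr, NULL, irq_side, NULL);

	pthread_mutex_unlock(&host_lock);	/* drop ...    */
	sem_wait(&completion);			/* ... sleep ... */
	pthread_mutex_lock(&host_lock);		/* ... retake  */

	printf("command completed\n");
	pthread_mutex_unlock(&host_lock);
	pthread_join(thr, NULL);
	return 0;
}
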
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

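/*
 * Illustrative sketch (not part of the driver): the HCAM CDB built above
 * carries the hostrcb buffer length as a 16-bit big-endian value split
 * across cdb[7] (high byte) and cdb[8] (low byte), the usual SCSI
 * allocation-length convention. The 0x1234 length is an arbitrary demo
 * value standing in for sizeof(hostrcb->hcam).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cdb[16] = { 0 };
	size_t len = 0x1234;			/* demo stand-in length */

	cdb[7] = (len >> 8) & 0xff;		/* high byte */
	cdb[8] = len & 0xff;			/* low byte  */

	printf("encoded: %02x %02x, decoded: %d\n",
	       cdb[7], cdb[8], (cdb[7] << 8) | cdb[8]);
	return 0;
}
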
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

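/*
 * Illustrative sketch (not part of the driver): target IDs on the virtual
 * buses above are handed out with find_first_zero_bit()/set_bit() over a
 * per-type bitmap, so an ID freed by ipr_clear_res_target() is reused by
 * the next new resource. A minimal userspace analogue over one word:
 */
#include <stdio.h>

static int alloc_id(unsigned long *map, int max)
{
	int id;

	for (id = 0; id < max; id++)		/* find_first_zero_bit() */
		if (!(*map & (1UL << id))) {
			*map |= 1UL << id;	/* set_bit() */
			return id;
		}
	return -1;				/* bitmap exhausted */
}

int main(void)
{
	unsigned long target_ids = 0;

	printf("%d %d ", alloc_id(&target_ids, 64), alloc_id(&target_ids, 64));
	target_ids &= ~1UL;			/* clear_bit(0): device removed */
	printf("%d\n", alloc_id(&target_ids, 64));	/* prints: 0 1 0 */
	return 0;
}
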
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer in which to format the path
 * @len:	length of the buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

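/*
 * Illustrative sketch (not part of the driver): ipr_format_res_path()
 * renders a resource path as dash-separated hex bytes and stops at the
 * 0xff terminator, e.g. {0x00, 0x02, 0x04, 0xff} -> "00-02-04". The copy
 * below mirrors the function above so it can run standalone.
 */
#include <stdio.h>

static char *format_res_path(unsigned char *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

int main(void)
{
	unsigned char path[8] = { 0x00, 0x02, 0x04, 0xff };
	char buf[24];

	printf("%s\n", format_res_path(path, buf, sizeof(buf)));	/* 00-02-04 */
	return 0;
}
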
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

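/*
 * Illustrative sketch (not part of the driver): given the index of the last
 * byte of a fixed-width field, strip_and_pad_whitespace() walks back over
 * trailing spaces, leaves exactly one space as a separator, terminates the
 * string, and returns the index where the next field should be copied in.
 * Standalone copy with a worked example on an 8 byte vendor field:
 */
#include <stdio.h>
#include <string.h>

static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i + 1] = ' ';
	buf[i + 2] = '\0';
	return i + 2;
}

int main(void)
{
	char buf[32];

	memcpy(buf, "IBM     ", 8);		/* "IBM" plus five pad spaces */
	printf("%d '%s'\n", strip_and_pad_whitespace(7, buf), buf);
	/* prints: 4 'IBM ' */
	return 0;
}
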
/**
 * ipr_log_vpd_compact - Log the passed VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a configuration error (sis64).
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			ipr_format_res_path(dev_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

1645 /** 1645 /**
1646 * ipr_log_enhanced_array_error - Log an array configuration error. 1646 * ipr_log_enhanced_array_error - Log an array configuration error.
1647 * @ioa_cfg: ioa config struct 1647 * @ioa_cfg: ioa config struct
1648 * @hostrcb: hostrcb struct 1648 * @hostrcb: hostrcb struct
1649 * 1649 *
1650 * Return value: 1650 * Return value:
1651 * none 1651 * none
1652 **/ 1652 **/
1653 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, 1653 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1654 struct ipr_hostrcb *hostrcb) 1654 struct ipr_hostrcb *hostrcb)
1655 { 1655 {
1656 int i, num_entries; 1656 int i, num_entries;
1657 struct ipr_hostrcb_type_14_error *error; 1657 struct ipr_hostrcb_type_14_error *error;
1658 struct ipr_hostrcb_array_data_entry_enhanced *array_entry; 1658 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1659 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1659 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1660 1660
1661 error = &hostrcb->hcam.u.error.u.type_14_error; 1661 error = &hostrcb->hcam.u.error.u.type_14_error;
1662 1662
1663 ipr_err_separator; 1663 ipr_err_separator;
1664 1664
1665 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1665 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1666 error->protection_level, 1666 error->protection_level,
1667 ioa_cfg->host->host_no, 1667 ioa_cfg->host->host_no,
1668 error->last_func_vset_res_addr.bus, 1668 error->last_func_vset_res_addr.bus,
1669 error->last_func_vset_res_addr.target, 1669 error->last_func_vset_res_addr.target,
1670 error->last_func_vset_res_addr.lun); 1670 error->last_func_vset_res_addr.lun);
1671 1671
1672 ipr_err_separator; 1672 ipr_err_separator;
1673 1673
1674 array_entry = error->array_member; 1674 array_entry = error->array_member;
1675 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1675 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1676 ARRAY_SIZE(error->array_member)); 1676 ARRAY_SIZE(error->array_member));
1677 1677
1678 for (i = 0; i < num_entries; i++, array_entry++) { 1678 for (i = 0; i < num_entries; i++, array_entry++) {
1679 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1679 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1680 continue; 1680 continue;
1681 1681
1682 if (be32_to_cpu(error->exposed_mode_adn) == i) 1682 if (be32_to_cpu(error->exposed_mode_adn) == i)
1683 ipr_err("Exposed Array Member %d:\n", i); 1683 ipr_err("Exposed Array Member %d:\n", i);
1684 else 1684 else
1685 ipr_err("Array Member %d:\n", i); 1685 ipr_err("Array Member %d:\n", i);
1686 1686
1687 ipr_log_ext_vpd(&array_entry->vpd); 1687 ipr_log_ext_vpd(&array_entry->vpd);
1688 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1688 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1689 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1689 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1690 "Expected Location"); 1690 "Expected Location");
1691 1691
1692 ipr_err_separator; 1692 ipr_err_separator;
1693 } 1693 }
1694 } 1694 }
1695 1695
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		/* Skip empty slots, but still advance array_entry below so
		   later slots stay aligned with their index. */
		if (memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) {
			if (be32_to_cpu(error->exposed_mode_adn) == i)
				ipr_err("Exposed Array Member %d:\n", i);
			else
				ipr_err("Array Member %d:\n", i);

			ipr_log_vpd(&array_entry->vpd);

			ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
			ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
					 "Expected Location");

			ipr_err_separator;
		}

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
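
A standalone sketch of the dump loop above, for clarity: i indexes 32-bit words and advances four words per row, so the printed i*4 is a byte offset and each output row covers 16 bytes. This is plain userspace C with ntohl() standing in for be32_to_cpu(); the sample buffer is invented.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(), standing in for be32_to_cpu() */

static void dump_hex(const uint32_t *data, int len)
{
	int i;

	/* i counts 32-bit words; each row prints four of them. */
	for (i = 0; i < len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", (unsigned)(i * 4),
		       ntohl(data[i]), ntohl(data[i + 1]),
		       ntohl(data[i + 2]), ntohl(data[i + 3]));
}

int main(void)
{
	uint32_t sample[8] = { 0 };	/* 32 bytes -> two rows of output */

	dump_hex(sample, (int)sizeof(sample));
	return 0;
}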

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
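
The length passed to ipr_log_hex_data() above is the classic offsetof() idiom for a variable-length record: the total record length minus everything that precedes the trailing data[] member. A minimal sketch with invented stand-in structs (the real hostrcb layout lives in ipr.h):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the driver's hostrcb layout; sizes are invented. */
struct error_record {
	uint32_t prc;
	char failure_reason[64];
	uint32_t data[64];	/* variable-length in the real record */
};

struct hcam {
	uint32_t length;	/* total valid bytes in the record */
	struct error_record u;
};

int main(void)
{
	uint32_t length = 128;	/* pretend total record length */
	size_t fixed = offsetof(struct hcam, u) +
		       offsetof(struct error_record, data);
	size_t payload = length - fixed;

	printf("fixed header: %zu bytes, hex payload: %zu bytes\n",
	       fixed, payload);
	return 0;
}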

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
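
The function above packs two lookups into one byte: path_state is masked into an "active" field and a "state" field, each matched against its own descriptor table, with a raw fallback print when nothing matches. A compilable miniature, with invented mask values and encodings (the real IPR_PATH_* constants are defined in ipr.h):

#include <stdio.h>
#include <stdint.h>

/* Placeholder masks and encodings, not the driver's real values. */
#define ACTIVE_MASK 0xc0
#define STATE_MASK  0x0f

static const struct { uint8_t active; const char *desc; } active_desc[] = {
	{ 0x00, "Path" }, { 0x40, "Active path" }, { 0x80, "Inactive path" },
};
static const struct { uint8_t state; const char *desc; } state_desc[] = {
	{ 0x00, "has no path state information available" },
	{ 0x01, "is healthy" }, { 0x02, "is degraded" }, { 0x03, "is failed" },
};

#define N(a) (sizeof(a) / sizeof((a)[0]))

static void log_path(uint8_t path_state)
{
	uint8_t active = path_state & ACTIVE_MASK;
	uint8_t state = path_state & STATE_MASK;
	size_t i, j;

	for (i = 0; i < N(active_desc); i++) {
		if (active_desc[i].active != active)
			continue;
		for (j = 0; j < N(state_desc); j++) {
			if (state_desc[j].state != state)
				continue;
			printf("%s %s\n", active_desc[i].desc,
			       state_desc[j].desc);
			return;
		}
	}
	/* Fallback: dump the raw byte when no descriptor matches. */
	printf("Path state=%02X (no descriptor match)\n", path_state);
}

int main(void)
{
	log_path(0x41);	/* "Active path is healthy" under these demo values */
	return 0;
}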

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(fabric->res_path, buffer,
							 sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
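
The 16-entry link_rate[] table is indexed by the low bits of the PHY's negotiated-rate field, which follows the SAS encoding for 0x8 (1.5Gbps) and 0x9 (3.0Gbps). A sketch assuming the mask keeps the low nibble, as the table size implies; the register value below is invented:

#include <stdio.h>
#include <stdint.h>

static const char *link_rate[16] = {
	"unknown", "disabled", "phy reset problem", "spinup hold",
	"port selector", "unknown", "unknown", "unknown",
	"1.5Gbps", "3.0Gbps", "unknown", "unknown",
	"unknown", "unknown", "unknown", "unknown"
};

int main(void)
{
	uint8_t raw = 0x29;	/* made-up register value for the demo */

	/* Low nibble selects the table entry; high bits are other flags. */
	printf("negotiated rate: %s\n", link_rate[raw & 0x0f]); /* 3.0Gbps */
	return 0;
}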

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(cfg->res_path, buffer,
							 sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
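
Each fabric descriptor above is self-sizing: the walk advances the cursor by the descriptor's own length field, and whatever bytes remain after the last descriptor are hex-dumped. A userspace sketch of the same walk, with an invented descriptor layout:

#include <stdio.h>
#include <stdint.h>

/* Invented fixed-size stand-in; the driver's descriptors vary in size,
 * which is exactly why the length field drives the walk. */
struct desc {
	uint16_t length;	/* total size of this descriptor in bytes */
	uint8_t payload[14];
};

int main(void)
{
	struct desc descs[2] = { { .length = 16 }, { .length = 16 } };
	const uint8_t *p = (const uint8_t *)descs;
	int remaining = sizeof(descs);

	while (remaining > 0) {
		const struct desc *d = (const struct desc *)p;

		printf("descriptor of %u bytes\n", (unsigned)d->length);
		remaining -= d->length;	/* leftover would be hex-dumped */
		p += d->length;		/* step by this descriptor's size */
	}
	return 0;
}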

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(array_entry->res_path, buffer,
					    sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
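
The same lookup-with-fallback idiom in miniature: scan the table for the masked key and fall back to slot 0, which is reserved for the unknown-error entry. The table contents and mask below are invented, not real IOASC values:

#include <stdio.h>
#include <stdint.h>

static const struct { uint32_t code; const char *msg; } table[] = {
	{ 0x00000000, "unknown error" },	/* slot 0: the default */
	{ 0x01080000, "recovered by device" },
	{ 0x02040400, "bus was reset" },
};

static size_t lookup(uint32_t code)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].code == (code & 0xfffffff0))  /* placeholder mask */
			return i;
	return 0;	/* not found: index of the unknown-error entry */
}

int main(void)
{
	printf("%s\n", table[lookup(0x02040400)].msg);	/* "bus was reset" */
	printf("%s\n", table[lookup(0xdeadbeef)].msg);	/* falls back */
	return 0;
}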

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async (HCAM) from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If a host reset arrived while we were already resetting the
	   adapter and that reset failed, the IOA is now dead. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
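
ipr_reset_reload() cannot sleep holding the host lock, so it drops the lock, blocks until in_reset_reload clears, then retakes the lock before rechecking the adapter state. The closest userspace analogue is a condition variable, which drops and reacquires the mutex around the sleep; a sketch with invented names (build with -lpthread):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_done = PTHREAD_COND_INITIALIZER;
static bool in_reset;	/* set by a reset thread, cleared when finished */

/* Called with 'lock' held; returns with 'lock' held, like the driver's
 * unlock/wait_event/relock sequence. */
static void wait_for_reset(void)
{
	while (in_reset)
		/* Atomically releases 'lock' while sleeping, retakes it
		 * before returning, so the predicate recheck is safe. */
		pthread_cond_wait(&reset_done, &lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	wait_for_reset();	/* in_reset is false, so this returns at once */
	pthread_mutex_unlock(&lock);
	return 0;
}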

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
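
Note the inverted wildcard convention in the loop above: an 'X' in compare_product_id_byte[] marks a byte that must match, and any other byte is a don't-care that matches automatically. A compilable miniature with invented table entries:

#include <stdio.h>

#define ID_LEN 16

/* 'X' in cmp[] marks a byte that must equal pattern[]; anything else
 * is a don't-care and counts as a match automatically. */
static int ses_match(const char *cmp, const char *pattern, const char *id)
{
	int j, matches = 0;

	for (j = 0; j < ID_LEN; j++) {
		if (cmp[j] == 'X') {
			if (id[j] == pattern[j])
				matches++;
			else
				break;
		} else
			matches++;
	}
	return matches == ID_LEN;
}

int main(void)
{
	/* Invented entries: only the first five bytes are significant. */
	const char *cmp     = "XXXXX           ";
	const char *pattern = "HSBP1           ";

	printf("%d\n", ses_match(cmp, pattern, "HSBP1 whatever  "));	/* 1 */
	printf("%d\n", ses_match(cmp, pattern, "OTHER device    "));	/* 0 */
	return 0;
}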

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
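
Reading the units out of the return expression: if max_bus_speed_limit is expressed in MB/sec (an assumption consistent with the doc comment's "max 160MHz = max 320MB/sec" for a wide bus), then multiplying by 10 and dividing by the bytes transferred per clock yields the driver's 100 kHz units. A tiny worked example:

#include <stdio.h>

static unsigned int scsi_rate_100khz(unsigned int limit_mb_per_s,
				     unsigned int bus_width_bits)
{
	return (limit_mb_per_s * 10) / (bus_width_bits / 8);
}

int main(void)
{
	/* Wide (16-bit) bus capped at 320 MB/sec -> 1600 * 100 kHz = 160 MHz */
	printf("%u\n", scsi_rate_100khz(320, 16));
	/* Narrow (8-bit) bus at 160 MB/sec -> also 1600, i.e. 160 MHz */
	printf("%u\n", scsi_rate_100khz(160, 8));
	return 0;
}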

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
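
The wait loop above is a plain exponential backoff: the delay doubles on every pass (1, 2, 4, ... microseconds), so an unresponsive adapter is polled only about log2(max_delay) times, while a fast acknowledge is still seen within a microsecond or two. A self-contained user-space sketch of the same pattern; flag and usleep() stand in for the interrupt register and udelay() and are illustrative, not driver code:

#include <stdbool.h>
#include <unistd.h>

static volatile bool flag;	/* would be raised by hardware or another thread */

static int wait_for_flag(int max_delay_us)
{
	int delay = 1;

	while (delay < max_delay_us) {
		if (flag)
			return 0;	/* acknowledged */
		usleep(delay);
		delay += delay;		/* double the backoff interval */
	}
	return -1;			/* timed out */
}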

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
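
The loop above is the classic indirect register window: the adapter exposes its whole address space through just two registers, one that latches a target address and one that returns the word at that address. A sketch of the idea against a simulated device; the sim_dev struct and function names are invented for illustration:

#include <stdint.h>

struct sim_dev {
	uint32_t addr_reg;		/* latched target address */
	const uint32_t *memory;		/* simulated adapter memory */
};

static void window_write_addr(struct sim_dev *d, uint32_t addr)
{
	d->addr_reg = addr;
}

static uint32_t window_read_data(const struct sim_dev *d)
{
	return d->memory[d->addr_reg / 4];	/* word-addressed readback */
}

static void dump_words(struct sim_dev *d, uint32_t start,
		       uint32_t *dest, uint32_t nwords)
{
	uint32_t i;

	for (i = 0; i < nwords; i++) {
		window_write_addr(d, start + i * 4);	/* select the word */
		dest[i] = window_read_data(d);		/* then fetch it */
	}
}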

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
			readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
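
Stripped of the ipr register names, the per-word transfer above is a two-flag interlock: the host clears the acknowledge to say "I took that word", then waits for the adapter to raise it again to say "the next word is in the mailbox". A compact single-address-space model of the consumer side, ignoring the memory-barrier and atomicity details a real driver must handle; mailbox, word_ready and take_word() are invented for illustration:

#include <stdbool.h>
#include <stdint.h>

static volatile uint32_t mailbox;	/* producer deposits words here */
static volatile bool word_ready;	/* producer sets, consumer clears */

static void take_word(uint32_t *dest)
{
	while (!word_ready)
		;			/* wait for the producer's "ack" */
	*dest = mailbox;		/* read the word out of the mailbox */
	word_ready = false;		/* signal "data received" */
}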

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 *	number of bytes copied
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
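
The copy loop above juggles two independent limits at once: how much source data is left and how much room is left in the current page. Taking min() of the two remainders is the standard chunking idiom. The sketch below applies it to a plain user-space buffer copy; all names are invented for illustration, with 4096 standing in for PAGE_SIZE:

#include <stddef.h>
#include <string.h>

#define CHUNK_PAGE_SIZE 4096

static size_t copy_in_page_chunks(char *dst, const char *src, size_t length)
{
	size_t bytes_copied = 0;

	while (bytes_copied < length) {
		size_t page_offset = bytes_copied % CHUNK_PAGE_SIZE;
		size_t rem_len = length - bytes_copied;
		size_t rem_page_len = CHUNK_PAGE_SIZE - page_offset;
		size_t cur_len = rem_len < rem_page_len ? rem_len : rem_page_len;

		/* each iteration stays inside one page-sized region */
		memcpy(dst + bytes_copied, src + bytes_copied, cur_len);
		bytes_copied += cur_len;
	}
	return bytes_copied;
}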

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}
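
ipr_release_dump() is a textbook kref release callback: the final kref_put() invokes it with a pointer to the embedded refcount, and container_of() walks back to the enclosing object so it can be freed exactly once. A user-space model of the same pattern; the refcount type and names are simplified stand-ins, not the kernel API:

#include <stdlib.h>
#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct refcount {
	int count;
	void (*release)(struct refcount *ref);
};

struct object {
	char payload[64];
	struct refcount ref;	/* embedded, like dump->kref */
};

static void ref_put(struct refcount *ref)
{
	if (--ref->count == 0)
		ref->release(ref);	/* last reference frees the object */
}

static void object_release(struct refcount *ref)
{
	/* recover the enclosing object from the embedded member */
	struct object *obj = my_container_of(ref, struct object, ref);

	free(obj);
}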

/**
 * ipr_worker_thread - Worker thread
 * @work:	work struct embedded in the ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
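
With CONFIG_SCSI_IPR_TRACE enabled, the attribute above appears as a read-only binary file under the Scsi_Host class device, typically /sys/class/scsi_host/host<N>/trace (the exact path is assumed here for illustration and depends on the sysfs layout). A minimal user-space reader:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	const char *path = argc > 1 ? argv[1]
				    : "/sys/class/scsi_host/host0/trace";
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw binary trace records */
	close(fd);
	return EXIT_SUCCESS;
}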
3205 3205
3206 /** 3206 /**
3207 * ipr_show_fw_version - Show the firmware version 3207 * ipr_show_fw_version - Show the firmware version
3208 * @dev: class device struct 3208 * @dev: class device struct
3209 * @buf: buffer 3209 * @buf: buffer
3210 * 3210 *
3211 * Return value: 3211 * Return value:
3212 * number of bytes printed to buffer 3212 * number of bytes printed to buffer
3213 **/ 3213 **/
3214 static ssize_t ipr_show_fw_version(struct device *dev, 3214 static ssize_t ipr_show_fw_version(struct device *dev,
3215 struct device_attribute *attr, char *buf) 3215 struct device_attribute *attr, char *buf)
3216 { 3216 {
3217 struct Scsi_Host *shost = class_to_shost(dev); 3217 struct Scsi_Host *shost = class_to_shost(dev);
3218 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3218 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3219 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3219 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3220 unsigned long lock_flags = 0; 3220 unsigned long lock_flags = 0;
3221 int len; 3221 int len;
3222 3222
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", 3224 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3225 ucode_vpd->major_release, ucode_vpd->card_type, 3225 ucode_vpd->major_release, ucode_vpd->card_type,
3226 ucode_vpd->minor_release[0], 3226 ucode_vpd->minor_release[0],
3227 ucode_vpd->minor_release[1]); 3227 ucode_vpd->minor_release[1]);
3228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3229 return len; 3229 return len;
3230 } 3230 }
3231 3231
3232 static struct device_attribute ipr_fw_version_attr = { 3232 static struct device_attribute ipr_fw_version_attr = {
3233 .attr = { 3233 .attr = {
3234 .name = "fw_version", 3234 .name = "fw_version",
3235 .mode = S_IRUGO, 3235 .mode = S_IRUGO,
3236 }, 3236 },
3237 .show = ipr_show_fw_version, 3237 .show = ipr_show_fw_version,
3238 }; 3238 };
3239 3239
3240 /** 3240 /**
3241 * ipr_show_log_level - Show the adapter's error logging level 3241 * ipr_show_log_level - Show the adapter's error logging level
3242 * @dev: class device struct 3242 * @dev: class device struct
3243 * @buf: buffer 3243 * @buf: buffer
3244 * 3244 *
3245 * Return value: 3245 * Return value:
3246 * number of bytes printed to buffer 3246 * number of bytes printed to buffer
3247 **/ 3247 **/
3248 static ssize_t ipr_show_log_level(struct device *dev, 3248 static ssize_t ipr_show_log_level(struct device *dev,
3249 struct device_attribute *attr, char *buf) 3249 struct device_attribute *attr, char *buf)
3250 { 3250 {
3251 struct Scsi_Host *shost = class_to_shost(dev); 3251 struct Scsi_Host *shost = class_to_shost(dev);
3252 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3252 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3253 unsigned long lock_flags = 0; 3253 unsigned long lock_flags = 0;
3254 int len; 3254 int len;
3255 3255
3256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); 3257 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259 return len; 3259 return len;
3260 } 3260 }
3261 3261
3262 /** 3262 /**
3263 * ipr_store_log_level - Change the adapter's error logging level 3263 * ipr_store_log_level - Change the adapter's error logging level
3264 * @dev: class device struct 3264 * @dev: class device struct
3265 * @buf: buffer 3265 * @buf: buffer
3266 * 3266 *
3267 * Return value: 3267 * Return value:
3268 * number of bytes printed to buffer 3268 * number of bytes printed to buffer
3269 **/ 3269 **/
3270 static ssize_t ipr_store_log_level(struct device *dev, 3270 static ssize_t ipr_store_log_level(struct device *dev,
3271 struct device_attribute *attr, 3271 struct device_attribute *attr,
3272 const char *buf, size_t count) 3272 const char *buf, size_t count)
3273 { 3273 {
3274 struct Scsi_Host *shost = class_to_shost(dev); 3274 struct Scsi_Host *shost = class_to_shost(dev);
3275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3276 unsigned long lock_flags = 0; 3276 unsigned long lock_flags = 0;
3277 3277
3278 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3278 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3279 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); 3279 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3281 return strlen(buf); 3281 return strlen(buf);
3282 } 3282 }
3283 3283
3284 static struct device_attribute ipr_log_level_attr = { 3284 static struct device_attribute ipr_log_level_attr = {
3285 .attr = { 3285 .attr = {
3286 .name = "log_level", 3286 .name = "log_level",
3287 .mode = S_IRUGO | S_IWUSR, 3287 .mode = S_IRUGO | S_IWUSR,
3288 }, 3288 },
3289 .show = ipr_show_log_level, 3289 .show = ipr_show_log_level,
3290 .store = ipr_store_log_level 3290 .store = ipr_store_log_level
3291 }; 3291 };
3292 3292
3293 /** 3293 /**
3294 * ipr_store_diagnostics - IOA Diagnostics interface 3294 * ipr_store_diagnostics - IOA Diagnostics interface
3295 * @dev: device struct 3295 * @dev: device struct
3296 * @buf: buffer 3296 * @buf: buffer
3297 * @count: buffer size 3297 * @count: buffer size
3298 * 3298 *
3299 * This function will reset the adapter and wait a reasonable 3299 * This function will reset the adapter and wait a reasonable
3300 * amount of time for any errors that the adapter might log. 3300 * amount of time for any errors that the adapter might log.
3301 * 3301 *
3302 * Return value: 3302 * Return value:
3303 * count on success / other on failure 3303 * count on success / other on failure
3304 **/ 3304 **/
3305 static ssize_t ipr_store_diagnostics(struct device *dev, 3305 static ssize_t ipr_store_diagnostics(struct device *dev,
3306 struct device_attribute *attr, 3306 struct device_attribute *attr,
3307 const char *buf, size_t count) 3307 const char *buf, size_t count)
3308 { 3308 {
3309 struct Scsi_Host *shost = class_to_shost(dev); 3309 struct Scsi_Host *shost = class_to_shost(dev);
3310 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3310 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3311 unsigned long lock_flags = 0; 3311 unsigned long lock_flags = 0;
3312 int rc = count; 3312 int rc = count;
3313 3313
3314 if (!capable(CAP_SYS_ADMIN)) 3314 if (!capable(CAP_SYS_ADMIN))
3315 return -EACCES; 3315 return -EACCES;
3316 3316
3317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3318 while(ioa_cfg->in_reset_reload) { 3318 while(ioa_cfg->in_reset_reload) {
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3320 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3320 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3321 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3321 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3322 } 3322 }
3323 3323
3324 ioa_cfg->errors_logged = 0; 3324 ioa_cfg->errors_logged = 0;
3325 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3325 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3326 3326
3327 if (ioa_cfg->in_reset_reload) { 3327 if (ioa_cfg->in_reset_reload) {
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3329 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3330 3330
3331 /* Wait for a second for any errors to be logged */ 3331 /* Wait for a second for any errors to be logged */
3332 msleep(1000); 3332 msleep(1000);
3333 } else { 3333 } else {
3334 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3334 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3335 return -EIO; 3335 return -EIO;
3336 } 3336 }
3337 3337
3338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3339 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) 3339 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3340 rc = -EIO; 3340 rc = -EIO;
3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342 3342
3343 return rc; 3343 return rc;
3344 } 3344 }
3345 3345
3346 static struct device_attribute ipr_diagnostics_attr = { 3346 static struct device_attribute ipr_diagnostics_attr = {
3347 .attr = { 3347 .attr = {
3348 .name = "run_diagnostics", 3348 .name = "run_diagnostics",
3349 .mode = S_IWUSR, 3349 .mode = S_IWUSR,
3350 }, 3350 },
3351 .store = ipr_store_diagnostics 3351 .store = ipr_store_diagnostics
3352 }; 3352 };
3353 3353
3354 /** 3354 /**
3355 * ipr_show_adapter_state - Show the adapter's state 3355 * ipr_show_adapter_state - Show the adapter's state
3356 * @class_dev: device struct 3356 * @class_dev: device struct
3357 * @buf: buffer 3357 * @buf: buffer
3358 * 3358 *
3359 * Return value: 3359 * Return value:
3360 * number of bytes printed to buffer 3360 * number of bytes printed to buffer
3361 **/ 3361 **/
3362 static ssize_t ipr_show_adapter_state(struct device *dev, 3362 static ssize_t ipr_show_adapter_state(struct device *dev,
3363 struct device_attribute *attr, char *buf) 3363 struct device_attribute *attr, char *buf)
3364 { 3364 {
3365 struct Scsi_Host *shost = class_to_shost(dev); 3365 struct Scsi_Host *shost = class_to_shost(dev);
3366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3367 unsigned long lock_flags = 0; 3367 unsigned long lock_flags = 0;
3368 int len; 3368 int len;
3369 3369
3370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3371 if (ioa_cfg->ioa_is_dead) 3371 if (ioa_cfg->ioa_is_dead)
3372 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3372 len = snprintf(buf, PAGE_SIZE, "offline\n");
3373 else 3373 else
3374 len = snprintf(buf, PAGE_SIZE, "online\n"); 3374 len = snprintf(buf, PAGE_SIZE, "online\n");
3375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3376 return len; 3376 return len;
3377 } 3377 }
3378 3378
3379 /** 3379 /**
3380 * ipr_store_adapter_state - Change adapter state 3380 * ipr_store_adapter_state - Change adapter state
3381 * @dev: device struct 3381 * @dev: device struct
3382 * @buf: buffer 3382 * @buf: buffer
3383 * @count: buffer size 3383 * @count: buffer size
3384 * 3384 *
3385 * This function will change the adapter's state. 3385 * This function will change the adapter's state.
3386 * 3386 *
3387 * Return value: 3387 * Return value:
3388 * count on success / other on failure 3388 * count on success / other on failure
3389 **/ 3389 **/
3390 static ssize_t ipr_store_adapter_state(struct device *dev, 3390 static ssize_t ipr_store_adapter_state(struct device *dev,
3391 struct device_attribute *attr, 3391 struct device_attribute *attr,
3392 const char *buf, size_t count) 3392 const char *buf, size_t count)
3393 { 3393 {
3394 struct Scsi_Host *shost = class_to_shost(dev); 3394 struct Scsi_Host *shost = class_to_shost(dev);
3395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3396 unsigned long lock_flags; 3396 unsigned long lock_flags;
3397 int result = count; 3397 int result = count;
3398 3398
3399 if (!capable(CAP_SYS_ADMIN)) 3399 if (!capable(CAP_SYS_ADMIN))
3400 return -EACCES; 3400 return -EACCES;
3401 3401
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) { 3403 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3404 ioa_cfg->ioa_is_dead = 0; 3404 ioa_cfg->ioa_is_dead = 0;
3405 ioa_cfg->reset_retries = 0; 3405 ioa_cfg->reset_retries = 0;
3406 ioa_cfg->in_ioa_bringdown = 0; 3406 ioa_cfg->in_ioa_bringdown = 0;
3407 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3407 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3408 } 3408 }
3409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3410 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3410 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3411 3411
3412 return result; 3412 return result;
3413 } 3413 }
3414 3414
3415 static struct device_attribute ipr_ioa_state_attr = { 3415 static struct device_attribute ipr_ioa_state_attr = {
3416 .attr = { 3416 .attr = {
3417 .name = "online_state", 3417 .name = "online_state",
3418 .mode = S_IRUGO | S_IWUSR, 3418 .mode = S_IRUGO | S_IWUSR,
3419 }, 3419 },
3420 .show = ipr_show_adapter_state, 3420 .show = ipr_show_adapter_state,
3421 .store = ipr_store_adapter_state 3421 .store = ipr_store_adapter_state
3422 }; 3422 };

/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev: device struct
 * @attr: device attribute struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len: buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
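
/*
 * Illustration (not part of ipr.c): the sizing math of
 * ipr_alloc_ucode_buffer() as a standalone userspace sketch. The page
 * size and list limit are example assumptions, and get_order() is
 * re-derived locally rather than taken from the kernel.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096
#define EX_MAX_SGLIST	64

static int ex_get_order(int size)
{
	int order = 0;

	while ((EX_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	int buf_len = 1300000;	/* hypothetical microcode image size */
	int sg_size = buf_len / (EX_MAX_SGLIST - 1);
	int order = ex_get_order(sg_size);
	int bsize_elem = EX_PAGE_SIZE * (1 << order);
	int num_elem = (buf_len + bsize_elem - 1) / bsize_elem;

	printf("order %d: %d bytes per element, %d elements\n",
	       order, bsize_elem, num_elem);
	return 0;
}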

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist: scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist: scatter/gather list pointer
 * @buffer: buffer pointer
 * @len: buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
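
/*
 * Illustration (not part of ipr.c): the full-chunk plus remainder copy
 * performed by ipr_copy_ucode_buffer(), against plain malloc'd buffers
 * standing in for kmap()'d pages. Sizes are arbitrary example values.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	enum { CHUNK = 8192, LEN = 20000 };	/* LEN % CHUNK != 0 */
	static char src[LEN];
	char *chunks[(LEN + CHUNK - 1) / CHUNK];
	int i, full = LEN / CHUNK, rem = LEN % CHUNK;

	memset(src, 0xab, sizeof(src));
	for (i = 0; i < full; i++) {
		if (!(chunks[i] = malloc(CHUNK)))
			return 1;
		memcpy(chunks[i], src + i * CHUNK, CHUNK);
	}
	if (rem) {		/* the tail element carries len % bsize_elem */
		if (!(chunks[i] = malloc(CHUNK)))
			return 1;
		memcpy(chunks[i], src + i * CHUNK, rem);
	}
	printf("%d full chunks, %d byte tail\n", full, rem);
	return 0;
}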

/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
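
/*
 * Illustration (not part of ipr.c): filling 32-bit IOADL descriptors
 * the way ipr_build_ucode_ioadl() does, with stand-in types. htonl()
 * plays the role of cpu_to_be32(), the dma_addr/dma_len arrays replace
 * a mapped scatterlist, and the flag values are the example's own, not
 * the adapter's.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FLAGS_WRITE	0x01000000u
#define EX_FLAGS_LAST	0x80000000u

struct ex_ioadl_desc {
	uint32_t flags_and_data_len;	/* big endian on the wire */
	uint32_t address;
};

int main(void)
{
	uint32_t dma_addr[3] = { 0x10000, 0x20000, 0x30000 };
	uint32_t dma_len[3] = { 32768, 32768, 4096 };
	struct ex_ioadl_desc ioadl[3];
	int i, n = 3;

	for (i = 0; i < n; i++) {
		ioadl[i].flags_and_data_len =
			htonl(EX_FLAGS_WRITE | dma_len[i]);
		ioadl[i].address = htonl(dma_addr[i]);
	}
	/* tag the final element so the IOA knows where the list ends */
	ioadl[n - 1].flags_and_data_len |= htonl(EX_FLAGS_LAST);

	for (i = 0; i < n; i++)
		printf("desc %d: %08x %08x\n", i,
		       ntohl(ioadl[i].flags_and_data_len),
		       ntohl(ioadl[i].address));
	return 0;
}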

/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg: ioa config struct
 * @sglist: scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
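
/*
 * Illustration (not part of ipr.c): the unlock/wait/relock dance in
 * ipr_update_ioa_ucode(), restated as a userspace analogy. host_lock
 * is a spinlock, so the driver must open-code what pthread_cond_wait()
 * does implicitly: drop the lock before sleeping on reset_wait_q,
 * retake it on wakeup, and retest the flag.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_wait_q = PTHREAD_COND_INITIALIZER;
static int in_reset_reload = 1;

static void *reset_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	in_reset_reload = 0;		/* the reset/reload has finished */
	pthread_cond_broadcast(&reset_wait_q);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reset_thread, NULL);
	pthread_mutex_lock(&lock);
	while (in_reset_reload)		/* loop: wakeups can be spurious */
		pthread_cond_wait(&reset_wait_q, &lock); /* drops the lock */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("reset/reload quiesced\n");
	return 0;
}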

/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @attr: device attribute struct
 * @buf: buffer
 * @count: buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Copy the file name and strip any trailing newline. Taking the
	 * length from the truncated copy (rather than from snprintf's
	 * return value, which is the untruncated length) keeps the
	 * index inside fname for oversized input.
	 */
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name = "update_fw",
		.mode = S_IWUSR,
	},
	.store = ipr_store_update_fw
};
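
/*
 * Illustration (not part of ipr.c): triggering a microcode download
 * from userspace through the update_fw attribute above. The host
 * number and image name are assumptions, and the image must live where
 * the firmware loader looks (e.g. /lib/firmware). Equivalent to:
 * echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "ibm-ucode.bin\n";	/* hypothetical image */
	int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, name, strlen(name)) < 0)
		perror("write");
	close(fd);
	return 0;
}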

/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev: class device struct
 * @attr: device attribute struct
 * @buf: buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name = "fw_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
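
/*
 * Illustration (not part of ipr.c): the offset-rebasing scheme of
 * ipr_read_dump(), with one flat region standing in for the driver
 * dump header and an array of fixed-size pages standing in for the
 * paged IOA data. All sizes are example values.
 */
#include <stdio.h>
#include <string.h>

#define HDR_LEN	64
#define NPAGES	4
#define PG	16

static char hdr[HDR_LEN];
static char pages[NPAGES][PG];

/* Copy count bytes of the logical dump starting at off into buf. */
static size_t dump_read(char *buf, size_t off, size_t count)
{
	size_t done = 0, len;

	if (count && off < HDR_LEN) {		/* region 1: flat header */
		len = (off + count > HDR_LEN) ? HDR_LEN - off : count;
		memcpy(buf, hdr + off, len);
		done += len;
		off += len;
		count -= len;
	}
	off -= HDR_LEN;				/* rebase into region 2 */
	while (count && off < (size_t)NPAGES * PG) {	/* region 2: pages */
		len = PG - off % PG;		/* stop at each page edge */
		if (len > count)
			len = count;
		memcpy(buf + done, &pages[off / PG][off % PG], len);
		done += len;
		off += len;
		count -= len;
	}
	return done;
}

int main(void)
{
	char out[32];

	memset(hdr, 'H', sizeof(hdr));
	memset(pages, 'P', sizeof(pages));
	printf("read %zu bytes\n", dump_read(out, 60, sizeof(out)));
	return 0;
}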

/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif

/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev: scsi device struct
 * @qdepth: depth to set
 * @reason: calling context
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev: scsi device struct
 * @tag_type: type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = "adapter_handle",
		.mode = S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       ipr_format_res_path(res->res_path, buffer,
						   sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = "resource_path",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name = "device_id",
		.mode = S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name = "resource_type",
		.mode = S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};

/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev: scsi device struct
 * @block_device: block device pointer
 * @capacity: capacity of the device
 * @parm: Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
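
/*
 * Illustration (not part of ipr.c): why the fixed 128/32 geometry
 * above keeps partitions 4k-aligned. One cylinder is 128 * 32 = 4096
 * sectors, so every cylinder boundary falls on a multiple of 4096
 * sectors (2 MiB with 512-byte sectors). The capacity is an example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 143374744ULL;	/* sectors */
	int heads = 128, sectors = 32;
	unsigned long long cylinders = capacity / (heads * sectors);

	printf("C/H/S = %llu/%d/%d, cylinder = %d sectors\n",
	       cylinders, heads, sectors, heads * sectors);
	return 0;
}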
4328 4328
4329 /** 4329 /**
4330 * ipr_find_starget - Find target based on bus/target. 4330 * ipr_find_starget - Find target based on bus/target.
4331 * @starget: scsi target struct 4331 * @starget: scsi target struct
4332 * 4332 *
4333 * Return value: 4333 * Return value:
4334 * resource entry pointer if found / NULL if not found 4334 * resource entry pointer if found / NULL if not found
4335 **/ 4335 **/
4336 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) 4336 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4337 { 4337 {
4338 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4338 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4340 struct ipr_resource_entry *res; 4340 struct ipr_resource_entry *res;
4341 4341
4342 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4342 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4343 if ((res->bus == starget->channel) && 4343 if ((res->bus == starget->channel) &&
4344 (res->target == starget->id)) { 4344 (res->target == starget->id)) {
4345 return res; 4345 return res;
4346 } 4346 }
4347 } 4347 }
4348 4348
4349 return NULL; 4349 return NULL;
4350 } 4350 }
4351 4351
4352 static struct ata_port_info sata_port_info; 4352 static struct ata_port_info sata_port_info;
4353 4353
4354 /** 4354 /**
4355 * ipr_target_alloc - Prepare for commands to a SCSI target 4355 * ipr_target_alloc - Prepare for commands to a SCSI target
4356 * @starget: scsi target struct 4356 * @starget: scsi target struct
4357 * 4357 *
4358 * If the device is a SATA device, this function allocates an 4358 * If the device is a SATA device, this function allocates an
4359 * ATA port with libata, else it does nothing. 4359 * ATA port with libata, else it does nothing.
4360 * 4360 *
4361 * Return value: 4361 * Return value:
4362 * 0 on success / non-0 on failure 4362 * 0 on success / non-0 on failure
4363 **/ 4363 **/
4364 static int ipr_target_alloc(struct scsi_target *starget) 4364 static int ipr_target_alloc(struct scsi_target *starget)
4365 { 4365 {
4366 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4366 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4367 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4367 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4368 struct ipr_sata_port *sata_port; 4368 struct ipr_sata_port *sata_port;
4369 struct ata_port *ap; 4369 struct ata_port *ap;
4370 struct ipr_resource_entry *res; 4370 struct ipr_resource_entry *res;
4371 unsigned long lock_flags; 4371 unsigned long lock_flags;
4372 4372
4373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4374 res = ipr_find_starget(starget); 4374 res = ipr_find_starget(starget);
4375 starget->hostdata = NULL; 4375 starget->hostdata = NULL;
4376 4376
4377 if (res && ipr_is_gata(res)) { 4377 if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
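			/*
			 * Volume sets get a longer R/W timeout and a larger
			 * maximum transfer size than plain disks.
			 */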
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

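		/*
		 * ATA devices are capped at the adapter's per-LUN ATA
		 * queue depth; all other devices use the host default.
		 */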
		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port)
		rc = ata_sas_port_init(sata_port->ap);
	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
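		/*
		 * SATA devices also need their ATA port initialized; that
		 * path may sleep, so the host lock is dropped first.
		 */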
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	if (!ioa_cfg->in_reset_reload) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

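	/*
	 * On SIS-64 adapters the ATA register block follows the IOARCB
	 * and is located via add_cmd_parms_offset; legacy adapters embed
	 * it in the IOARCB's additional data area.
	 */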
	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
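	/*
	 * If an adapter reset is already in progress, wait for it to
	 * finish before issuing the link reset.
	 */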
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}

/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

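	/*
	 * Route completions of any ops still pending against this device
	 * to the EH done handlers, and mark pending ATA commands failed
	 * so libata error handling will pick them up.
	 */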
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
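	/*
	 * SIS-64 adapters identify devices by resource path rather than
	 * bus number, so the bus reset is only reported to the midlayer
	 * on legacy adapters.
	 */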
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
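	/*
	 * Cross-link the abort and the bus reset so ipr_bus_reset_done()
	 * can tell whether the abort completed while the reset was in
	 * flight.
	 */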
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout
	 * was caused by an EEH error that has not yet been detected. In
	 * such cases, reading a register will trigger the EEH recovery
	 * infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

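	/*
	 * If the op is no longer on the pending queue, it has already
	 * completed; there is nothing left to cancel.
	 */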
	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
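		/*
		 * Only the HRRQ-updated bit is set, yet the main ISR loop
		 * found no new responses: treat the interrupt as spurious
		 * and, on adapters that require it, clear it explicitly.
		 */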
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}

/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	u32 int_reg = 0;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

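		/*
		 * The host RRQ is a circular buffer of response handles.
		 * The adapter flips the toggle bit each time it wraps, so
		 * an entry whose toggle bit matches ours is a new
		 * completion.
		 */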
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd && !ioa_cfg->clear_isr)
			break;

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return IRQ_HANDLED;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

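	/* Flag the final descriptor so the adapter knows where the chain ends. */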
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

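	/*
	 * Small S/G lists fit in the spare space of the IOARCB itself,
	 * which saves the adapter a separate DMA fetch of the descriptor
	 * list.
	 */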
	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				       offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}

/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

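	/*
	 * scsi_populate_tag_msg() fills in the two-byte SPI queue tag
	 * message; byte 0 is the tag message type, which selects the
	 * task attribute.
	 */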
	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

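	/*
	 * A nonzero sense key here means the request sense itself failed;
	 * fail the command. Otherwise hand the captured sense data back
	 * to the midlayer.
	 */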
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

5391 if (ipr_cmd->ioa_cfg->sis64) 5391 if (ipr_cmd->ioa_cfg->sis64)
5392 ioarcb->u.sis64_addr_data.data_ioadl_addr = 5392 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5393 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 5393 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5394 else { 5394 else {
5395 ioarcb->write_ioadl_addr = 5395 ioarcb->write_ioadl_addr =
5396 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 5396 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5397 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5397 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5398 } 5398 }
5399 } 5399 }
5400 5400
5401 /** 5401 /**
5402 * ipr_erp_request_sense - Send request sense to a device 5402 * ipr_erp_request_sense - Send request sense to a device
5403 * @ipr_cmd: ipr command struct 5403 * @ipr_cmd: ipr command struct
5404 * 5404 *
5405 * This function sends a request sense to a device as a result 5405 * This function sends a request sense to a device as a result
5406 * of a check condition. 5406 * of a check condition.
5407 * 5407 *
5408 * Return value: 5408 * Return value:
5409 * nothing 5409 * nothing
5410 **/ 5410 **/
5411 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 5411 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5412 { 5412 {
5413 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5413 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5414 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5414 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5415 5415
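	/* If the preceding ERP op (e.g. cancel all) itself failed, give up
	 * rather than issuing a request sense. */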
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

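	/* An untagged device has no queue to cancel; go straight to the
	 * request sense. */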
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}

/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

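	/* Clamp the returned status length to the size of the IOASA format
	 * this adapter actually uses. */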
	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
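		/* Descriptor format sense data (response code 0x72): bytes
		 * 8-19 form an information descriptor holding the 64-bit
		 * failing LBA. */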
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
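		/* Fixed format sense data (response code 0x70); a 32-bit
		 * failing LBA, if any, goes in the information field. */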
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	return 1;
}

/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

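	/* Devices other than generic SCSI (e.g. volume sets) return status
	 * only in the IOASA, so synthesize sense data from it. */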
	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

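	/* Fast path: no error reported, complete straight back to the
	 * mid-layer; otherwise hand the command to ERP. */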
	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(ipr_cmd->scsi_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:	done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
				void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

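	/* CDB opcodes 0xC0 and above are vendor specific; route them to
	 * the IOA itself unless the device is generic SCSI. */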
	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0)) {
		if (ioa_cfg->sis64)
			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		else
			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
	}

	if (unlikely(rc != 0)) {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	ipr_send_command(ipr_cmd);
	return 0;
}

static DEF_SCSI_QCMD(ipr_queuecommand)

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};

/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap: ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
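	/* Wait out any adapter reset/reload in progress before touching
	 * the device. */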
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}

/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 *	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

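	/* If the timed out internal command is still pending on the
	 * adapter, reset the device to reclaim it. */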
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

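	/* If the IOA flagged a failure, force a non-zero error mask even
	 * when the ATA status byte alone looks clean. */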
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

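	/* Unlike the 32-bit IOADL, the 64-bit descriptors carry the flags
	 * and length in separate fields. */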
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;

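	/* On SIS-64 adapters the ATA taskfile registers follow the IOARCB
	 * as additional command parameters; older adapters carry them
	 * inline in the IOARCB. */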
6196 if (ioa_cfg->sis64) { 6196 if (ioa_cfg->sis64) {
6197 regs = &ipr_cmd->i.ata_ioadl.regs; 6197 regs = &ipr_cmd->i.ata_ioadl.regs;
6198 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 6198 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6199 } else 6199 } else
6200 regs = &ioarcb->u.add_data.u.regs; 6200 regs = &ioarcb->u.add_data.u.regs;
6201 6201
6202 memset(regs, 0, sizeof(*regs)); 6202 memset(regs, 0, sizeof(*regs));
6203         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6204
6205         list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6206         ipr_cmd->qc = qc;
6207         ipr_cmd->done = ipr_sata_done;
6208         ipr_cmd->ioarcb.res_handle = res->res_handle;
6209         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6210         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6211         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6212         ipr_cmd->dma_use_sg = qc->n_elem;
6213
6214         if (ioa_cfg->sis64)
6215                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6216         else
6217                 ipr_build_ata_ioadl(ipr_cmd, qc);
6218
6219         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6220         ipr_copy_sata_tf(regs, &qc->tf);
6221         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6222         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6223
6224         switch (qc->tf.protocol) {
6225         case ATA_PROT_NODATA:
6226         case ATA_PROT_PIO:
6227                 break;
6228
6229         case ATA_PROT_DMA:
6230                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6231                 break;
6232
6233         case ATAPI_PROT_PIO:
6234         case ATAPI_PROT_NODATA:
6235                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6236                 break;
6237
6238         case ATAPI_PROT_DMA:
6239                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6240                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6241                 break;
6242
6243         default:
6244                 WARN_ON(1);
6245                 return AC_ERR_INVALID;
6246         }
6247
6248         ipr_send_command(ipr_cmd);
6249
6250         return 0;
6251 }
6252
6253 /**
6254  * ipr_qc_fill_rtf - Read result TF
6255  * @qc: ATA queued command
6256  *
6257  * Return value:
6258  *      true
6259  **/
6260 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6261 {
6262         struct ipr_sata_port *sata_port = qc->ap->private_data;
6263         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6264         struct ata_taskfile *tf = &qc->result_tf;
6265
6266         tf->feature = g->error;
6267         tf->nsect = g->nsect;
6268         tf->lbal = g->lbal;
6269         tf->lbam = g->lbam;
6270         tf->lbah = g->lbah;
6271         tf->device = g->device;
6272         tf->command = g->status;
6273         tf->hob_nsect = g->hob_nsect;
6274         tf->hob_lbal = g->hob_lbal;
6275         tf->hob_lbam = g->hob_lbam;
6276         tf->hob_lbah = g->hob_lbah;
6277         tf->ctl = g->alt_status;
6278
6279         return true;
6280 }
6281
6282 static struct ata_port_operations ipr_sata_ops = {
6283         .phy_reset = ipr_ata_phy_reset,
6284         .hardreset = ipr_sata_reset,
6285         .post_internal_cmd = ipr_ata_post_internal,
6286         .qc_prep = ata_noop_qc_prep,
6287         .qc_issue = ipr_qc_issue,
6288         .qc_fill_rtf = ipr_qc_fill_rtf,
6289         .port_start = ata_sas_port_start,
6290         .port_stop = ata_sas_port_stop
6291 };
6292
6293 static struct ata_port_info sata_port_info = {
6294         .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6295         .pio_mask = ATA_PIO4_ONLY,
6296         .mwdma_mask = ATA_MWDMA2,
6297         .udma_mask = ATA_UDMA6,
6298         .port_ops = &ipr_sata_ops
6299 };
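For context, a hedged sketch of how a static port_info like the one above is typically handed to libata's SAS glue at target-allocation time elsewhere in this driver; the surrounding variables (ioa_cfg, sata_port, shost) are assumed from that call site, and error handling is trimmed. Not a verbatim excerpt:

        /* Sketch only: assumes the ata_sas_port_alloc() helper of this
         * kernel era and the shape of ipr_target_alloc(). */
        struct ata_port *ap;

        ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
        if (!ap)
                return -ENOMEM;
        ap->private_data = sata_port;   /* read back later in ipr_qc_fill_rtf() */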
6300
6301 #ifdef CONFIG_PPC_PSERIES
6302 static const u16 ipr_blocked_processors[] = {
6303         PV_NORTHSTAR,
6304         PV_PULSAR,
6305         PV_POWER4,
6306         PV_ICESTAR,
6307         PV_SSTAR,
6308         PV_POWER4p,
6309         PV_630,
6310         PV_630p
6311 };
6312
6313 /**
6314  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6315  * @ioa_cfg: ioa cfg struct
6316  *
6317  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6318  * certain pSeries hardware. This function determines if the given
6319  * adapter is in one of these configurations or not.
6320  *
6321  * Return value:
6322  *      1 if adapter is not supported / 0 if adapter is supported
6323  **/
6324 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6325 {
6326         int i;
6327
6328         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6329                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6330                         if (__is_processor(ipr_blocked_processors[i]))
6331                                 return 1;
6332                 }
6333         }
6334         return 0;
6335 }
6336 #else
6337 #define ipr_invalid_adapter(ioa_cfg) 0
6338 #endif
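A hedged sketch of how a probe path might consult this gate; the message text, return code, and cleanup label are illustrative, not quoted from ipr_probe_ioa():

        /* Illustrative only; the exact message and unwind path differ. */
        if (ipr_invalid_adapter(ioa_cfg)) {
                dev_err(&pdev->dev,
                        "Adapter not supported on this hardware configuration.\n");
                rc = -ENODEV;
                goto cleanup_nolog;     /* hypothetical label */
        }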
6339
6340 /**
6341  * ipr_ioa_bringdown_done - IOA bring down completion.
6342  * @ipr_cmd: ipr command struct
6343  *
6344  * This function processes the completion of an adapter bring down.
6345  * It wakes any reset sleepers.
6346  *
6347  * Return value:
6348  *      IPR_RC_JOB_RETURN
6349  **/
6350 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6351 {
6352         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6353
6354         ENTER;
6355         ioa_cfg->in_reset_reload = 0;
6356         ioa_cfg->reset_retries = 0;
6357         list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6358         wake_up_all(&ioa_cfg->reset_wait_q);
6359
6360         spin_unlock_irq(ioa_cfg->host->host_lock);
6361         scsi_unblock_requests(ioa_cfg->host);
6362         spin_lock_irq(ioa_cfg->host->host_lock);
6363         LEAVE;
6364
6365         return IPR_RC_JOB_RETURN;
6366 }
6367
6368 /**
6369  * ipr_ioa_reset_done - IOA reset completion.
6370  * @ipr_cmd: ipr command struct
6371  *
6372  * This function processes the completion of an adapter reset.
6373  * It schedules any necessary mid-layer add/removes and
6374  * wakes any reset sleepers.
6375  *
6376  * Return value:
6377  *      IPR_RC_JOB_RETURN
6378  **/
6379 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6380 {
6381         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6382         struct ipr_resource_entry *res;
6383         struct ipr_hostrcb *hostrcb, *temp;
6384         int i = 0;
6385
6386         ENTER;
6387         ioa_cfg->in_reset_reload = 0;
6388         ioa_cfg->allow_cmds = 1;
6389         ioa_cfg->reset_cmd = NULL;
6390         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6391
6392         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6393                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6394                         ipr_trace;
6395                         break;
6396                 }
6397         }
6398         schedule_work(&ioa_cfg->work_q);
6399
6400         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6401                 list_del(&hostrcb->queue);
6402                 if (i++ < IPR_NUM_LOG_HCAMS)
6403                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6404                 else
6405                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6406         }
6407
6408         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6409         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6410
6411         ioa_cfg->reset_retries = 0;
6412         list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6413         wake_up_all(&ioa_cfg->reset_wait_q);
6414
6415         spin_unlock(ioa_cfg->host->host_lock);
6416         scsi_unblock_requests(ioa_cfg->host);
6417         spin_lock(ioa_cfg->host->host_lock);
6418
6419         if (!ioa_cfg->allow_cmds)
6420                 scsi_block_requests(ioa_cfg->host);
6421
6422         LEAVE;
6423         return IPR_RC_JOB_RETURN;
6424 }
6425
6426 /**
6427  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6428  * @supported_dev: supported device struct
6429  * @vpids: vendor product id struct
6430  *
6431  * Return value:
6432  *      none
6433  **/
6434 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6435                                  struct ipr_std_inq_vpids *vpids)
6436 {
6437         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6438         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6439         supported_dev->num_records = 1;
6440         supported_dev->data_length =
6441                 cpu_to_be16(sizeof(struct ipr_supported_device));
6442         supported_dev->reserved = 0;
6443 }
6444
6445 /**
6446  * ipr_set_supported_devs - Send Set Supported Devices for a device
6447  * @ipr_cmd: ipr command struct
6448  *
6449  * This function sends a Set Supported Devices to the adapter
6450  *
6451  * Return value:
6452  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6453  **/
6454 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6455 {
6456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6457         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6458         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6459         struct ipr_resource_entry *res = ipr_cmd->u.res;
6460
6461         ipr_cmd->job_step = ipr_ioa_reset_done;
6462
6463         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6464                 if (!ipr_is_scsi_disk(res))
6465                         continue;
6466
6467                 ipr_cmd->u.res = res;
6468                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6469
6470                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6471                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6472                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6473
6474                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6475                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6476                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6477                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6478
6479                 ipr_init_ioadl(ipr_cmd,
6480                                ioa_cfg->vpd_cbs_dma +
6481                                  offsetof(struct ipr_misc_cbs, supp_dev),
6482                                sizeof(struct ipr_supported_device),
6483                                IPR_IOADL_FLAGS_WRITE_LAST);
6484
6485                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6486                            IPR_SET_SUP_DEVICE_TIMEOUT);
6487
6488                 if (!ioa_cfg->sis64)
6489                         ipr_cmd->job_step = ipr_set_supported_devs;
6490                 return IPR_RC_JOB_RETURN;
6491         }
6492
6493         return IPR_RC_JOB_CONTINUE;
6494 }
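The CONTINUE/RETURN contract above is easiest to read against the loop that drives it. A minimal sketch of that engine, assuming the shape of ipr_reset_ioa_job() defined elsewhere in this file (the real loop also checks the IOASC and dispatches job_step_failed on errors):

        /* Minimal sketch; not a verbatim excerpt. Each step either does its
         * work inline and returns IPR_RC_JOB_CONTINUE (run the next job_step
         * now), or fires an async request via ipr_do_req() and returns
         * IPR_RC_JOB_RETURN (the completion path re-enters this engine). */
        static void example_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
        {
                int rc = IPR_RC_JOB_CONTINUE;

                while (rc == IPR_RC_JOB_CONTINUE)
                        rc = ipr_cmd->job_step(ipr_cmd);
        }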
6495
6496 /**
6497  * ipr_get_mode_page - Locate specified mode page
6498  * @mode_pages: mode page buffer
6499  * @page_code: page code to find
6500  * @len: minimum required length for mode page
6501  *
6502  * Return value:
6503  *      pointer to mode page / NULL on failure
6504  **/
6505 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6506                                u32 page_code, u32 len)
6507 {
6508         struct ipr_mode_page_hdr *mode_hdr;
6509         u32 page_length;
6510         u32 length;
6511
6512         if (!mode_pages || (mode_pages->hdr.length == 0))
6513                 return NULL;
6514
6515         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6516         mode_hdr = (struct ipr_mode_page_hdr *)
6517                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6518
6519         while (length) {
6520                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6521                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6522                                 return mode_hdr;
6523                         break;
6524                 } else {
6525                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6526                                        mode_hdr->page_length);
6527                         length -= page_length;
6528                         mode_hdr = (struct ipr_mode_page_hdr *)
6529                                 ((unsigned long)mode_hdr + page_length);
6530                 }
6531         }
6532         return NULL;
6533 }
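The length arithmetic at the top of ipr_get_mode_page() follows the MODE SENSE(6) parameter layout from SPC: the mode data length byte excludes itself, so the full buffer is hdr.length + 1 bytes, of which 4 are the parameter header and block_desc_len are block descriptors. A self-contained restatement, with an illustrative header struct (not the driver's):

        /* Worked restatement of the computation above. For hdr.length = 23
         * and an 8-byte block descriptor: (23 + 1) - 4 - 8 = 12 bytes of
         * mode pages follow the descriptors. */
        struct mode_param_hdr6 {
                u8 length;              /* bytes that follow this byte */
                u8 medium_type;
                u8 device_specific;
                u8 block_desc_len;
        };

        static inline u32 mode_pages_bytes(const struct mode_param_hdr6 *hdr)
        {
                return (hdr->length + 1) - sizeof(*hdr) - hdr->block_desc_len;
        }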
6534
6535 /**
6536  * ipr_check_term_power - Check for term power errors
6537  * @ioa_cfg: ioa config struct
6538  * @mode_pages: IOAFP mode pages buffer
6539  *
6540  * Check the IOAFP's mode page 28 for term power errors
6541  *
6542  * Return value:
6543  *      nothing
6544  **/
6545 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6546                                  struct ipr_mode_pages *mode_pages)
6547 {
6548         int i;
6549         int entry_length;
6550         struct ipr_dev_bus_entry *bus;
6551         struct ipr_mode_page28 *mode_page;
6552
6553         mode_page = ipr_get_mode_page(mode_pages, 0x28,
6554                                       sizeof(struct ipr_mode_page28));
6555
6556         entry_length = mode_page->entry_length;
6557
6558         bus = mode_page->bus;
6559
6560         for (i = 0; i < mode_page->num_entries; i++) {
6561                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6562                         dev_err(&ioa_cfg->pdev->dev,
6563                                 "Term power is absent on scsi bus %d\n",
6564                                 bus->res_addr.bus);
6565                 }
6566
6567                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6568         }
6569 }
6570
6571 /**
6572  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6573  * @ioa_cfg: ioa config struct
6574  *
6575  * Looks through the config table checking for SES devices. If
6576  * the SES device is in the SES table indicating a maximum SCSI
6577  * bus speed, the speed is limited for the bus.
6578  *
6579  * Return value:
6580  *      none
6581  **/
6582 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6583 {
6584         u32 max_xfer_rate;
6585         int i;
6586
6587         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6588                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6589                                                        ioa_cfg->bus_attr[i].bus_width);
6590
6591                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6592                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6593         }
6594 }
6595
6596 /**
6597  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6598  * @ioa_cfg: ioa config struct
6599  * @mode_pages: mode page 28 buffer
6600  *
6601  * Updates mode page 28 based on driver configuration
6602  *
6603  * Return value:
6604  *      none
6605  **/
6606 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6607                                           struct ipr_mode_pages *mode_pages)
6608 {
6609         int i, entry_length;
6610         struct ipr_dev_bus_entry *bus;
6611         struct ipr_bus_attributes *bus_attr;
6612         struct ipr_mode_page28 *mode_page;
6613
6614         mode_page = ipr_get_mode_page(mode_pages, 0x28,
6615                                       sizeof(struct ipr_mode_page28));
6616
6617         entry_length = mode_page->entry_length;
6618
6619         /* Loop for each device bus entry */
6620         for (i = 0, bus = mode_page->bus;
6621              i < mode_page->num_entries;
6622              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6623                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6624                         dev_err(&ioa_cfg->pdev->dev,
6625                                 "Invalid resource address reported: 0x%08X\n",
6626                                 IPR_GET_PHYS_LOC(bus->res_addr));
6627                         continue;
6628                 }
6629
6630                 bus_attr = &ioa_cfg->bus_attr[i];
6631                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6632                 bus->bus_width = bus_attr->bus_width;
6633                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6634                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6635                 if (bus_attr->qas_enabled)
6636                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6637                 else
6638                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6639         }
6640 }
6641
6642 /**
6643  * ipr_build_mode_select - Build a mode select command
6644  * @ipr_cmd: ipr command struct
6645  * @res_handle: resource handle to send command to
6646  * @parm: Byte 1 of Mode Select command
6647  * @dma_addr: DMA buffer address
6648  * @xfer_len: data transfer length
6649  *
6650  * Return value:
6651  *      none
6652  **/
6653 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6654                                   __be32 res_handle, u8 parm,
6655                                   dma_addr_t dma_addr, u8 xfer_len)
6656 {
6657         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6658
6659         ioarcb->res_handle = res_handle;
6660         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6661         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6662         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6663         ioarcb->cmd_pkt.cdb[1] = parm;
6664         ioarcb->cmd_pkt.cdb[4] = xfer_len;
6665
6666         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6667 }
6668
6669 /**
6670  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6671  * @ipr_cmd: ipr command struct
6672  *
6673  * This function sets up the SCSI bus attributes and sends
6674  * a Mode Select for Page 28 to activate them.
6675  *
6676  * Return value:
6677  *      IPR_RC_JOB_RETURN
6678  **/
6679 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6680 {
6681         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6682         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6683         int length;
6684
6685         ENTER;
6686         ipr_scsi_bus_speed_limit(ioa_cfg);
6687         ipr_check_term_power(ioa_cfg, mode_pages);
6688         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6689         length = mode_pages->hdr.length + 1;
6690         mode_pages->hdr.length = 0;
6691
6692         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6693                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6694                               length);
6695
6696         ipr_cmd->job_step = ipr_set_supported_devs;
6697         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6698                                     struct ipr_resource_entry, queue);
6699         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6700
6701         LEAVE;
6702         return IPR_RC_JOB_RETURN;
6703 }
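The 0x11 passed as parm above decodes, under the usual SPC MODE SELECT(6) definition, as PF (0x10, page-format data) plus SP (0x01, save pages); treat that reading as an editorial gloss rather than something the driver states. The six meaningful CDB bytes that ipr_build_mode_select() ends up producing for this call, sketched:

        /* Assumed decode of the CDB built above for the page 28 case. */
        u8 cdb[6] = {
                MODE_SELECT,    /* 0x15 */
                0x11,           /* PF | SP */
                0x00, 0x00,
                length,         /* parameter list length from the sensed data */
                0x00,
        };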
6704
6705 /**
6706  * ipr_build_mode_sense - Builds a mode sense command
6707  * @ipr_cmd: ipr command struct
6708  * @res_handle: resource handle to send command to
6709  * @parm: Byte 2 of mode sense command
6710  * @dma_addr: DMA address of mode sense buffer
6711  * @xfer_len: Size of DMA buffer
6712  *
6713  * Return value:
6714  *      none
6715  **/
6716 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6717                                  __be32 res_handle,
6718                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6719 {
6720         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6721
6722         ioarcb->res_handle = res_handle;
6723         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6724         ioarcb->cmd_pkt.cdb[2] = parm;
6725         ioarcb->cmd_pkt.cdb[4] = xfer_len;
6726         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6727
6728         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6729 }
6730
6731 /**
6732  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6733  * @ipr_cmd: ipr command struct
6734  *
6735  * This function handles the failure of an IOA bringup command.
6736  *
6737  * Return value:
6738  *      IPR_RC_JOB_RETURN
6739  **/
6740 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6741 {
6742         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6743         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6744
6745         dev_err(&ioa_cfg->pdev->dev,
6746                 "0x%02X failed with IOASC: 0x%08X\n",
6747                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6748
6749         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6750         list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6751         return IPR_RC_JOB_RETURN;
6752 }
6753
6754 /**
6755  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6756  * @ipr_cmd: ipr command struct
6757  *
6758  * This function handles the failure of a Mode Sense to the IOAFP.
6759  * Some adapters do not handle all mode pages.
6760  *
6761  * Return value:
6762  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6763  **/
6764 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6765 {
6766         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6767         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6768
6769         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6770                 ipr_cmd->job_step = ipr_set_supported_devs;
6771                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6772                                             struct ipr_resource_entry, queue);
6773                 return IPR_RC_JOB_CONTINUE;
6774         }
6775
6776         return ipr_reset_cmd_failed(ipr_cmd);
6777 }
6778
6779 /**
6780  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6781  * @ipr_cmd: ipr command struct
6782  *
6783  * This function sends a Page 28 mode sense to the IOA to
6784  * retrieve SCSI bus attributes.
6785  *
6786  * Return value:
6787  *      IPR_RC_JOB_RETURN
6788  **/
6789 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6790 {
6791         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6792
6793         ENTER;
6794         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6795                              0x28, ioa_cfg->vpd_cbs_dma +
6796                              offsetof(struct ipr_misc_cbs, mode_pages),
6797                              sizeof(struct ipr_mode_pages));
6798
6799         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6800         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6801
6802         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6803
6804         LEAVE;
6805         return IPR_RC_JOB_RETURN;
6806 }
6807
6808 /**
6809  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6810  * @ipr_cmd: ipr command struct
6811  *
6812  * This function enables dual IOA RAID support if possible.
6813  *
6814  * Return value:
6815  *      IPR_RC_JOB_RETURN
6816  **/
6817 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6818 {
6819         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6820         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6821         struct ipr_mode_page24 *mode_page;
6822         int length;
6823
6824         ENTER;
6825         mode_page = ipr_get_mode_page(mode_pages, 0x24,
6826                                       sizeof(struct ipr_mode_page24));
6827
6828         if (mode_page)
6829                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6830
6831         length = mode_pages->hdr.length + 1;
6832         mode_pages->hdr.length = 0;
6833
6834         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6835                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6836                               length);
6837
6838         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6839         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6840
6841         LEAVE;
6842         return IPR_RC_JOB_RETURN;
6843 }
6844
6845 /**
6846  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6847  * @ipr_cmd: ipr command struct
6848  *
6849  * This function handles the failure of a Mode Sense to the IOAFP.
6850  * Some adapters do not handle all mode pages.
6851  *
6852  * Return value:
6853  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6854  **/
6855 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6856 {
6857         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6858
6859         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6860                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6861                 return IPR_RC_JOB_CONTINUE;
6862         }
6863
6864         return ipr_reset_cmd_failed(ipr_cmd);
6865 }
6866
6867 /**
6868  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6869  * @ipr_cmd: ipr command struct
6870  *
6871  * This function sends a mode sense to the IOA to retrieve
6872  * the IOA Advanced Function Control mode page.
6873  *
6874  * Return value:
6875  *      IPR_RC_JOB_RETURN
6876  **/
6877 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6878 {
6879         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6880
6881         ENTER;
6882         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6883                              0x24, ioa_cfg->vpd_cbs_dma +
6884                              offsetof(struct ipr_misc_cbs, mode_pages),
6885                              sizeof(struct ipr_mode_pages));
6886
6887         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6888         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6889
6890         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6891
6892         LEAVE;
6893         return IPR_RC_JOB_RETURN;
6894 }
6895
6896 /**
6897  * ipr_init_res_table - Initialize the resource table
6898  * @ipr_cmd: ipr command struct
6899  *
6900  * This function looks through the existing resource table, comparing
6901  * it with the config table. This function will take care of old/new
6902  * devices and schedule adding/removing them from the mid-layer
6903  * as appropriate.
6904  *
6905  * Return value:
6906  *      IPR_RC_JOB_CONTINUE
6907  **/
6908 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6909 {
6910         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6911         struct ipr_resource_entry *res, *temp;
6912         struct ipr_config_table_entry_wrapper cfgtew;
6913         int entries, found, flag, i;
6914         LIST_HEAD(old_res);
6915
6916         ENTER;
6917         if (ioa_cfg->sis64)
6918                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6919         else
6920                 flag = ioa_cfg->u.cfg_table->hdr.flags;
6921
6922         if (flag & IPR_UCODE_DOWNLOAD_REQ)
6923                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6924
6925         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6926                 list_move_tail(&res->queue, &old_res);
6927
6928         if (ioa_cfg->sis64)
6929                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6930         else
6931                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6932
6933         for (i = 0; i < entries; i++) {
6934                 if (ioa_cfg->sis64)
6935                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6936                 else
6937                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6938                 found = 0;
6939
6940                 list_for_each_entry_safe(res, temp, &old_res, queue) {
6941                         if (ipr_is_same_device(res, &cfgtew)) {
6942                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6943                                 found = 1;
6944                                 break;
6945                         }
6946                 }
6947
6948                 if (!found) {
6949                         if (list_empty(&ioa_cfg->free_res_q)) {
6950                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6951                                 break;
6952                         }
6953
6954                         found = 1;
6955                         res = list_entry(ioa_cfg->free_res_q.next,
6956                                          struct ipr_resource_entry, queue);
6957                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6958                         ipr_init_res_entry(res, &cfgtew);
6959                         res->add_to_ml = 1;
6960                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6961                         res->sdev->allow_restart = 1;
6962
6963                 if (found)
6964                         ipr_update_res_entry(res, &cfgtew);
6965         }
6966
6967         list_for_each_entry_safe(res, temp, &old_res, queue) {
6968                 if (res->sdev) {
6969                         res->del_from_ml = 1;
6970                         res->res_handle = IPR_INVALID_RES_HANDLE;
6971                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6972                 }
6973         }
6974
6975         list_for_each_entry_safe(res, temp, &old_res, queue) {
6976                 ipr_clear_res_target(res);
6977                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6978         }
6979
6980         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6981                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6982         else
6983                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6984
6985         LEAVE;
6986         return IPR_RC_JOB_CONTINUE;
6987 }
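ipr_init_res_table() is a classic three-phase list reconciliation: park everything on old_res, move matches (and adopt brand-new entries) back to used_res_q, then sweep whatever remains. A stripped-down sketch of the same pattern with hypothetical types and helpers, for readers tracking the list moves:

        /* Pattern sketch only; struct item, cfg[], matches(), and the two
         * helpers are stand-ins, not the driver's names. */
        LIST_HEAD(old);
        struct item *it, *tmp;
        int i;

        list_splice_init(&used, &old);          /* phase 1: park everything */

        for (i = 0; i < nentries; i++) {        /* phase 2: re-adopt matches */
                int found = 0;
                list_for_each_entry_safe(it, tmp, &old, node)
                        if (matches(it, &cfg[i])) {
                                list_move_tail(&it->node, &used);
                                found = 1;
                                break;
                        }
                if (!found)
                        adopt_from_free_list(&cfg[i]);
        }

        list_for_each_entry_safe(it, tmp, &old, node)   /* phase 3: sweep */
                retire(it);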
6988
6989 /**
6990  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6991  * @ipr_cmd: ipr command struct
6992  *
6993  * This function sends a Query IOA Configuration command
6994  * to the adapter to retrieve the IOA configuration table.
6995  *
6996  * Return value:
6997  *      IPR_RC_JOB_RETURN
6998  **/
6999 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7000 {
7001         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7002         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7003         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7004         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7005
7006         ENTER;
7007         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7008                 ioa_cfg->dual_raid = 1;
7009         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7010                  ucode_vpd->major_release, ucode_vpd->card_type,
7011                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7012         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7013         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7014
7015         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7016         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7017         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7018         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7019
7020         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7021                        IPR_IOADL_FLAGS_READ_LAST);
7022
7023         ipr_cmd->job_step = ipr_init_res_table;
7024
7025         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7026
7027         LEAVE;
7028         return IPR_RC_JOB_RETURN;
7029 }
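The cdb[6..8] stores above spell out a big-endian 24-bit length by hand, and ipr_ioafp_identify_hrrq() further down does the same byte-at-a-time split for a 64-bit DMA address. A small helper expressing the idiom (hypothetical; the driver keeps the shifts inline):

        /* Hypothetical helper; equivalent to the inline shifts above. */
        static inline void ipr_pack_be24(u8 *dst, u32 val)
        {
                dst[0] = (val >> 16) & 0xff;
                dst[1] = (val >> 8) & 0xff;
                dst[2] = val & 0xff;
        }

        /* Usage matching the CDB above: */
        ipr_pack_be24(&ioarcb->cmd_pkt.cdb[6], ioa_cfg->cfg_table_size);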
7030 7030
7031 /** 7031 /**
7032 * ipr_ioafp_inquiry - Send an Inquiry to the adapter. 7032 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7033 * @ipr_cmd: ipr command struct 7033 * @ipr_cmd: ipr command struct
7034 * 7034 *
7035 * This utility function sends an inquiry to the adapter. 7035 * This utility function sends an inquiry to the adapter.
7036 * 7036 *
7037 * Return value: 7037 * Return value:
7038 * none 7038 * none
7039 **/ 7039 **/
7040 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 7040 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7041 dma_addr_t dma_addr, u8 xfer_len) 7041 dma_addr_t dma_addr, u8 xfer_len)
7042 { 7042 {
7043 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7043 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7044 7044
7045 ENTER; 7045 ENTER;
7046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7048 7048
7049 ioarcb->cmd_pkt.cdb[0] = INQUIRY; 7049 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7050 ioarcb->cmd_pkt.cdb[1] = flags; 7050 ioarcb->cmd_pkt.cdb[1] = flags;
7051 ioarcb->cmd_pkt.cdb[2] = page; 7051 ioarcb->cmd_pkt.cdb[2] = page;
7052 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7052 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7053 7053
7054 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7054 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7055 7055
7056 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7056 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7057 LEAVE; 7057 LEAVE;
7058 } 7058 }
7059 7059
7060 /** 7060 /**
7061 * ipr_inquiry_page_supported - Is the given inquiry page supported 7061 * ipr_inquiry_page_supported - Is the given inquiry page supported
7062 * @page0: inquiry page 0 buffer 7062 * @page0: inquiry page 0 buffer
7063 * @page: page code. 7063 * @page: page code.
7064 * 7064 *
7065 * This function determines if the specified inquiry page is supported. 7065 * This function determines if the specified inquiry page is supported.
7066 * 7066 *
7067 * Return value: 7067 * Return value:
7068 * 1 if page is supported / 0 if not 7068 * 1 if page is supported / 0 if not
7069 **/ 7069 **/
7070 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) 7070 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7071 { 7071 {
7072 int i; 7072 int i;
7073 7073
7074 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) 7074 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7075 if (page0->page[i] == page) 7075 if (page0->page[i] == page)
7076 return 1; 7076 return 1;
7077 7077
7078 return 0; 7078 return 0;
7079 } 7079 }
7080 7080
7081 /** 7081 /**
7082 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. 7082 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7083 * @ipr_cmd: ipr command struct 7083 * @ipr_cmd: ipr command struct
7084 * 7084 *
7085 * This function sends a Page 0xD0 inquiry to the adapter 7085 * This function sends a Page 0xD0 inquiry to the adapter
7086 * to retrieve adapter capabilities. 7086 * to retrieve adapter capabilities.
7087 * 7087 *
7088 * Return value: 7088 * Return value:
7089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7090 **/ 7090 **/
7091 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) 7091 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7092 { 7092 {
7093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7094 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 7094 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7095 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7095 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7096 7096
7097 ENTER; 7097 ENTER;
7098 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 7098 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7099 memset(cap, 0, sizeof(*cap)); 7099 memset(cap, 0, sizeof(*cap));
7100 7100
7101 if (ipr_inquiry_page_supported(page0, 0xD0)) { 7101 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7102 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, 7102 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7103 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), 7103 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7104 sizeof(struct ipr_inquiry_cap)); 7104 sizeof(struct ipr_inquiry_cap));
7105 return IPR_RC_JOB_RETURN; 7105 return IPR_RC_JOB_RETURN;
7106 } 7106 }
7107 7107
7108 LEAVE; 7108 LEAVE;
7109 return IPR_RC_JOB_CONTINUE; 7109 return IPR_RC_JOB_CONTINUE;
7110 } 7110 }
7111 7111
7112 /** 7112 /**
7113 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 7113 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7114 * @ipr_cmd: ipr command struct 7114 * @ipr_cmd: ipr command struct
7115 * 7115 *
7116 * This function sends a Page 3 inquiry to the adapter 7116 * This function sends a Page 3 inquiry to the adapter
7117 * to retrieve software VPD information. 7117 * to retrieve software VPD information.
7118 * 7118 *
7119 * Return value: 7119 * Return value:
7120 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7120 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7121 **/ 7121 **/
7122 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 7122 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7123 { 7123 {
7124 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7124 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7125 7125
7126 ENTER; 7126 ENTER;
7127 7127
7128 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 7128 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7129 7129
7130 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 7130 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7131 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 7131 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7132 sizeof(struct ipr_inquiry_page3)); 7132 sizeof(struct ipr_inquiry_page3));
7133 7133
7134 LEAVE; 7134 LEAVE;
7135 return IPR_RC_JOB_RETURN; 7135 return IPR_RC_JOB_RETURN;
7136 } 7136 }
7137 7137
7138 /** 7138 /**
7139 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. 7139 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7140 * @ipr_cmd: ipr command struct 7140 * @ipr_cmd: ipr command struct
7141 * 7141 *
7142 * This function sends a Page 0 inquiry to the adapter 7142 * This function sends a Page 0 inquiry to the adapter
7143 * to retrieve supported inquiry pages. 7143 * to retrieve supported inquiry pages.
7144 * 7144 *
7145 * Return value: 7145 * Return value:
7146 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7146 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7147 **/ 7147 **/
7148 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) 7148 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7149 { 7149 {
7150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7150 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7151 char type[5]; 7151 char type[5];
7152 7152
7153 ENTER; 7153 ENTER;
7154 7154
7155 /* Grab the type out of the VPD and store it away */ 7155 /* Grab the type out of the VPD and store it away */
7156 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 7156 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7157 type[4] = '\0'; 7157 type[4] = '\0';
7158 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 7158 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7159 7159
7160 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 7160 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7161 7161
7162 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 7162 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7163 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), 7163 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7164 sizeof(struct ipr_inquiry_page0)); 7164 sizeof(struct ipr_inquiry_page0));
7165 7165
7166 LEAVE; 7166 LEAVE;
7167 return IPR_RC_JOB_RETURN; 7167 return IPR_RC_JOB_RETURN;
7168 } 7168 }
7169 7169
7170 /** 7170 /**
7171 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. 7171 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7172 * @ipr_cmd: ipr command struct 7172 * @ipr_cmd: ipr command struct
7173 * 7173 *
7174 * This function sends a standard inquiry to the adapter. 7174 * This function sends a standard inquiry to the adapter.
7175 * 7175 *
7176 * Return value: 7176 * Return value:
7177 * IPR_RC_JOB_RETURN 7177 * IPR_RC_JOB_RETURN
7178 **/ 7178 **/
7179 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) 7179 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7180 { 7180 {
7181 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7181 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7182 7182
7183 ENTER; 7183 ENTER;
7184 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; 7184 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7185 7185
7186 ipr_ioafp_inquiry(ipr_cmd, 0, 0, 7186 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7187 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), 7187 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7188 sizeof(struct ipr_ioa_vpd)); 7188 sizeof(struct ipr_ioa_vpd));
7189 7189
7190 LEAVE; 7190 LEAVE;
7191 return IPR_RC_JOB_RETURN; 7191 return IPR_RC_JOB_RETURN;
7192 } 7192 }
7193 7193
7194 /** 7194 /**
7195 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 7195 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7196 * @ipr_cmd: ipr command struct 7196 * @ipr_cmd: ipr command struct
7197 * 7197 *
7198 * This function sends an Identify Host Request Response Queue 7198 * This function sends an Identify Host Request Response Queue
7199 * command to establish the HRRQ with the adapter. 7199 * command to establish the HRRQ with the adapter.
7200 * 7200 *
7201 * Return value: 7201 * Return value:
7202 * IPR_RC_JOB_RETURN 7202 * IPR_RC_JOB_RETURN
7203 **/ 7203 **/
7204 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 7204 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7205 { 7205 {
7206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7207 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7207 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7208 7208
7209 ENTER; 7209 ENTER;
7210 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 7210 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7211 7211
7212 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 7212 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7213 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7213 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7214 7214
7215 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7215 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7216 if (ioa_cfg->sis64) 7216 if (ioa_cfg->sis64)
7217 ioarcb->cmd_pkt.cdb[1] = 0x1; 7217 ioarcb->cmd_pkt.cdb[1] = 0x1;
7218 ioarcb->cmd_pkt.cdb[2] = 7218 ioarcb->cmd_pkt.cdb[2] =
7219 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff; 7219 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7220 ioarcb->cmd_pkt.cdb[3] = 7220 ioarcb->cmd_pkt.cdb[3] =
7221 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff; 7221 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7222 ioarcb->cmd_pkt.cdb[4] = 7222 ioarcb->cmd_pkt.cdb[4] =
7223 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff; 7223 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7224 ioarcb->cmd_pkt.cdb[5] = 7224 ioarcb->cmd_pkt.cdb[5] =
7225 ((u64) ioa_cfg->host_rrq_dma) & 0xff; 7225 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7226 ioarcb->cmd_pkt.cdb[7] = 7226 ioarcb->cmd_pkt.cdb[7] =
7227 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; 7227 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7228 ioarcb->cmd_pkt.cdb[8] = 7228 ioarcb->cmd_pkt.cdb[8] =
7229 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; 7229 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7230 7230
7231 if (ioa_cfg->sis64) { 7231 if (ioa_cfg->sis64) {
7232 ioarcb->cmd_pkt.cdb[10] = 7232 ioarcb->cmd_pkt.cdb[10] =
7233 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff; 7233 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7234 ioarcb->cmd_pkt.cdb[11] = 7234 ioarcb->cmd_pkt.cdb[11] =
7235 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff; 7235 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7236 ioarcb->cmd_pkt.cdb[12] = 7236 ioarcb->cmd_pkt.cdb[12] =
7237 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff; 7237 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7238 ioarcb->cmd_pkt.cdb[13] = 7238 ioarcb->cmd_pkt.cdb[13] =
7239 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff; 7239 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7240 } 7240 }
7241 7241
7242 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7242 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7243 7243
7244 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7244 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7245 7245
7246 LEAVE; 7246 LEAVE;
7247 return IPR_RC_JOB_RETURN; 7247 return IPR_RC_JOB_RETURN;
7248 } 7248 }
7249 7249
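The cdb[] assignments above are a plain big-endian byte split of the 64-bit host RRQ DMA address: bits 31:24 through 7:0 land in CDB bytes 2-5, and on 64-bit SIS adapters the upper word lands in bytes 10-13. A hedged sketch of the same packing (hypothetical helper name; standard kernel u8/u64/bool types assumed):

/* Illustrative only -- not driver code. Mirrors the cdb[] stores in
 * ipr_ioafp_identify_hrrq() above. */
static void pack_hrrq_addr(u8 *cdb, u64 addr, bool sis64)
{
	/* lower 32 bits, most significant byte first */
	cdb[2] = (addr >> 24) & 0xff;
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;

	if (sis64) {
		/* upper 32 bits, only meaningful on 64-bit SIS */
		cdb[10] = (addr >> 56) & 0xff;
		cdb[11] = (addr >> 48) & 0xff;
		cdb[12] = (addr >> 40) & 0xff;
		cdb[13] = (addr >> 32) & 0xff;
	}
}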
7250 /** 7250 /**
7251 * ipr_reset_timer_done - Adapter reset timer function 7251 * ipr_reset_timer_done - Adapter reset timer function
7252 * @ipr_cmd: ipr command struct 7252 * @ipr_cmd: ipr command struct
7253 * 7253 *
7254 * Description: This function is used in adapter reset processing 7254 * Description: This function is used in adapter reset processing
7255 * for timing events. If the reset_cmd pointer in the IOA 7255 * for timing events. If the reset_cmd pointer in the IOA
7256 * config struct is not this adapter's, we are doing nested 7256 * config struct is not this adapter's, we are doing nested
7257 * resets and fail_all_ops will take care of freeing the 7257 * resets and fail_all_ops will take care of freeing the
7258 * command block. 7258 * command block.
7259 * 7259 *
7260 * Return value: 7260 * Return value:
7261 * none 7261 * none
7262 **/ 7262 **/
7263 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) 7263 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7264 { 7264 {
7265 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7265 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7266 unsigned long lock_flags = 0; 7266 unsigned long lock_flags = 0;
7267 7267
7268 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7268 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7269 7269
7270 if (ioa_cfg->reset_cmd == ipr_cmd) { 7270 if (ioa_cfg->reset_cmd == ipr_cmd) {
7271 list_del(&ipr_cmd->queue); 7271 list_del(&ipr_cmd->queue);
7272 ipr_cmd->done(ipr_cmd); 7272 ipr_cmd->done(ipr_cmd);
7273 } 7273 }
7274 7274
7275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7276 } 7276 }
7277 7277
7278 /** 7278 /**
7279 * ipr_reset_start_timer - Start a timer for adapter reset job 7279 * ipr_reset_start_timer - Start a timer for adapter reset job
7280 * @ipr_cmd: ipr command struct 7280 * @ipr_cmd: ipr command struct
7281 * @timeout: timeout value 7281 * @timeout: timeout value
7282 * 7282 *
7283 * Description: This function is used in adapter reset processing 7283 * Description: This function is used in adapter reset processing
7284 * for timing events. If the reset_cmd pointer in the IOA 7284 * for timing events. If the reset_cmd pointer in the IOA
7285 * config struct is not this adapter's, we are doing nested 7285 * config struct is not this adapter's, we are doing nested
7286 * resets and fail_all_ops will take care of freeing the 7286 * resets and fail_all_ops will take care of freeing the
7287 * command block. 7287 * command block.
7288 * 7288 *
7289 * Return value: 7289 * Return value:
7290 * none 7290 * none
7291 **/ 7291 **/
7292 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 7292 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7293 unsigned long timeout) 7293 unsigned long timeout)
7294 { 7294 {
7295 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q); 7295 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7296 ipr_cmd->done = ipr_reset_ioa_job; 7296 ipr_cmd->done = ipr_reset_ioa_job;
7297 7297
7298 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7298 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7299 ipr_cmd->timer.expires = jiffies + timeout; 7299 ipr_cmd->timer.expires = jiffies + timeout;
7300 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; 7300 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7301 add_timer(&ipr_cmd->timer); 7301 add_timer(&ipr_cmd->timer);
7302 } 7302 }
7303 7303
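The setup above is the legacy struct timer_list idiom (from before timer_setup() existed): the callback argument travels through the .data field as an unsigned long, and the typed handler is cast to match the unsigned-long signature. A minimal sketch of the pattern with a hypothetical handler:

/* legacy timer callback: the argument arrives as an unsigned long */
static void example_timeout(unsigned long data)
{
	struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *) data;

	ipr_cmd->done(ipr_cmd);		/* illustrative body only */
}

	/* arm a one-shot timer that fires 'timeout' jiffies from now */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = example_timeout;
	add_timer(&ipr_cmd->timer);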
7304 /** 7304 /**
7305 * ipr_init_ioa_mem - Initialize ioa_cfg control block 7305 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7306 * @ioa_cfg: ioa cfg struct 7306 * @ioa_cfg: ioa cfg struct
7307 * 7307 *
7308 * Return value: 7308 * Return value:
7309 * nothing 7309 * nothing
7310 **/ 7310 **/
7311 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 7311 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7312 { 7312 {
7313 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS); 7313 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7314 7314
7315 /* Initialize Host RRQ pointers */ 7315 /* Initialize Host RRQ pointers */
7316 ioa_cfg->hrrq_start = ioa_cfg->host_rrq; 7316 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7317 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1]; 7317 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7318 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; 7318 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7319 ioa_cfg->toggle_bit = 1; 7319 ioa_cfg->toggle_bit = 1;
7320 7320
7321 /* Zero out config table */ 7321 /* Zero out config table */
7322 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 7322 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7323 } 7323 }
7324 7324
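The toggle_bit initialized here is the standard trick for detecting new entries in a circular response queue: the adapter stamps each entry it writes with the current toggle value, and the host flips its expected value every time hrrq_curr wraps. A sketch of a consumer loop under those conventions (field names mirror the init code above; the loop is illustrative, not a quote of the driver's interrupt handler):

	/* consume entries while their toggle bit matches our expectation */
	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       ioa_cfg->toggle_bit) {
		handle_response(*ioa_cfg->hrrq_curr);	/* hypothetical helper */

		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
			ioa_cfg->hrrq_curr++;
		} else {
			/* wrap to the start and flip the expected toggle */
			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
			ioa_cfg->toggle_bit ^= 1u;
		}
	}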
7325 /** 7325 /**
7326 * ipr_reset_next_stage - Process IPL stage change based on feedback register. 7326 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7327 * @ipr_cmd: ipr command struct 7327 * @ipr_cmd: ipr command struct
7328 * 7328 *
7329 * Return value: 7329 * Return value:
7330 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7330 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7331 **/ 7331 **/
7332 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 7332 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7333 { 7333 {
7334 unsigned long stage, stage_time; 7334 unsigned long stage, stage_time;
7335 u32 feedback; 7335 u32 feedback;
7336 volatile u32 int_reg; 7336 volatile u32 int_reg;
7337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7338 u64 maskval = 0; 7338 u64 maskval = 0;
7339 7339
7340 feedback = readl(ioa_cfg->regs.init_feedback_reg); 7340 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7341 stage = feedback & IPR_IPL_INIT_STAGE_MASK; 7341 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7342 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 7342 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7343 7343
7344 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 7344 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7345 7345
7346 /* sanity check the stage_time value */ 7346 /* sanity check the stage_time value */
7347 if (stage_time == 0) 7347 if (stage_time == 0)
7348 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; 7348 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7349 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 7349 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7350 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 7350 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7351 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 7351 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7352 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 7352 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7353 7353
7354 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 7354 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7355 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 7355 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7356 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7356 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7357 stage_time = ioa_cfg->transop_timeout; 7357 stage_time = ioa_cfg->transop_timeout;
7358 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7358 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7359 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 7359 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7360 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 7360 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7361 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7361 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7362 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7362 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7363 maskval = IPR_PCII_IPL_STAGE_CHANGE; 7363 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7364 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 7364 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7365 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 7365 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7366 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7366 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7367 return IPR_RC_JOB_CONTINUE; 7367 return IPR_RC_JOB_CONTINUE;
7368 } 7368 }
7369 } 7369 }
7370 7370
7371 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7371 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7372 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 7372 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7373 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7373 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7374 ipr_cmd->done = ipr_reset_ioa_job; 7374 ipr_cmd->done = ipr_reset_ioa_job;
7375 add_timer(&ipr_cmd->timer); 7375 add_timer(&ipr_cmd->timer);
7376 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 7376 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7377 7377
7378 return IPR_RC_JOB_RETURN; 7378 return IPR_RC_JOB_RETURN;
7379 } 7379 }
7380 7380
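The else-if ladder above is a clamp with a special case for zero: once stage_time is known to be non-zero, the kernel's clamp_t() helper would express the same bounds in one statement (an equivalent rewrite for clarity, not a suggestion that the driver changed):

	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else
		stage_time = clamp_t(unsigned long, stage_time,
				     IPR_IPL_INIT_MIN_STAGE_TIME,
				     IPR_LONG_OPERATIONAL_TIMEOUT);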
7381 /** 7381 /**
7382 * ipr_reset_enable_ioa - Enable the IOA following a reset. 7382 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7383 * @ipr_cmd: ipr command struct 7383 * @ipr_cmd: ipr command struct
7384 * 7384 *
7385 * This function reinitializes some control blocks and 7385 * This function reinitializes some control blocks and
7386 * enables destructive diagnostics on the adapter. 7386 * enables destructive diagnostics on the adapter.
7387 * 7387 *
7388 * Return value: 7388 * Return value:
7389 * IPR_RC_JOB_RETURN 7389 * IPR_RC_JOB_RETURN
7390 **/ 7390 **/
7391 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) 7391 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7392 { 7392 {
7393 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7393 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7394 volatile u32 int_reg; 7394 volatile u32 int_reg;
7395 volatile u64 maskval; 7395 volatile u64 maskval;
7396 7396
7397 ENTER; 7397 ENTER;
7398 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7398 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7399 ipr_init_ioa_mem(ioa_cfg); 7399 ipr_init_ioa_mem(ioa_cfg);
7400 7400
7401 ioa_cfg->allow_interrupts = 1; 7401 ioa_cfg->allow_interrupts = 1;
7402 if (ioa_cfg->sis64) { 7402 if (ioa_cfg->sis64) {
7403 /* Set the adapter to the correct endian mode. */ 7403 /* Set the adapter to the correct endian mode. */
7404 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 7404 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7405 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 7405 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7406 } 7406 }
7407 7407
7408 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 7408 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7409 7409
7410 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7410 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7411 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7411 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7412 ioa_cfg->regs.clr_interrupt_mask_reg32); 7412 ioa_cfg->regs.clr_interrupt_mask_reg32);
7413 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7413 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7414 return IPR_RC_JOB_CONTINUE; 7414 return IPR_RC_JOB_CONTINUE;
7415 } 7415 }
7416 7416
7417 /* Enable destructive diagnostics on IOA */ 7417 /* Enable destructive diagnostics on IOA */
7418 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 7418 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7419 7419
7420 if (ioa_cfg->sis64) { 7420 if (ioa_cfg->sis64) {
7421 maskval = IPR_PCII_IPL_STAGE_CHANGE; 7421 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7422 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; 7422 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7423 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); 7423 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7424 } else 7424 } else
7425 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 7425 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7426 7426
7427 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7427 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7428 7428
7429 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 7429 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7430 7430
7431 if (ioa_cfg->sis64) { 7431 if (ioa_cfg->sis64) {
7432 ipr_cmd->job_step = ipr_reset_next_stage; 7432 ipr_cmd->job_step = ipr_reset_next_stage;
7433 return IPR_RC_JOB_CONTINUE; 7433 return IPR_RC_JOB_CONTINUE;
7434 } 7434 }
7435 7435
7436 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7436 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7437 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 7437 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7438 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7438 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7439 ipr_cmd->done = ipr_reset_ioa_job; 7439 ipr_cmd->done = ipr_reset_ioa_job;
7440 add_timer(&ipr_cmd->timer); 7440 add_timer(&ipr_cmd->timer);
7441 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 7441 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7442 7442
7443 LEAVE; 7443 LEAVE;
7444 return IPR_RC_JOB_RETURN; 7444 return IPR_RC_JOB_RETURN;
7445 } 7445 }
7446 7446
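The maskval dance here, and again in ipr_reset_next_stage() above, is the same composition: shift the upper-word interrupt bits into the high 32 bits, OR in the lower-word bits, and issue a single writeq(), followed by a readl() of the sense register (a common way to make sure a posted MMIO write has reached the device). Schematically, as a sketch using the driver's bit names:

	u64 mask = ((u64) IPR_PCII_IPL_STAGE_CHANGE << 32) |
		   IPR_PCII_OPER_INTERRUPTS;

	writeq(mask, ioa_cfg->regs.clr_interrupt_mask_reg);	/* one 64-bit write */
	(void) readl(ioa_cfg->regs.sense_interrupt_mask_reg);	/* read back */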
7447 /** 7447 /**
7448 * ipr_reset_wait_for_dump - Wait for a dump to timeout. 7448 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7449 * @ipr_cmd: ipr command struct 7449 * @ipr_cmd: ipr command struct
7450 * 7450 *
7451 * This function is invoked when an adapter dump has run out 7451 * This function is invoked when an adapter dump has run out
7452 * of processing time. 7452 * of processing time.
7453 * 7453 *
7454 * Return value: 7454 * Return value:
7455 * IPR_RC_JOB_CONTINUE 7455 * IPR_RC_JOB_CONTINUE
7456 **/ 7456 **/
7457 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) 7457 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7458 { 7458 {
7459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7460 7460
7461 if (ioa_cfg->sdt_state == GET_DUMP) 7461 if (ioa_cfg->sdt_state == GET_DUMP)
7462 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 7462 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7463 else if (ioa_cfg->sdt_state == READ_DUMP) 7463 else if (ioa_cfg->sdt_state == READ_DUMP)
7464 ioa_cfg->sdt_state = ABORT_DUMP; 7464 ioa_cfg->sdt_state = ABORT_DUMP;
7465 7465
7466 ioa_cfg->dump_timeout = 1; 7466 ioa_cfg->dump_timeout = 1;
7467 ipr_cmd->job_step = ipr_reset_alert; 7467 ipr_cmd->job_step = ipr_reset_alert;
7468 7468
7469 return IPR_RC_JOB_CONTINUE; 7469 return IPR_RC_JOB_CONTINUE;
7470 } 7470 }
7471 7471
7472 /** 7472 /**
7473 * ipr_unit_check_no_data - Log a unit check/no data error log 7473 * ipr_unit_check_no_data - Log a unit check/no data error log
7474 * @ioa_cfg: ioa config struct 7474 * @ioa_cfg: ioa config struct
7475 * 7475 *
7476 * Logs an error indicating the adapter unit checked, but for some 7476 * Logs an error indicating the adapter unit checked, but for some
7477 * reason, we were unable to fetch the unit check buffer. 7477 * reason, we were unable to fetch the unit check buffer.
7478 * 7478 *
7479 * Return value: 7479 * Return value:
7480 * nothing 7480 * nothing
7481 **/ 7481 **/
7482 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) 7482 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7483 { 7483 {
7484 ioa_cfg->errors_logged++; 7484 ioa_cfg->errors_logged++;
7485 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); 7485 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7486 } 7486 }
7487 7487
7488 /** 7488 /**
7489 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA 7489 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7490 * @ioa_cfg: ioa config struct 7490 * @ioa_cfg: ioa config struct
7491 * 7491 *
7492 * Fetches the unit check buffer from the adapter by clocking the data 7492 * Fetches the unit check buffer from the adapter by clocking the data
7493 * through the mailbox register. 7493 * through the mailbox register.
7494 * 7494 *
7495 * Return value: 7495 * Return value:
7496 * nothing 7496 * nothing
7497 **/ 7497 **/
7498 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) 7498 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7499 { 7499 {
7500 unsigned long mailbox; 7500 unsigned long mailbox;
7501 struct ipr_hostrcb *hostrcb; 7501 struct ipr_hostrcb *hostrcb;
7502 struct ipr_uc_sdt sdt; 7502 struct ipr_uc_sdt sdt;
7503 int rc, length; 7503 int rc, length;
7504 u32 ioasc; 7504 u32 ioasc;
7505 7505
7506 mailbox = readl(ioa_cfg->ioa_mailbox); 7506 mailbox = readl(ioa_cfg->ioa_mailbox);
7507 7507
7508 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 7508 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7509 ipr_unit_check_no_data(ioa_cfg); 7509 ipr_unit_check_no_data(ioa_cfg);
7510 return; 7510 return;
7511 } 7511 }
7512 7512
7513 memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); 7513 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7514 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 7514 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7515 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 7515 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7516 7516
7517 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 7517 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7518 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 7518 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7519 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 7519 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7520 ipr_unit_check_no_data(ioa_cfg); 7520 ipr_unit_check_no_data(ioa_cfg);
7521 return; 7521 return;
7522 } 7522 }
7523 7523
7524 /* Find length of the first sdt entry (UC buffer) */ 7524 /* Find length of the first sdt entry (UC buffer) */
7525 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 7525 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7526 length = be32_to_cpu(sdt.entry[0].end_token); 7526 length = be32_to_cpu(sdt.entry[0].end_token);
7527 else 7527 else
7528 length = (be32_to_cpu(sdt.entry[0].end_token) - 7528 length = (be32_to_cpu(sdt.entry[0].end_token) -
7529 be32_to_cpu(sdt.entry[0].start_token)) & 7529 be32_to_cpu(sdt.entry[0].start_token)) &
7530 IPR_FMT2_MBX_ADDR_MASK; 7530 IPR_FMT2_MBX_ADDR_MASK;
7531 7531
7532 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 7532 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7533 struct ipr_hostrcb, queue); 7533 struct ipr_hostrcb, queue);
7534 list_del(&hostrcb->queue); 7534 list_del(&hostrcb->queue);
7535 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 7535 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7536 7536
7537 rc = ipr_get_ldump_data_section(ioa_cfg, 7537 rc = ipr_get_ldump_data_section(ioa_cfg,
7538 be32_to_cpu(sdt.entry[0].start_token), 7538 be32_to_cpu(sdt.entry[0].start_token),
7539 (__be32 *)&hostrcb->hcam, 7539 (__be32 *)&hostrcb->hcam,
7540 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 7540 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7541 7541
7542 if (!rc) { 7542 if (!rc) {
7543 ipr_handle_log_data(ioa_cfg, hostrcb); 7543 ipr_handle_log_data(ioa_cfg, hostrcb);
7544 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 7544 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7545 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 7545 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7546 ioa_cfg->sdt_state == GET_DUMP) 7546 ioa_cfg->sdt_state == GET_DUMP)
7547 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 7547 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7548 } else 7548 } else
7549 ipr_unit_check_no_data(ioa_cfg); 7549 ipr_unit_check_no_data(ioa_cfg);
7550 7550
7551 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 7551 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7552 } 7552 }
7553 7553
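Two details above are easy to miss: a format 3 SDT entry's end_token is the buffer's byte length directly, while format 2 tokens are bus addresses whose masked difference gives the length; and the subsequent copy is bounded by the hcam size and converted to a 32-bit word count, since the dump engine moves __be32 words. The bounding step, restated as a sketch:

	/* never read past the hcam buffer; the engine copies __be32 words */
	size_t words = min(length, (int) sizeof(hostrcb->hcam)) / sizeof(__be32);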
7554 /** 7554 /**
7555 * ipr_reset_get_unit_check_job - Call to get the unit check buffer. 7555 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7556 * @ipr_cmd: ipr command struct 7556 * @ipr_cmd: ipr command struct
7557 * 7557 *
7558 * Description: This function calls ipr_get_unit_check_buffer() to fetch the unit check buffer. 7558 * Description: This function calls ipr_get_unit_check_buffer() to fetch the unit check buffer.
7559 * 7559 *
7560 * Return value: 7560 * Return value:
7561 * IPR_RC_JOB_RETURN 7561 * IPR_RC_JOB_RETURN
7562 **/ 7562 **/
7563 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) 7563 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7564 { 7564 {
7565 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7565 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7566 7566
7567 ENTER; 7567 ENTER;
7568 ioa_cfg->ioa_unit_checked = 0; 7568 ioa_cfg->ioa_unit_checked = 0;
7569 ipr_get_unit_check_buffer(ioa_cfg); 7569 ipr_get_unit_check_buffer(ioa_cfg);
7570 ipr_cmd->job_step = ipr_reset_alert; 7570 ipr_cmd->job_step = ipr_reset_alert;
7571 ipr_reset_start_timer(ipr_cmd, 0); 7571 ipr_reset_start_timer(ipr_cmd, 0);
7572 7572
7573 LEAVE; 7573 LEAVE;
7574 return IPR_RC_JOB_RETURN; 7574 return IPR_RC_JOB_RETURN;
7575 } 7575 }
7576 7576
7577 /** 7577 /**
7578 * ipr_reset_restore_cfg_space - Restore PCI config space. 7578 * ipr_reset_restore_cfg_space - Restore PCI config space.
7579 * @ipr_cmd: ipr command struct 7579 * @ipr_cmd: ipr command struct
7580 * 7580 *
7581 * Description: This function restores the saved PCI config space of 7581 * Description: This function restores the saved PCI config space of
7582 * the adapter, fails all outstanding ops back to the callers, and 7582 * the adapter, fails all outstanding ops back to the callers, and
7583 * fetches the dump/unit check if applicable to this reset. 7583 * fetches the dump/unit check if applicable to this reset.
7584 * 7584 *
7585 * Return value: 7585 * Return value:
7586 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7586 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7587 **/ 7587 **/
7588 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 7588 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7589 { 7589 {
7590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7591 u32 int_reg; 7591 u32 int_reg;
7592 7592
7593 ENTER; 7593 ENTER;
7594 ioa_cfg->pdev->state_saved = true; 7594 ioa_cfg->pdev->state_saved = true;
7595 pci_restore_state(ioa_cfg->pdev); 7595 pci_restore_state(ioa_cfg->pdev);
7596 7596
7597 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 7597 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7598 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7598 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7599 return IPR_RC_JOB_CONTINUE; 7599 return IPR_RC_JOB_CONTINUE;
7600 } 7600 }
7601 7601
7602 ipr_fail_all_ops(ioa_cfg); 7602 ipr_fail_all_ops(ioa_cfg);
7603 7603
7604 if (ioa_cfg->sis64) { 7604 if (ioa_cfg->sis64) {
7605 /* Set the adapter to the correct endian mode. */ 7605 /* Set the adapter to the correct endian mode. */
7606 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 7606 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7607 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 7607 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7608 } 7608 }
7609 7609
7610 if (ioa_cfg->ioa_unit_checked) { 7610 if (ioa_cfg->ioa_unit_checked) {
7611 if (ioa_cfg->sis64) { 7611 if (ioa_cfg->sis64) {
7612 ipr_cmd->job_step = ipr_reset_get_unit_check_job; 7612 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7613 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); 7613 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7614 return IPR_RC_JOB_RETURN; 7614 return IPR_RC_JOB_RETURN;
7615 } else { 7615 } else {
7616 ioa_cfg->ioa_unit_checked = 0; 7616 ioa_cfg->ioa_unit_checked = 0;
7617 ipr_get_unit_check_buffer(ioa_cfg); 7617 ipr_get_unit_check_buffer(ioa_cfg);
7618 ipr_cmd->job_step = ipr_reset_alert; 7618 ipr_cmd->job_step = ipr_reset_alert;
7619 ipr_reset_start_timer(ipr_cmd, 0); 7619 ipr_reset_start_timer(ipr_cmd, 0);
7620 return IPR_RC_JOB_RETURN; 7620 return IPR_RC_JOB_RETURN;
7621 } 7621 }
7622 } 7622 }
7623 7623
7624 if (ioa_cfg->in_ioa_bringdown) { 7624 if (ioa_cfg->in_ioa_bringdown) {
7625 ipr_cmd->job_step = ipr_ioa_bringdown_done; 7625 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7626 } else { 7626 } else {
7627 ipr_cmd->job_step = ipr_reset_enable_ioa; 7627 ipr_cmd->job_step = ipr_reset_enable_ioa;
7628 7628
7629 if (GET_DUMP == ioa_cfg->sdt_state) { 7629 if (GET_DUMP == ioa_cfg->sdt_state) {
7630 ioa_cfg->sdt_state = READ_DUMP; 7630 ioa_cfg->sdt_state = READ_DUMP;
7631 ioa_cfg->dump_timeout = 0; 7631 ioa_cfg->dump_timeout = 0;
7632 if (ioa_cfg->sis64) 7632 if (ioa_cfg->sis64)
7633 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); 7633 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7634 else 7634 else
7635 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); 7635 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7636 ipr_cmd->job_step = ipr_reset_wait_for_dump; 7636 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7637 schedule_work(&ioa_cfg->work_q); 7637 schedule_work(&ioa_cfg->work_q);
7638 return IPR_RC_JOB_RETURN; 7638 return IPR_RC_JOB_RETURN;
7639 } 7639 }
7640 } 7640 }
7641 7641
7642 LEAVE; 7642 LEAVE;
7643 return IPR_RC_JOB_CONTINUE; 7643 return IPR_RC_JOB_CONTINUE;
7644 } 7644 }
7645 7645
7646 /** 7646 /**
7647 * ipr_reset_bist_done - BIST has completed on the adapter. 7647 * ipr_reset_bist_done - BIST has completed on the adapter.
7648 * @ipr_cmd: ipr command struct 7648 * @ipr_cmd: ipr command struct
7649 * 7649 *
7650 * Description: Unblock config space and resume the reset process. 7650 * Description: Unblock config space and resume the reset process.
7651 * 7651 *
7652 * Return value: 7652 * Return value:
7653 * IPR_RC_JOB_CONTINUE 7653 * IPR_RC_JOB_CONTINUE
7654 **/ 7654 **/
7655 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) 7655 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7656 { 7656 {
7657 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7657 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7658 7658
7659 ENTER; 7659 ENTER;
7660 if (ioa_cfg->cfg_locked) 7660 if (ioa_cfg->cfg_locked)
7661 pci_cfg_access_unlock(ioa_cfg->pdev); 7661 pci_cfg_access_unlock(ioa_cfg->pdev);
7662 ioa_cfg->cfg_locked = 0; 7662 ioa_cfg->cfg_locked = 0;
7663 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 7663 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7664 LEAVE; 7664 LEAVE;
7665 return IPR_RC_JOB_CONTINUE; 7665 return IPR_RC_JOB_CONTINUE;
7666 } 7666 }
7667 7667
7668 /** 7668 /**
7669 * ipr_reset_start_bist - Run BIST on the adapter. 7669 * ipr_reset_start_bist - Run BIST on the adapter.
7670 * @ipr_cmd: ipr command struct 7670 * @ipr_cmd: ipr command struct
7671 * 7671 *
7672 * Description: This function runs BIST on the adapter, then delays 2 seconds. 7672 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7673 * 7673 *
7674 * Return value: 7674 * Return value:
7675 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7675 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7676 **/ 7676 **/
7677 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 7677 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7678 { 7678 {
7679 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7679 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7680 int rc = PCIBIOS_SUCCESSFUL; 7680 int rc = PCIBIOS_SUCCESSFUL;
7681 7681
7682 ENTER; 7682 ENTER;
7683 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) 7683 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7684 writel(IPR_UPROCI_SIS64_START_BIST, 7684 writel(IPR_UPROCI_SIS64_START_BIST,
7685 ioa_cfg->regs.set_uproc_interrupt_reg32); 7685 ioa_cfg->regs.set_uproc_interrupt_reg32);
7686 else 7686 else
7687 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 7687 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7688 7688
7689 if (rc == PCIBIOS_SUCCESSFUL) { 7689 if (rc == PCIBIOS_SUCCESSFUL) {
7690 ipr_cmd->job_step = ipr_reset_bist_done; 7690 ipr_cmd->job_step = ipr_reset_bist_done;
7691 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 7691 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7692 rc = IPR_RC_JOB_RETURN; 7692 rc = IPR_RC_JOB_RETURN;
7693 } else { 7693 } else {
7694 if (ioa_cfg->cfg_locked) 7694 if (ioa_cfg->cfg_locked)
7695 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); 7695 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7696 ioa_cfg->cfg_locked = 0; 7696 ioa_cfg->cfg_locked = 0;
7697 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7697 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7698 rc = IPR_RC_JOB_CONTINUE; 7698 rc = IPR_RC_JOB_CONTINUE;
7699 } 7699 }
7700 7700
7701 LEAVE; 7701 LEAVE;
7702 return rc; 7702 return rc;
7703 } 7703 }
7704 7704
7705 /** 7705 /**
7706 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter 7706 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7707 * @ipr_cmd: ipr command struct 7707 * @ipr_cmd: ipr command struct
7708 * 7708 *
7709 * Description: This clears PCI reset to the adapter and delays two seconds. 7709 * Description: This clears PCI reset to the adapter and delays two seconds.
7710 * 7710 *
7711 * Return value: 7711 * Return value:
7712 * IPR_RC_JOB_RETURN 7712 * IPR_RC_JOB_RETURN
7713 **/ 7713 **/
7714 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) 7714 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7715 { 7715 {
7716 ENTER; 7716 ENTER;
7717 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset); 7717 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7718 ipr_cmd->job_step = ipr_reset_bist_done; 7718 ipr_cmd->job_step = ipr_reset_bist_done;
7719 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 7719 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7720 LEAVE; 7720 LEAVE;
7721 return IPR_RC_JOB_RETURN; 7721 return IPR_RC_JOB_RETURN;
7722 } 7722 }
7723 7723
7724 /** 7724 /**
7725 * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 7725 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7726 * @ipr_cmd: ipr command struct 7726 * @ipr_cmd: ipr command struct
7727 * 7727 *
7728 * Description: This asserts PCI reset to the adapter. 7728 * Description: This asserts PCI reset to the adapter.
7729 * 7729 *
7730 * Return value: 7730 * Return value:
7731 * IPR_RC_JOB_RETURN 7731 * IPR_RC_JOB_RETURN
7732 **/ 7732 **/
7733 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) 7733 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7734 { 7734 {
7735 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7735 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7736 struct pci_dev *pdev = ioa_cfg->pdev; 7736 struct pci_dev *pdev = ioa_cfg->pdev;
7737 7737
7738 ENTER; 7738 ENTER;
7739 pci_set_pcie_reset_state(pdev, pcie_warm_reset); 7739 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7740 ipr_cmd->job_step = ipr_reset_slot_reset_done; 7740 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7741 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT); 7741 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7742 LEAVE; 7742 LEAVE;
7743 return IPR_RC_JOB_RETURN; 7743 return IPR_RC_JOB_RETURN;
7744 } 7744 }
7745 7745
7746 /** 7746 /**
7747 * ipr_reset_block_config_access_wait - Wait for permission to block config access 7747 * ipr_reset_block_config_access_wait - Wait for permission to block config access
7748 * @ipr_cmd: ipr command struct 7748 * @ipr_cmd: ipr command struct
7749 * 7749 *
7750 * Description: This attempts to block config access to the IOA. 7750 * Description: This attempts to block config access to the IOA.
7751 * 7751 *
7752 * Return value: 7752 * Return value:
7753 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7753 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7754 **/ 7754 **/
7755 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) 7755 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7756 { 7756 {
7757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7757 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7758 int rc = IPR_RC_JOB_CONTINUE; 7758 int rc = IPR_RC_JOB_CONTINUE;
7759 7759
7760 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { 7760 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7761 ioa_cfg->cfg_locked = 1; 7761 ioa_cfg->cfg_locked = 1;
7762 ipr_cmd->job_step = ioa_cfg->reset; 7762 ipr_cmd->job_step = ioa_cfg->reset;
7763 } else { 7763 } else {
7764 if (ipr_cmd->u.time_left) { 7764 if (ipr_cmd->u.time_left) {
7765 rc = IPR_RC_JOB_RETURN; 7765 rc = IPR_RC_JOB_RETURN;
7766 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 7766 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7767 ipr_reset_start_timer(ipr_cmd, 7767 ipr_reset_start_timer(ipr_cmd,
7768 IPR_CHECK_FOR_RESET_TIMEOUT); 7768 IPR_CHECK_FOR_RESET_TIMEOUT);
7769 } else { 7769 } else {
7770 ipr_cmd->job_step = ioa_cfg->reset; 7770 ipr_cmd->job_step = ioa_cfg->reset;
7771 dev_err(&ioa_cfg->pdev->dev, 7771 dev_err(&ioa_cfg->pdev->dev,
7772 "Timed out waiting to lock config access. Resetting anyway.\n"); 7772 "Timed out waiting to lock config access. Resetting anyway.\n");
7773 } 7773 }
7774 } 7774 }
7775 7775
7776 return rc; 7776 return rc;
7777 } 7777 }
7778 7778
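This is a budgeted-retry poll: each failed trylock spends IPR_CHECK_FOR_RESET_TIMEOUT of the u.time_left budget and rearms the timer, and once the budget is exhausted the reset proceeds anyway with a warning. ipr_reset_wait_to_start_bist() below uses the identical shape. A generic sketch of the pattern (the predicate and step names are hypothetical):

	if (try_acquire_resource())	/* hypothetical predicate */
		ipr_cmd->job_step = next_step;
	else if (ipr_cmd->u.time_left) {
		/* budget remains: charge one interval and poll again later */
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
		return IPR_RC_JOB_RETURN;
	} else {
		/* budget exhausted: warn and press on without the resource */
		ipr_cmd->job_step = next_step;
	}
	return IPR_RC_JOB_CONTINUE;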
7779 /** 7779 /**
7780 * ipr_reset_block_config_access - Block config access to the IOA 7780 * ipr_reset_block_config_access - Block config access to the IOA
7781 * @ipr_cmd: ipr command struct 7781 * @ipr_cmd: ipr command struct
7782 * 7782 *
7783 * Description: This attempts to block config access to the IOA. 7783 * Description: This attempts to block config access to the IOA.
7784 * 7784 *
7785 * Return value: 7785 * Return value:
7786 * IPR_RC_JOB_CONTINUE 7786 * IPR_RC_JOB_CONTINUE
7787 **/ 7787 **/
7788 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) 7788 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7789 { 7789 {
7790 ipr_cmd->ioa_cfg->cfg_locked = 0; 7790 ipr_cmd->ioa_cfg->cfg_locked = 0;
7791 ipr_cmd->job_step = ipr_reset_block_config_access_wait; 7791 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7792 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 7792 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7793 return IPR_RC_JOB_CONTINUE; 7793 return IPR_RC_JOB_CONTINUE;
7794 } 7794 }
7795 7795
7796 /** 7796 /**
7797 * ipr_reset_allowed - Query whether or not IOA can be reset 7797 * ipr_reset_allowed - Query whether or not IOA can be reset
7798 * @ioa_cfg: ioa config struct 7798 * @ioa_cfg: ioa config struct
7799 * 7799 *
7800 * Return value: 7800 * Return value:
7801 * 0 if reset not allowed / non-zero if reset is allowed 7801 * 0 if reset not allowed / non-zero if reset is allowed
7802 **/ 7802 **/
7803 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) 7803 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7804 { 7804 {
7805 volatile u32 temp_reg; 7805 volatile u32 temp_reg;
7806 7806
7807 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 7807 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7808 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); 7808 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7809 } 7809 }
7810 7810
7811 /** 7811 /**
7812 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. 7812 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7813 * @ipr_cmd: ipr command struct 7813 * @ipr_cmd: ipr command struct
7814 * 7814 *
7815 * Description: This function waits for adapter permission to run BIST, 7815 * Description: This function waits for adapter permission to run BIST,
7816 * then runs BIST. If the adapter does not give permission after a 7816 * then runs BIST. If the adapter does not give permission after a
7817 * reasonable time, we will reset the adapter anyway. The impact of 7817 * reasonable time, we will reset the adapter anyway. The impact of
7818 * resetting the adapter without warning it is the risk of 7818 * resetting the adapter without warning it is the risk of
7819 * losing the persistent error log on the adapter. If the adapter is 7819 * losing the persistent error log on the adapter. If the adapter is
7820 * reset while it is writing to the flash on the adapter, the flash 7820 * reset while it is writing to the flash on the adapter, the flash
7821 * segment will have bad ECC and be zeroed. 7821 * segment will have bad ECC and be zeroed.
7822 * 7822 *
7823 * Return value: 7823 * Return value:
7824 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7824 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7825 **/ 7825 **/
7826 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) 7826 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7827 { 7827 {
7828 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7828 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7829 int rc = IPR_RC_JOB_RETURN; 7829 int rc = IPR_RC_JOB_RETURN;
7830 7830
7831 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { 7831 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7832 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 7832 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7833 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 7833 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7834 } else { 7834 } else {
7835 ipr_cmd->job_step = ipr_reset_block_config_access; 7835 ipr_cmd->job_step = ipr_reset_block_config_access;
7836 rc = IPR_RC_JOB_CONTINUE; 7836 rc = IPR_RC_JOB_CONTINUE;
7837 } 7837 }
7838 7838
7839 return rc; 7839 return rc;
7840 } 7840 }
7841 7841
7842 /** 7842 /**
7843 * ipr_reset_alert - Alert the adapter of a pending reset 7843 * ipr_reset_alert - Alert the adapter of a pending reset
7844 * @ipr_cmd: ipr command struct 7844 * @ipr_cmd: ipr command struct
7845 * 7845 *
7846 * Description: This function alerts the adapter that it will be reset. 7846 * Description: This function alerts the adapter that it will be reset.
7847 * If memory space is not currently enabled, proceed directly 7847 * If memory space is not currently enabled, proceed directly
7848 * to running BIST on the adapter. The timer must always be started 7848 * to running BIST on the adapter. The timer must always be started
7849 * so we guarantee we do not run BIST from ipr_isr. 7849 * so we guarantee we do not run BIST from ipr_isr.
7850 * 7850 *
7851 * Return value: 7851 * Return value:
7852 * IPR_RC_JOB_RETURN 7852 * IPR_RC_JOB_RETURN
7853 **/ 7853 **/
7854 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) 7854 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7855 { 7855 {
7856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7857 u16 cmd_reg; 7857 u16 cmd_reg;
7858 int rc; 7858 int rc;
7859 7859
7860 ENTER; 7860 ENTER;
7861 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); 7861 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7862 7862
7863 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 7863 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7864 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 7864 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7865 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 7865 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7866 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 7866 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7867 } else { 7867 } else {
7868 ipr_cmd->job_step = ipr_reset_block_config_access; 7868 ipr_cmd->job_step = ipr_reset_block_config_access;
7869 } 7869 }
7870 7870
7871 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 7871 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7872 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 7872 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7873 7873
7874 LEAVE; 7874 LEAVE;
7875 return IPR_RC_JOB_RETURN; 7875 return IPR_RC_JOB_RETURN;
7876 } 7876 }
7877 7877
7878 /** 7878 /**
7879 * ipr_reset_ucode_download_done - Microcode download completion 7879 * ipr_reset_ucode_download_done - Microcode download completion
7880 * @ipr_cmd: ipr command struct 7880 * @ipr_cmd: ipr command struct
7881 * 7881 *
7882 * Description: This function unmaps the microcode download buffer. 7882 * Description: This function unmaps the microcode download buffer.
7883 * 7883 *
7884 * Return value: 7884 * Return value:
7885 * IPR_RC_JOB_CONTINUE 7885 * IPR_RC_JOB_CONTINUE
7886 **/ 7886 **/
7887 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) 7887 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7888 { 7888 {
7889 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7889 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7890 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 7890 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7891 7891
7892 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, 7892 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7893 sglist->num_sg, DMA_TO_DEVICE); 7893 sglist->num_sg, DMA_TO_DEVICE);
7894 7894
7895 ipr_cmd->job_step = ipr_reset_alert; 7895 ipr_cmd->job_step = ipr_reset_alert;
7896 return IPR_RC_JOB_CONTINUE; 7896 return IPR_RC_JOB_CONTINUE;
7897 } 7897 }
7898 7898
7899 /** 7899 /**
7900 * ipr_reset_ucode_download - Download microcode to the adapter 7900 * ipr_reset_ucode_download - Download microcode to the adapter
7901 * @ipr_cmd: ipr command struct 7901 * @ipr_cmd: ipr command struct
7902 * 7902 *
7903 * Description: This function checks to see if there is microcode 7903 * Description: This function checks to see if there is microcode
7904 * to download to the adapter. If there is, a download is performed. 7904 * to download to the adapter. If there is, a download is performed.
7905 * 7905 *
7906 * Return value: 7906 * Return value:
7907 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7907 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7908 **/ 7908 **/
7909 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) 7909 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7910 { 7910 {
7911 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7911 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7912 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 7912 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7913 7913
7914 ENTER; 7914 ENTER;
7915 ipr_cmd->job_step = ipr_reset_alert; 7915 ipr_cmd->job_step = ipr_reset_alert;
7916 7916
7917 if (!sglist) 7917 if (!sglist)
7918 return IPR_RC_JOB_CONTINUE; 7918 return IPR_RC_JOB_CONTINUE;
7919 7919
7920 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7920 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7921 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7921 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7922 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; 7922 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7923 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; 7923 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7924 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; 7924 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7925 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 7925 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7926 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 7926 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7927 7927
7928 if (ioa_cfg->sis64) 7928 if (ioa_cfg->sis64)
7929 ipr_build_ucode_ioadl64(ipr_cmd, sglist); 7929 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7930 else 7930 else
7931 ipr_build_ucode_ioadl(ipr_cmd, sglist); 7931 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7932 ipr_cmd->job_step = ipr_reset_ucode_download_done; 7932 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7933 7933
7934 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7934 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7935 IPR_WRITE_BUFFER_TIMEOUT); 7935 IPR_WRITE_BUFFER_TIMEOUT);
7936 7936
7937 LEAVE; 7937 LEAVE;
7938 return IPR_RC_JOB_RETURN; 7938 return IPR_RC_JOB_RETURN;
7939 } 7939 }
7940 7940
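Bytes 6-8 of the WRITE BUFFER CDB built above hold the transfer length as a 24-bit big-endian field, per the SCSI command format. A hedged sketch of that packing (hypothetical helper name):

/* illustrative: pack a 24-bit big-endian length into cdb[6..8] */
static void pack_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;	/* most significant byte */
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;		/* least significant byte */
}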
7941 /** 7941 /**
7942 * ipr_reset_shutdown_ioa - Shutdown the adapter 7942 * ipr_reset_shutdown_ioa - Shutdown the adapter
7943 * @ipr_cmd: ipr command struct 7943 * @ipr_cmd: ipr command struct
7944 * 7944 *
7945 * Description: This function issues an adapter shutdown of the 7945 * Description: This function issues an adapter shutdown of the
7946 * specified type to the specified adapter as part of the 7946 * specified type to the specified adapter as part of the
7947 * adapter reset job. 7947 * adapter reset job.
7948 * 7948 *
7949 * Return value: 7949 * Return value:
7950 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7950 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7951 **/ 7951 **/
7952 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) 7952 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7953 { 7953 {
7954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7955 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; 7955 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7956 unsigned long timeout; 7956 unsigned long timeout;
7957 int rc = IPR_RC_JOB_CONTINUE; 7957 int rc = IPR_RC_JOB_CONTINUE;
7958 7958
7959 ENTER; 7959 ENTER;
7960 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) { 7960 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7961 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7961 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7962 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7962 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7963 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 7963 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7964 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 7964 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7965 7965
7966 if (shutdown_type == IPR_SHUTDOWN_NORMAL) 7966 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7967 timeout = IPR_SHUTDOWN_TIMEOUT; 7967 timeout = IPR_SHUTDOWN_TIMEOUT;
7968 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 7968 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7969 timeout = IPR_INTERNAL_TIMEOUT; 7969 timeout = IPR_INTERNAL_TIMEOUT;
7970 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 7970 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7971 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; 7971 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7972 else 7972 else
7973 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 7973 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7974 7974
7975 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 7975 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7976 7976
7977 rc = IPR_RC_JOB_RETURN; 7977 rc = IPR_RC_JOB_RETURN;
7978 ipr_cmd->job_step = ipr_reset_ucode_download; 7978 ipr_cmd->job_step = ipr_reset_ucode_download;
7979 } else 7979 } else
7980 ipr_cmd->job_step = ipr_reset_alert; 7980 ipr_cmd->job_step = ipr_reset_alert;
7981 7981
7982 LEAVE; 7982 LEAVE;
7983 return rc; 7983 return rc;
7984 } 7984 }
7985 7985
7986 /** 7986 /**
7987 * ipr_reset_ioa_job - Adapter reset job 7987 * ipr_reset_ioa_job - Adapter reset job
7988 * @ipr_cmd: ipr command struct 7988 * @ipr_cmd: ipr command struct
7989 * 7989 *
7990 * Description: This function is the job router for the adapter reset job. 7990 * Description: This function is the job router for the adapter reset job.
7991 * 7991 *
7992 * Return value: 7992 * Return value:
7993 * none 7993 * none
7994 **/ 7994 **/
7995 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) 7995 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7996 { 7996 {
7997 u32 rc, ioasc; 7997 u32 rc, ioasc;
7998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7999 7999
8000 do { 8000 do {
8001 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 8001 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8002 8002
8003 if (ioa_cfg->reset_cmd != ipr_cmd) { 8003 if (ioa_cfg->reset_cmd != ipr_cmd) {
8004 /* 8004 /*
8005 * We are doing nested adapter resets and this is 8005 * We are doing nested adapter resets and this is
8006 * not the current reset job. 8006 * not the current reset job.
8007 */ 8007 */
8008 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 8008 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8009 return; 8009 return;
8010 } 8010 }
8011 8011
8012 if (IPR_IOASC_SENSE_KEY(ioasc)) { 8012 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8013 rc = ipr_cmd->job_step_failed(ipr_cmd); 8013 rc = ipr_cmd->job_step_failed(ipr_cmd);
8014 if (rc == IPR_RC_JOB_RETURN) 8014 if (rc == IPR_RC_JOB_RETURN)
8015 return; 8015 return;
8016 } 8016 }
8017 8017
8018 ipr_reinit_ipr_cmnd(ipr_cmd); 8018 ipr_reinit_ipr_cmnd(ipr_cmd);
8019 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; 8019 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8020 rc = ipr_cmd->job_step(ipr_cmd); 8020 rc = ipr_cmd->job_step(ipr_cmd);
8021 } while(rc == IPR_RC_JOB_CONTINUE); 8021 } while(rc == IPR_RC_JOB_CONTINUE);
8022 } 8022 }
8023 8023
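This router turns the whole reset sequence into a cooperative state machine: a job step either finishes synchronously and returns IPR_RC_JOB_CONTINUE, in which case the loop immediately runs the successor it installed in job_step, or it kicks off asynchronous work (a command or timer whose completion re-enters this router via ->done) and yields with IPR_RC_JOB_RETURN. A stripped-down sketch of the contract each step follows (names hypothetical):

static int example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = example_next_step;	/* install successor first */

	if (work_already_done(ipr_cmd))		/* hypothetical predicate */
		return IPR_RC_JOB_CONTINUE;	/* router runs successor now */

	start_async_work(ipr_cmd);	/* completion re-enters the router */
	return IPR_RC_JOB_RETURN;	/* yield until the callback fires */
}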
8024 /** 8024 /**
8025 * _ipr_initiate_ioa_reset - Initiate an adapter reset 8025 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8026 * @ioa_cfg: ioa config struct 8026 * @ioa_cfg: ioa config struct
8027 * @job_step: first job step of reset job 8027 * @job_step: first job step of reset job
8028 * @shutdown_type: shutdown type 8028 * @shutdown_type: shutdown type
8029 * 8029 *
8030 * Description: This function will initiate the reset of the given adapter 8030 * Description: This function will initiate the reset of the given adapter
8031 * starting at the selected job step. 8031 * starting at the selected job step.
8032 * If the caller needs to wait on the completion of the reset, 8032 * If the caller needs to wait on the completion of the reset,
8033 * the caller must sleep on the reset_wait_q. 8033 * the caller must sleep on the reset_wait_q.
8034 * 8034 *
8035 * Return value: 8035 * Return value:
8036 * none 8036 * none
8037 **/ 8037 **/
8038 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 8038 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8039 int (*job_step) (struct ipr_cmnd *), 8039 int (*job_step) (struct ipr_cmnd *),
8040 enum ipr_shutdown_type shutdown_type) 8040 enum ipr_shutdown_type shutdown_type)
8041 { 8041 {
8042 struct ipr_cmnd *ipr_cmd; 8042 struct ipr_cmnd *ipr_cmd;
8043 8043
8044 ioa_cfg->in_reset_reload = 1; 8044 ioa_cfg->in_reset_reload = 1;
8045 ioa_cfg->allow_cmds = 0; 8045 ioa_cfg->allow_cmds = 0;
8046 scsi_block_requests(ioa_cfg->host); 8046 scsi_block_requests(ioa_cfg->host);
8047 8047
8048 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 8048 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8049 ioa_cfg->reset_cmd = ipr_cmd; 8049 ioa_cfg->reset_cmd = ipr_cmd;
8050 ipr_cmd->job_step = job_step; 8050 ipr_cmd->job_step = job_step;
8051 ipr_cmd->u.shutdown_type = shutdown_type; 8051 ipr_cmd->u.shutdown_type = shutdown_type;
8052 8052
8053 ipr_reset_ioa_job(ipr_cmd); 8053 ipr_reset_ioa_job(ipr_cmd);
8054 } 8054 }

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *      none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                                   enum ipr_shutdown_type shutdown_type)
{
        if (ioa_cfg->ioa_is_dead)
                return;

        if (ioa_cfg->in_reset_reload) {
                if (ioa_cfg->sdt_state == GET_DUMP)
                        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
                else if (ioa_cfg->sdt_state == READ_DUMP)
                        ioa_cfg->sdt_state = ABORT_DUMP;
        }

        if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
                dev_err(&ioa_cfg->pdev->dev,
                        "IOA taken offline - error recovery failed\n");

                ioa_cfg->reset_retries = 0;
                ioa_cfg->ioa_is_dead = 1;

                if (ioa_cfg->in_ioa_bringdown) {
                        ioa_cfg->reset_cmd = NULL;
                        ioa_cfg->in_reset_reload = 0;
                        ipr_fail_all_ops(ioa_cfg);
                        wake_up_all(&ioa_cfg->reset_wait_q);

                        spin_unlock_irq(ioa_cfg->host->host_lock);
                        scsi_unblock_requests(ioa_cfg->host);
                        spin_lock_irq(ioa_cfg->host->host_lock);
                        return;
                } else {
                        ioa_cfg->in_ioa_bringdown = 1;
                        shutdown_type = IPR_SHUTDOWN_NONE;
                }
        }

        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
                                shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd: ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
        /* Disallow new interrupts, avoid loop */
        ipr_cmd->ioa_cfg->allow_interrupts = 0;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
        ipr_cmd->done = ipr_reset_ioa_job;
        return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev: PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
        unsigned long flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
        unsigned long flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->needs_warm_reset)
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        else
                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
                                        IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev: PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
        unsigned long flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
        ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
        ioa_cfg->in_ioa_bringdown = 1;
        ioa_cfg->allow_cmds = 0;
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_frozen:
                ipr_pci_frozen(pdev);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                ipr_pci_perm_failure(pdev);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                break;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}
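
/*
 * Editor's note: the three callbacks above plug into the kernel's PCI
 * error-recovery core through a struct pci_error_handlers referenced
 * from the driver's pci_driver. A sketch of the wiring, assuming the
 * variable name (the actual table lives elsewhere in this file):
 */
static struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .slot_reset     = ipr_pci_slot_reset,
};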

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg: ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
        int rc = 0;
        unsigned long host_lock_flags = 0;

        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
        if (ioa_cfg->needs_hard_reset) {
                ioa_cfg->needs_hard_reset = 0;
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        } else
                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
                                        IPR_SHUTDOWN_NONE);

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

        if (ioa_cfg->ioa_is_dead) {
                rc = -EIO;
        } else if (ipr_invalid_adapter(ioa_cfg)) {
                if (!ipr_testmode)
                        rc = -EIO;

                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter not supported in this hardware configuration.\n");
        }

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

        LEAVE;
        return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *      none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
        int i;

        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
                if (ioa_cfg->ipr_cmnd_list[i])
                        pci_pool_free(ioa_cfg->ipr_cmd_pool,
                                      ioa_cfg->ipr_cmnd_list[i],
                                      ioa_cfg->ipr_cmnd_list_dma[i]);

                ioa_cfg->ipr_cmnd_list[i] = NULL;
        }

        if (ioa_cfg->ipr_cmd_pool)
                pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

        kfree(ioa_cfg->ipr_cmnd_list);
        kfree(ioa_cfg->ipr_cmnd_list_dma);
        ioa_cfg->ipr_cmnd_list = NULL;
        ioa_cfg->ipr_cmnd_list_dma = NULL;
        ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg: ioa cfg struct
 *
 * Return value:
 *      nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
        int i;

        kfree(ioa_cfg->res_entries);
        pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
                            ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
        ipr_free_cmd_blks(ioa_cfg);
        pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
                            ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
        pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
                            ioa_cfg->u.cfg_table,
                            ioa_cfg->cfg_table_dma);

        for (i = 0; i < IPR_NUM_HCAMS; i++) {
                pci_free_consistent(ioa_cfg->pdev,
                                    sizeof(struct ipr_hostrcb),
                                    ioa_cfg->hostrcb[i],
                                    ioa_cfg->hostrcb_dma[i]);
        }

        ipr_free_dump(ioa_cfg);
        kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *      none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
        struct pci_dev *pdev = ioa_cfg->pdev;

        ENTER;
        free_irq(pdev->irq, ioa_cfg);
        pci_disable_msi(pdev);
        iounmap(ioa_cfg->hdw_dma_regs);
        pci_release_regions(pdev);
        ipr_free_mem(ioa_cfg);
        scsi_host_put(ioa_cfg->host);
        pci_disable_device(pdev);
        LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *      0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;
        dma_addr_t dma_addr;
        int i;

        ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
-                                               sizeof(struct ipr_cmnd), 16, 0);
+                                               sizeof(struct ipr_cmnd), 512, 0);

        if (!ioa_cfg->ipr_cmd_pool)
                return -ENOMEM;

        ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
        ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

        if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
                ipr_free_cmd_blks(ioa_cfg);
                return -ENOMEM;
        }

        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
                ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

                if (!ipr_cmd) {
                        ipr_free_cmd_blks(ioa_cfg);
                        return -ENOMEM;
                }

                memset(ipr_cmd, 0, sizeof(*ipr_cmd));
                ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
                ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

                ioarcb = &ipr_cmd->ioarcb;
                ipr_cmd->dma_addr = dma_addr;
                if (ioa_cfg->sis64)
                        ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
                else
                        ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

                ioarcb->host_response_handle = cpu_to_be32(i << 2);
                if (ioa_cfg->sis64) {
                        ioarcb->u.sis64_addr_data.data_ioadl_addr =
                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                        ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
                                cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
                } else {
                        ioarcb->write_ioadl_addr =
                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                        ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                        ioarcb->ioasa_host_pci_addr =
                                cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
                }
                ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
                ipr_cmd->cmd_index = i;
                ipr_cmd->ioa_cfg = ioa_cfg;
                ipr_cmd->sense_buffer_dma = dma_addr +
                        offsetof(struct ipr_cmnd, sense_buffer);

                list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
        }

        return 0;
}
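
/*
 * Editor's note: the 512 passed to pci_pool_create() above (the commit's
 * one-line change, from 16) is the pool's alignment guarantee, so every
 * block handed out by pci_pool_alloc() satisfies (dma_addr % 512) == 0
 * and a command block never straddles the boundary the newest adapters
 * prefer. A sanity-check sketch; the helper below is illustrative only,
 * not part of the driver:
 */
static void ipr_check_cmd_alignment(struct ipr_cmnd *ipr_cmd)
{
        /* 512 bytes is the largest command block we can send, so
         * aligning to 512 keeps each block inside one boundary window */
        WARN_ON(ipr_cmd->dma_addr & (512 - 1));
}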

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *      0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
        struct pci_dev *pdev = ioa_cfg->pdev;
        int i, rc = -ENOMEM;

        ENTER;
        ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
                                       ioa_cfg->max_devs_supported, GFP_KERNEL);

        if (!ioa_cfg->res_entries)
                goto out;

        if (ioa_cfg->sis64) {
                ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
                                              BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
                ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
                                             BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
                ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
                                            BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
        }

        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
                list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
                ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
        }

        ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
                                                sizeof(struct ipr_misc_cbs),
                                                &ioa_cfg->vpd_cbs_dma);

        if (!ioa_cfg->vpd_cbs)
                goto out_free_res_entries;

        if (ipr_alloc_cmd_blks(ioa_cfg))
                goto out_free_vpd_cbs;

        ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
                                                 sizeof(u32) * IPR_NUM_CMD_BLKS,
                                                 &ioa_cfg->host_rrq_dma);

        if (!ioa_cfg->host_rrq)
                goto out_ipr_free_cmd_blocks;

        ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
                                                    ioa_cfg->cfg_table_size,
                                                    &ioa_cfg->cfg_table_dma);

        if (!ioa_cfg->u.cfg_table)
                goto out_free_host_rrq;

        for (i = 0; i < IPR_NUM_HCAMS; i++) {
                ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
                                                           sizeof(struct ipr_hostrcb),
                                                           &ioa_cfg->hostrcb_dma[i]);

                if (!ioa_cfg->hostrcb[i])
                        goto out_free_hostrcb_dma;

                ioa_cfg->hostrcb[i]->hostrcb_dma =
                        ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
                ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
        }

        ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
                                 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

        if (!ioa_cfg->trace)
                goto out_free_hostrcb_dma;

        rc = 0;
out:
        LEAVE;
        return rc;

out_free_hostrcb_dma:
        while (i-- > 0) {
                pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
                                    ioa_cfg->hostrcb[i],
                                    ioa_cfg->hostrcb_dma[i]);
        }
        pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
                            ioa_cfg->u.cfg_table,
                            ioa_cfg->cfg_table_dma);
out_free_host_rrq:
        pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
                            ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
        ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
        pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
                            ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
        kfree(ioa_cfg->res_entries);
        goto out;
}
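
/*
 * Editor's note: ipr_alloc_mem() above follows the idiomatic kernel
 * unwind pattern: each failing allocation jumps to a label that frees
 * only what was allocated before it, giving a single exit path. A
 * minimal, self-contained sketch of the shape (names hypothetical; in
 * real code the pointers would live in a long-lived struct):
 */
static int example_unwind_alloc(void **pa, void **pb)
{
        int rc = -ENOMEM;

        *pa = kzalloc(32, GFP_KERNEL);
        if (!*pa)
                goto out;
        *pb = kzalloc(64, GFP_KERNEL);
        if (!*pb)
                goto out_free_a;
        return 0;

out_free_a:
        kfree(*pa);
        *pa = NULL;
out:
        return rc;
}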

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 *      none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
        int i;

        for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
                ioa_cfg->bus_attr[i].bus = i;
                ioa_cfg->bus_attr[i].qas_enabled = 0;
                ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
                if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
                        ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
                else
                        ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
        }
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg: ioa config struct
 * @host: scsi host struct
 * @pdev: PCI dev struct
 *
 * Return value:
 *      none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                       struct Scsi_Host *host, struct pci_dev *pdev)
{
        const struct ipr_interrupt_offsets *p;
        struct ipr_interrupts *t;
        void __iomem *base;

        ioa_cfg->host = host;
        ioa_cfg->pdev = pdev;
        ioa_cfg->log_level = ipr_log_level;
        ioa_cfg->doorbell = IPR_DOORBELL;
        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
        sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
        sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

        INIT_LIST_HEAD(&ioa_cfg->free_q);
        INIT_LIST_HEAD(&ioa_cfg->pending_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        ioa_cfg->sdt_state = INACTIVE;

        ipr_initialize_bus_attr(ioa_cfg);
        ioa_cfg->max_devs_supported = ipr_max_devs;

        if (ioa_cfg->sis64) {
                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
                        ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
        } else {
                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
                        ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
        }
        host->max_channel = IPR_MAX_BUS_TO_SCAN;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
        pci_set_drvdata(pdev, ioa_cfg);

        p = &ioa_cfg->chip_cfg->regs;
        t = &ioa_cfg->regs;
        base = ioa_cfg->hdw_dma_regs;

        t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
        t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
        t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
        t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
        t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
        t->clr_interrupt_reg = base + p->clr_interrupt_reg;
        t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
        t->sense_interrupt_reg = base + p->sense_interrupt_reg;
        t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
        t->ioarrin_reg = base + p->ioarrin_reg;
        t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
        t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
        t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
        t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
        t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
        t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

        if (ioa_cfg->sis64) {
                t->init_feedback_reg = base + p->init_feedback_reg;
                t->dump_addr_reg = base + p->dump_addr_reg;
                t->dump_data_reg = base + p->dump_data_reg;
                t->endian_swap_reg = base + p->endian_swap_reg;
        }
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id: PCI device id struct
 *
 * Return value:
 *      ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t * __devinit
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
                if (ipr_chip[i].vendor == dev_id->vendor &&
                    ipr_chip[i].device == dev_id->device)
                        return &ipr_chip[i];
        return NULL;
}
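
/*
 * Editor's note: ipr_chip[] is a simple vendor/device lookup table
 * defined earlier in the file. The entry below is only a sketch of its
 * shape, based on the fields this section uses (vendor, device,
 * intr_type, sis_type, cfg); it is not an actual row from the table:
 */
static const struct ipr_chip_t example_chip_entry = {
        .vendor    = PCI_VENDOR_ID_IBM,
        .device    = PCI_DEVICE_ID_IBM_OBSIDIAN_E, /* referenced below */
        .intr_type = IPR_USE_MSI,  /* checked before pci_enable_msi() */
        .sis_type  = IPR_SIS64,    /* selects the 64-bit SIS code paths */
};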

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq: interrupt number
 * @devp: ioa config struct pointer
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
        unsigned long lock_flags = 0;
        irqreturn_t rc = IRQ_HANDLED;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

        ioa_cfg->msi_received = 1;
        wake_up(&ioa_cfg->msi_wait_q);

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
                                  struct pci_dev *pdev)
{
        int rc;
        volatile u32 int_reg;
        unsigned long lock_flags = 0;

        ENTER;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        ioa_cfg->msi_received = 0;
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
        if (rc) {
                dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
                return rc;
        } else if (ipr_debug)
                dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (!ioa_cfg->msi_received) {
                /* MSI test failed */
                dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
                rc = -EOPNOTSUPP;
        } else if (ipr_debug)
                dev_info(&pdev->dev, "MSI test succeeded.\n");

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        free_irq(pdev->irq, ioa_cfg);

        LEAVE;

        return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
                                   const struct pci_device_id *dev_id)
{
        struct ipr_ioa_cfg *ioa_cfg;
        struct Scsi_Host *host;
        unsigned long ipr_regs_pci;
        void __iomem *ipr_regs;
        int rc = PCIBIOS_SUCCESSFUL;
        volatile u32 mask, uproc, interrupts;

        ENTER;

        if ((rc = pci_enable_device(pdev))) {
                dev_err(&pdev->dev, "Cannot enable adapter\n");
                goto out;
        }

        dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

        host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

        if (!host) {
                dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
                rc = -ENOMEM;
                goto out_disable;
        }

        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
        ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
                      sata_port_info.flags, &ipr_sata_ops);

        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

        if (!ioa_cfg->ipr_chip) {
                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
                        dev_id->vendor, dev_id->device);
                goto out_scsi_host_put;
        }

        /* set SIS 32 or SIS 64 */
        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
        ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
        ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

        if (ipr_transop_timeout)
                ioa_cfg->transop_timeout = ipr_transop_timeout;
        else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
                ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
        else
                ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

        ioa_cfg->revid = pdev->revision;

        ipr_regs_pci = pci_resource_start(pdev, 0);

        rc = pci_request_regions(pdev, IPR_NAME);
        if (rc < 0) {
                dev_err(&pdev->dev,
                        "Couldn't register memory range of registers\n");
                goto out_scsi_host_put;
        }

        ipr_regs = pci_ioremap_bar(pdev, 0);

        if (!ipr_regs) {
                dev_err(&pdev->dev,
                        "Couldn't map memory range of registers\n");
                rc = -ENOMEM;
                goto out_release_regions;
        }

        ioa_cfg->hdw_dma_regs = ipr_regs;
        ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
        ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

        ipr_init_ioa_cfg(ioa_cfg, host, pdev);

        pci_set_master(pdev);

        if (ioa_cfg->sis64) {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
                if (rc < 0) {
                        dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
                        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                }

        } else
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

        if (rc < 0) {
                dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
                goto cleanup_nomem;
        }

        rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                                   ioa_cfg->chip_cfg->cache_line_size);

        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&pdev->dev, "Write of cache line size failed\n");
                rc = -EIO;
                goto cleanup_nomem;
        }

        /* Enable MSI style interrupts if they are supported. */
        if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
                rc = ipr_test_msi(ioa_cfg, pdev);
                if (rc == -EOPNOTSUPP)
                        pci_disable_msi(pdev);
                else if (rc)
                        goto out_msi_disable;
                else
                        dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
        } else if (ipr_debug)
                dev_info(&pdev->dev, "Cannot enable MSI.\n");

        /* Save away PCI config space for use following IOA reset */
        rc = pci_save_state(pdev);

        if (rc != PCIBIOS_SUCCESSFUL) {
                dev_err(&pdev->dev, "Failed to save PCI config space\n");
                rc = -EIO;
                goto out_msi_disable;
        }

        if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
                goto out_msi_disable;

        if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
                goto out_msi_disable;

        if (ioa_cfg->sis64)
                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
                                           + ((sizeof(struct ipr_config_table_entry64)
                                               * ioa_cfg->max_devs_supported)));
        else
                ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
                                           + ((sizeof(struct ipr_config_table_entry)
                                               * ioa_cfg->max_devs_supported)));

        rc = ipr_alloc_mem(ioa_cfg);
        if (rc < 0) {
                dev_err(&pdev->dev,
                        "Couldn't allocate enough memory for device driver!\n");
                goto out_msi_disable;
        }

        /*
         * If HRRQ updated interrupt is not masked, or reset alert is set,
         * the card is in an unknown state and needs a hard reset
         */
        mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
        interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
                ioa_cfg->needs_hard_reset = 1;
        if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
                ioa_cfg->needs_hard_reset = 1;
        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
                ioa_cfg->ioa_unit_checked = 1;

        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
        rc = request_irq(pdev->irq, ipr_isr,
                         ioa_cfg->msi_received ? 0 : IRQF_SHARED,
                         IPR_NAME, ioa_cfg);

        if (rc) {
                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
                        pdev->irq, rc);
                goto cleanup_nolog;
        }

        if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
            (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
                ioa_cfg->needs_warm_reset = 1;
                ioa_cfg->reset = ipr_reset_slot_reset;
        } else
                ioa_cfg->reset = ipr_reset_start_bist;

        spin_lock(&ipr_driver_lock);
        list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
        spin_unlock(&ipr_driver_lock);

        LEAVE;
out:
        return rc;

cleanup_nolog:
        ipr_free_mem(ioa_cfg);
out_msi_disable:
        pci_disable_msi(pdev);
cleanup_nomem:
        iounmap(ipr_regs);
out_release_regions:
        pci_release_regions(pdev);
out_scsi_host_put:
        scsi_host_put(host);
out_disable:
        pci_disable_device(pdev);
        goto out;
}
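
/*
 * Editor's note: for orientation, ipr_probe_ioa() runs from the driver's
 * probe entry point, and the PCI error handlers shown earlier hang off
 * the same pci_driver. A sketch of the registration, assuming the names
 * used elsewhere in ipr.c (ipr_pci_table, ipr_probe):
 */
static struct pci_driver ipr_driver = {
        .name        = IPR_NAME,
        .id_table    = ipr_pci_table,
        .probe       = ipr_probe,
        .err_handler = &ipr_err_handler,
};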

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

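	/*
	 * Probe every possible VSET address; scsi_add_device() simply
	 * returns an error for addresses where nothing responds, so
	 * over-scanning is harmless.
	 */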
	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

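	/*
	 * wait_event() can sleep, so the host lock must be dropped around
	 * it and in_reset_reload rechecked once the lock is retaken.
	 */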
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work_sync(&ioa_cfg->work_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

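	/*
	 * All sysfs attributes are in place; start device discovery.
	 * The adapter itself is also exposed as a SCSI device at a fixed
	 * (bus, target, lun) address.
	 */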
	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

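/*
 * PCI IDs claimed by this driver. The trailing field of each entry is
 * driver_data, used here to carry IPR_USE_* quirk flags checked at
 * probe time.
 */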
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

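/*
 * Minimal EEH/PCI error recovery hooks: note the error when it is
 * detected, then reinitialize the adapter once the slot has been reset.
 */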
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

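		/*
		 * Fire and forget: ipr_halt_done() only returns the command
		 * block to the free queue, so nothing waits on these
		 * shutdown-prepare commands to complete.
		 */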
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}

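/* Positional initializer: .notifier_call = ipr_halt, .next = NULL,
 * .priority = 0.
 */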
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

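	/*
	 * Note: the reboot notifier stays registered even if
	 * pci_register_driver() fails; ipr_halt() simply sees an empty
	 * ipr_ioa_head list in that case.
	 */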
	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);