Commit 8e65e2f07c1fdbd952570591bf78316aeed1c74a

Authored by Bradley Grove
Committed by James Bottomley
1 parent 9588d24e36

[SCSI] esas2r: Fixes for big-endian platforms

In esas2r_format_init_msg(), the sgl_page_size and epoch_time parameters
are converted to little-endian and the firmware version read from
the HBA is converted to CPU endianness.

In esas2r_rq_init_request, correct and simplify the construction
of the SCSI handle.

These fixes are the result of testing on a PPC64 machine.

Signed-off-by: Bradley Grove <bgrove@attotech.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Showing 2 changed files with 9 additions and 9 deletions Inline Diff

drivers/scsi/esas2r/esas2r.h
1 /* 1 /*
2 * linux/drivers/scsi/esas2r/esas2r.h 2 * linux/drivers/scsi/esas2r/esas2r.h
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers 3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 * 4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc. 5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com) 6 * (mailto:linuxdrivers@attotech.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2 10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version. 11 * of the License, or (at your option) any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * NO WARRANTY 18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and 23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its 24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to 25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data, 26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations. 27 * programs or equipment, and unavailability or interruption of operations.
28 * 28 *
29 * DISCLAIMER OF LIABILITY 29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 * 37 *
38 * You should have received a copy of the GNU General Public License 38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software 39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA. 41 * USA.
42 */ 42 */
43 43
44 #include <linux/kernel.h> 44 #include <linux/kernel.h>
45 #include <linux/delay.h> 45 #include <linux/delay.h>
46 #include <linux/pci.h> 46 #include <linux/pci.h>
47 #include <linux/proc_fs.h> 47 #include <linux/proc_fs.h>
48 #include <linux/workqueue.h> 48 #include <linux/workqueue.h>
49 #include <linux/interrupt.h> 49 #include <linux/interrupt.h>
50 #include <linux/module.h> 50 #include <linux/module.h>
51 #include <linux/vmalloc.h> 51 #include <linux/vmalloc.h>
52 #include <scsi/scsi.h> 52 #include <scsi/scsi.h>
53 #include <scsi/scsi_host.h> 53 #include <scsi/scsi_host.h>
54 #include <scsi/scsi_cmnd.h> 54 #include <scsi/scsi_cmnd.h>
55 #include <scsi/scsi_device.h> 55 #include <scsi/scsi_device.h>
56 #include <scsi/scsi_eh.h> 56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h> 57 #include <scsi/scsi_tcq.h>
58 58
59 #include "esas2r_log.h" 59 #include "esas2r_log.h"
60 #include "atioctl.h" 60 #include "atioctl.h"
61 #include "atvda.h" 61 #include "atvda.h"
62 62
63 #ifndef ESAS2R_H 63 #ifndef ESAS2R_H
64 #define ESAS2R_H 64 #define ESAS2R_H
65 65
66 /* Global Variables */ 66 /* Global Variables */
67 extern struct esas2r_adapter *esas2r_adapters[]; 67 extern struct esas2r_adapter *esas2r_adapters[];
68 extern u8 *esas2r_buffered_ioctl; 68 extern u8 *esas2r_buffered_ioctl;
69 extern dma_addr_t esas2r_buffered_ioctl_addr; 69 extern dma_addr_t esas2r_buffered_ioctl_addr;
70 extern u32 esas2r_buffered_ioctl_size; 70 extern u32 esas2r_buffered_ioctl_size;
71 extern struct pci_dev *esas2r_buffered_ioctl_pcid; 71 extern struct pci_dev *esas2r_buffered_ioctl_pcid;
72 #define SGL_PG_SZ_MIN 64 72 #define SGL_PG_SZ_MIN 64
73 #define SGL_PG_SZ_MAX 1024 73 #define SGL_PG_SZ_MAX 1024
74 extern int sgl_page_size; 74 extern int sgl_page_size;
75 #define NUM_SGL_MIN 8 75 #define NUM_SGL_MIN 8
76 #define NUM_SGL_MAX 2048 76 #define NUM_SGL_MAX 2048
77 extern int num_sg_lists; 77 extern int num_sg_lists;
78 #define NUM_REQ_MIN 4 78 #define NUM_REQ_MIN 4
79 #define NUM_REQ_MAX 256 79 #define NUM_REQ_MAX 256
80 extern int num_requests; 80 extern int num_requests;
81 #define NUM_AE_MIN 2 81 #define NUM_AE_MIN 2
82 #define NUM_AE_MAX 8 82 #define NUM_AE_MAX 8
83 extern int num_ae_requests; 83 extern int num_ae_requests;
84 extern int cmd_per_lun; 84 extern int cmd_per_lun;
85 extern int can_queue; 85 extern int can_queue;
86 extern int esas2r_max_sectors; 86 extern int esas2r_max_sectors;
87 extern int sg_tablesize; 87 extern int sg_tablesize;
88 extern int interrupt_mode; 88 extern int interrupt_mode;
89 extern int num_io_requests; 89 extern int num_io_requests;
90 90
 91 /* Macro definitions */ 91 /* Macro definitions */
92 #define ESAS2R_MAX_ID 255 92 #define ESAS2R_MAX_ID 255
93 #define MAX_ADAPTERS 32 93 #define MAX_ADAPTERS 32
94 #define ESAS2R_DRVR_NAME "esas2r" 94 #define ESAS2R_DRVR_NAME "esas2r"
95 #define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter" 95 #define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
96 #define ESAS2R_MAX_DEVICES 32 96 #define ESAS2R_MAX_DEVICES 32
97 #define ATTONODE_NAME "ATTONode" 97 #define ATTONODE_NAME "ATTONode"
98 #define ESAS2R_MAJOR_REV 1 98 #define ESAS2R_MAJOR_REV 1
99 #define ESAS2R_MINOR_REV 00 99 #define ESAS2R_MINOR_REV 00
100 #define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \ 100 #define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
101 DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV) 101 DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
102 #define ESAS2R_COPYRIGHT_YEARS "2001-2013" 102 #define ESAS2R_COPYRIGHT_YEARS "2001-2013"
103 #define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384 103 #define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
104 #define ESAS2R_DEFAULT_CMD_PER_LUN 64 104 #define ESAS2R_DEFAULT_CMD_PER_LUN 64
105 #define ESAS2R_DEFAULT_NUM_SG_LISTS 1024 105 #define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
106 #define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num) 106 #define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
107 #define NUM_TO_STR(num) #num 107 #define NUM_TO_STR(num) #num
108 108
109 #define ESAS2R_SGL_ALIGN 16 109 #define ESAS2R_SGL_ALIGN 16
110 #define ESAS2R_LIST_ALIGN 16 110 #define ESAS2R_LIST_ALIGN 16
111 #define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA 111 #define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
112 #define ESAS2R_DATA_BUF_LEN 256 112 #define ESAS2R_DATA_BUF_LEN 256
113 #define ESAS2R_DEFAULT_TMO 5000 113 #define ESAS2R_DEFAULT_TMO 5000
114 #define ESAS2R_DISC_BUF_LEN 512 114 #define ESAS2R_DISC_BUF_LEN 512
115 #define ESAS2R_FWCOREDUMP_SZ 0x80000 115 #define ESAS2R_FWCOREDUMP_SZ 0x80000
116 #define ESAS2R_NUM_PHYS 8 116 #define ESAS2R_NUM_PHYS 8
117 #define ESAS2R_TARG_ID_INV 0xFFFF 117 #define ESAS2R_TARG_ID_INV 0xFFFF
118 #define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK 118 #define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
119 #define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK 119 #define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
120 #define ESAS2R_INT_DIS_MASK 0 120 #define ESAS2R_INT_DIS_MASK 0
121 #define ESAS2R_MAX_TARGETS 256 121 #define ESAS2R_MAX_TARGETS 256
122 #define ESAS2R_KOBJ_NAME_LEN 20 122 #define ESAS2R_KOBJ_NAME_LEN 20
123 123
/*
 * u16 (WORD) component macros.
 * LOBYTE/HIBYTE extract the low/high byte of a 16-bit value;
 * MAKEWORD composes a u16 from two bytes (lo in bits 0-7, hi in bits 8-15).
 * Arguments may be evaluated more than once; do not pass expressions
 * with side effects.
 */
#define LOBYTE(w) ((u8)(u16)(w))
#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))

/*
 * u32 (DWORD) component macros.
 * LOWORD/HIWORD extract the low/high 16 bits of a 32-bit value;
 * MAKEDWORD composes a u32 from two u16s (lo in bits 0-15, hi in bits 16-31).
 */
#define LOWORD(d) ((u16)(u32)(d))
#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))

/*
 * Macro to get the lowest set (nonzero) bit of a value.
 * Uses the two's-complement identity x & -x; written as (0 - (x)) to
 * avoid negating a possibly-unsigned operand with unary minus.
 */
#define LOBIT(x) ((x) & (0 - (x)))
136 136
/* These macros are provided to access the chip's control registers.
 * The register is specified by its byte offset from the register base
 * for the adapter.
 *
 * NOTE(review): the read macro casts a->regs to (void __iomem *) before
 * adding the offsets, while the write macro adds the offsets first and
 * casts the sum.  If a->regs is not a byte-sized pointer type the two
 * forms compute different addresses -- confirm against the declaration
 * of esas2r_adapter::regs before changing either one.
 */
#define esas2r_read_register_dword(a, reg) \
	readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)

#define esas2r_write_register_dword(a, reg, data) \
	writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))

/* Reading a register back forces the preceding write to be posted. */
#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)

/* This macro is provided to access the chip's data window.  The
 * register is specified by its byte offset from the window base
 * for the adapter.
 */
#define esas2r_read_data_byte(a, reg) \
	readb((void __iomem *)a->data_window + (reg))
155 155
156 /* ATTO vendor and device Ids */ 156 /* ATTO vendor and device Ids */
157 #define ATTO_VENDOR_ID 0x117C 157 #define ATTO_VENDOR_ID 0x117C
158 #define ATTO_DID_INTEL_IOP348 0x002C 158 #define ATTO_DID_INTEL_IOP348 0x002C
159 #define ATTO_DID_MV_88RC9580 0x0049 159 #define ATTO_DID_MV_88RC9580 0x0049
160 #define ATTO_DID_MV_88RC9580TS 0x0066 160 #define ATTO_DID_MV_88RC9580TS 0x0066
161 #define ATTO_DID_MV_88RC9580TSE 0x0067 161 #define ATTO_DID_MV_88RC9580TSE 0x0067
162 #define ATTO_DID_MV_88RC9580TL 0x0068 162 #define ATTO_DID_MV_88RC9580TL 0x0068
163 163
164 /* ATTO subsystem device Ids */ 164 /* ATTO subsystem device Ids */
165 #define ATTO_SSDID_TBT 0x4000 165 #define ATTO_SSDID_TBT 0x4000
166 #define ATTO_TSSC_3808 0x4066 166 #define ATTO_TSSC_3808 0x4066
167 #define ATTO_TSSC_3808E 0x4067 167 #define ATTO_TSSC_3808E 0x4067
168 #define ATTO_TLSH_1068 0x4068 168 #define ATTO_TLSH_1068 0x4068
169 #define ATTO_ESAS_R680 0x0049 169 #define ATTO_ESAS_R680 0x0049
170 #define ATTO_ESAS_R608 0x004A 170 #define ATTO_ESAS_R608 0x004A
171 #define ATTO_ESAS_R60F 0x004B 171 #define ATTO_ESAS_R60F 0x004B
172 #define ATTO_ESAS_R6F0 0x004C 172 #define ATTO_ESAS_R6F0 0x004C
173 #define ATTO_ESAS_R644 0x004D 173 #define ATTO_ESAS_R644 0x004D
174 #define ATTO_ESAS_R648 0x004E 174 #define ATTO_ESAS_R648 0x004E
175 175
176 /* 176 /*
177 * flash definitions & structures 177 * flash definitions & structures
178 * define the code types 178 * define the code types
179 */ 179 */
180 #define FBT_CPYR 0xAA00 180 #define FBT_CPYR 0xAA00
181 #define FBT_SETUP 0xAA02 181 #define FBT_SETUP 0xAA02
182 #define FBT_FLASH_VER 0xAA04 182 #define FBT_FLASH_VER 0xAA04
183 183
184 /* offsets to various locations in flash */ 184 /* offsets to various locations in flash */
185 #define FLS_OFFSET_BOOT (u32)(0x00700000) 185 #define FLS_OFFSET_BOOT (u32)(0x00700000)
186 #define FLS_OFFSET_NVR (u32)(0x007C0000) 186 #define FLS_OFFSET_NVR (u32)(0x007C0000)
187 #define FLS_OFFSET_CPYR FLS_OFFSET_NVR 187 #define FLS_OFFSET_CPYR FLS_OFFSET_NVR
188 #define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT) 188 #define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
189 #define FLS_BLOCK_SIZE (u32)(0x00020000) 189 #define FLS_BLOCK_SIZE (u32)(0x00020000)
190 #define FI_NVR_2KB 0x0800 190 #define FI_NVR_2KB 0x0800
191 #define FI_NVR_8KB 0x2000 191 #define FI_NVR_8KB 0x2000
192 #define FM_BUF_SZ 0x800 192 #define FM_BUF_SZ 0x800
193 193
194 /* 194 /*
195 * marvell frey (88R9580) register definitions 195 * marvell frey (88R9580) register definitions
196 * chip revision identifiers 196 * chip revision identifiers
197 */ 197 */
198 #define MVR_FREY_B2 0xB2 198 #define MVR_FREY_B2 0xB2
199 199
200 /* 200 /*
201 * memory window definitions. window 0 is the data window with definitions 201 * memory window definitions. window 0 is the data window with definitions
202 * of MW_DATA_XXX. window 1 is the register window with definitions of 202 * of MW_DATA_XXX. window 1 is the register window with definitions of
203 * MW_REG_XXX. 203 * MW_REG_XXX.
204 */ 204 */
205 #define MW_REG_WINDOW_SIZE (u32)(0x00040000) 205 #define MW_REG_WINDOW_SIZE (u32)(0x00040000)
206 #define MW_REG_OFFSET_HWREG (u32)(0x00000000) 206 #define MW_REG_OFFSET_HWREG (u32)(0x00000000)
207 #define MW_REG_OFFSET_PCI (u32)(0x00008000) 207 #define MW_REG_OFFSET_PCI (u32)(0x00008000)
208 #define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG) 208 #define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
209 #define MW_DATA_WINDOW_SIZE (u32)(0x00020000) 209 #define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
210 #define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000) 210 #define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
211 #define MW_DATA_ADDR_SRAM (u32)(0xF4000000) 211 #define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
212 #define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000) 212 #define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
213 213
214 /* 214 /*
215 * the following registers are for the communication 215 * the following registers are for the communication
216 * list interface (AKA message unit (MU)) 216 * list interface (AKA message unit (MU))
217 */ 217 */
218 #define MU_IN_LIST_ADDR_LO (u32)(0x00004000) 218 #define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
219 #define MU_IN_LIST_ADDR_HI (u32)(0x00004004) 219 #define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
220 220
221 #define MU_IN_LIST_WRITE (u32)(0x00004018) 221 #define MU_IN_LIST_WRITE (u32)(0x00004018)
222 #define MU_ILW_TOGGLE (u32)(0x00004000) 222 #define MU_ILW_TOGGLE (u32)(0x00004000)
223 223
224 #define MU_IN_LIST_READ (u32)(0x0000401C) 224 #define MU_IN_LIST_READ (u32)(0x0000401C)
225 #define MU_ILR_TOGGLE (u32)(0x00004000) 225 #define MU_ILR_TOGGLE (u32)(0x00004000)
226 #define MU_ILIC_LIST (u32)(0x0000000F) 226 #define MU_ILIC_LIST (u32)(0x0000000F)
227 #define MU_ILIC_LIST_F0 (u32)(0x00000000) 227 #define MU_ILIC_LIST_F0 (u32)(0x00000000)
228 #define MU_ILIC_DEST (u32)(0x00000F00) 228 #define MU_ILIC_DEST (u32)(0x00000F00)
229 #define MU_ILIC_DEST_DDR (u32)(0x00000200) 229 #define MU_ILIC_DEST_DDR (u32)(0x00000200)
230 #define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028) 230 #define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
231 231
232 #define MU_IN_LIST_CONFIG (u32)(0x0000402C) 232 #define MU_IN_LIST_CONFIG (u32)(0x0000402C)
233 #define MU_ILC_ENABLE (u32)(0x00000001) 233 #define MU_ILC_ENABLE (u32)(0x00000001)
234 #define MU_ILC_ENTRY_MASK (u32)(0x000000F0) 234 #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
235 #define MU_ILC_ENTRY_4_DW (u32)(0x00000020) 235 #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
236 #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000) 236 #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
237 #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000) 237 #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
238 #define MU_ILC_NUMBER_SHIFT 16 238 #define MU_ILC_NUMBER_SHIFT 16
239 239
240 #define MU_OUT_LIST_ADDR_LO (u32)(0x00004050) 240 #define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
241 #define MU_OUT_LIST_ADDR_HI (u32)(0x00004054) 241 #define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
242 242
243 #define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058) 243 #define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
244 #define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C) 244 #define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
245 245
246 #define MU_OUT_LIST_WRITE (u32)(0x00004068) 246 #define MU_OUT_LIST_WRITE (u32)(0x00004068)
247 #define MU_OLW_TOGGLE (u32)(0x00004000) 247 #define MU_OLW_TOGGLE (u32)(0x00004000)
248 248
249 #define MU_OUT_LIST_COPY (u32)(0x0000406C) 249 #define MU_OUT_LIST_COPY (u32)(0x0000406C)
250 #define MU_OLC_TOGGLE (u32)(0x00004000) 250 #define MU_OLC_TOGGLE (u32)(0x00004000)
251 #define MU_OLC_WRT_PTR (u32)(0x00003FFF) 251 #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
252 252
253 #define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078) 253 #define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
254 #define MU_OLIC_LIST (u32)(0x0000000F) 254 #define MU_OLIC_LIST (u32)(0x0000000F)
255 #define MU_OLIC_LIST_F0 (u32)(0x00000000) 255 #define MU_OLIC_LIST_F0 (u32)(0x00000000)
256 #define MU_OLIC_SOURCE (u32)(0x00000F00) 256 #define MU_OLIC_SOURCE (u32)(0x00000F00)
257 #define MU_OLIC_SOURCE_DDR (u32)(0x00000200) 257 #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
258 258
259 #define MU_OUT_LIST_CONFIG (u32)(0x0000407C) 259 #define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
260 #define MU_OLC_ENABLE (u32)(0x00000001) 260 #define MU_OLC_ENABLE (u32)(0x00000001)
261 #define MU_OLC_ENTRY_MASK (u32)(0x000000F0) 261 #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
262 #define MU_OLC_ENTRY_4_DW (u32)(0x00000020) 262 #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
263 #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000) 263 #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
264 #define MU_OLC_NUMBER_SHIFT 16 264 #define MU_OLC_NUMBER_SHIFT 16
265 265
266 #define MU_OUT_LIST_INT_STAT (u32)(0x00004088) 266 #define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
267 #define MU_OLIS_INT (u32)(0x00000001) 267 #define MU_OLIS_INT (u32)(0x00000001)
268 268
269 #define MU_OUT_LIST_INT_MASK (u32)(0x0000408C) 269 #define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
270 #define MU_OLIS_MASK (u32)(0x00000001) 270 #define MU_OLIS_MASK (u32)(0x00000001)
271 271
272 /* 272 /*
273 * the maximum size of the communication lists is two greater than the 273 * the maximum size of the communication lists is two greater than the
274 * maximum amount of VDA requests. the extra are to prevent queue overflow. 274 * maximum amount of VDA requests. the extra are to prevent queue overflow.
275 */ 275 */
276 #define ESAS2R_MAX_NUM_REQS 256 276 #define ESAS2R_MAX_NUM_REQS 256
277 #define ESAS2R_NUM_EXTRA 2 277 #define ESAS2R_NUM_EXTRA 2
278 #define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA) 278 #define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
279 279
280 /* 280 /*
281 * the following registers are for the CPU interface 281 * the following registers are for the CPU interface
282 */ 282 */
283 #define MU_CTL_STATUS_IN (u32)(0x00010108) 283 #define MU_CTL_STATUS_IN (u32)(0x00010108)
284 #define MU_CTL_IN_FULL_RST (u32)(0x00000020) 284 #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
285 #define MU_CTL_STATUS_IN_B2 (u32)(0x00010130) 285 #define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
286 #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000) 286 #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
287 #define MU_DOORBELL_IN (u32)(0x00010460) 287 #define MU_DOORBELL_IN (u32)(0x00010460)
288 #define DRBL_RESET_BUS (u32)(0x00000002) 288 #define DRBL_RESET_BUS (u32)(0x00000002)
289 #define DRBL_PAUSE_AE (u32)(0x00000004) 289 #define DRBL_PAUSE_AE (u32)(0x00000004)
290 #define DRBL_RESUME_AE (u32)(0x00000008) 290 #define DRBL_RESUME_AE (u32)(0x00000008)
291 #define DRBL_MSG_IFC_DOWN (u32)(0x00000010) 291 #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
292 #define DRBL_FLASH_REQ (u32)(0x00000020) 292 #define DRBL_FLASH_REQ (u32)(0x00000020)
293 #define DRBL_FLASH_DONE (u32)(0x00000040) 293 #define DRBL_FLASH_DONE (u32)(0x00000040)
294 #define DRBL_FORCE_INT (u32)(0x00000080) 294 #define DRBL_FORCE_INT (u32)(0x00000080)
295 #define DRBL_MSG_IFC_INIT (u32)(0x00000100) 295 #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
296 #define DRBL_POWER_DOWN (u32)(0x00000200) 296 #define DRBL_POWER_DOWN (u32)(0x00000200)
297 #define DRBL_DRV_VER_1 (u32)(0x00010000) 297 #define DRBL_DRV_VER_1 (u32)(0x00010000)
298 #define DRBL_DRV_VER DRBL_DRV_VER_1 298 #define DRBL_DRV_VER DRBL_DRV_VER_1
299 #define MU_DOORBELL_IN_ENB (u32)(0x00010464) 299 #define MU_DOORBELL_IN_ENB (u32)(0x00010464)
300 #define MU_DOORBELL_OUT (u32)(0x00010480) 300 #define MU_DOORBELL_OUT (u32)(0x00010480)
301 #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000) 301 #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
302 #define DRBL_UNUSED_HANDLER (u32)(0x00100000) 302 #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
303 #define DRBL_UNDEF_INSTR (u32)(0x00200000) 303 #define DRBL_UNDEF_INSTR (u32)(0x00200000)
304 #define DRBL_PREFETCH_ABORT (u32)(0x00300000) 304 #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
305 #define DRBL_DATA_ABORT (u32)(0x00400000) 305 #define DRBL_DATA_ABORT (u32)(0x00400000)
306 #define DRBL_JUMP_TO_ZERO (u32)(0x00500000) 306 #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
307 #define DRBL_FW_RESET (u32)(0x00080000) 307 #define DRBL_FW_RESET (u32)(0x00080000)
308 #define DRBL_FW_VER_MSK (u32)(0x00070000) 308 #define DRBL_FW_VER_MSK (u32)(0x00070000)
309 #define DRBL_FW_VER_0 (u32)(0x00000000) 309 #define DRBL_FW_VER_0 (u32)(0x00000000)
310 #define DRBL_FW_VER_1 (u32)(0x00010000) 310 #define DRBL_FW_VER_1 (u32)(0x00010000)
311 #define DRBL_FW_VER DRBL_FW_VER_1 311 #define DRBL_FW_VER DRBL_FW_VER_1
312 #define MU_DOORBELL_OUT_ENB (u32)(0x00010484) 312 #define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
313 #define DRBL_ENB_MASK (u32)(0x00F803FF) 313 #define DRBL_ENB_MASK (u32)(0x00F803FF)
314 #define MU_INT_STATUS_OUT (u32)(0x00010200) 314 #define MU_INT_STATUS_OUT (u32)(0x00010200)
315 #define MU_INTSTAT_POST_OUT (u32)(0x00000010) 315 #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
316 #define MU_INTSTAT_DRBL_IN (u32)(0x00000100) 316 #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
317 #define MU_INTSTAT_DRBL (u32)(0x00001000) 317 #define MU_INTSTAT_DRBL (u32)(0x00001000)
318 #define MU_INTSTAT_MASK (u32)(0x00001010) 318 #define MU_INTSTAT_MASK (u32)(0x00001010)
319 #define MU_INT_MASK_OUT (u32)(0x0001020C) 319 #define MU_INT_MASK_OUT (u32)(0x0001020C)
320 320
321 /* PCI express registers accessed via window 1 */ 321 /* PCI express registers accessed via window 1 */
322 #define MVR_PCI_WIN1_REMAP (u32)(0x00008438) 322 #define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
323 #define MVRPW1R_ENABLE (u32)(0x00000001) 323 #define MVRPW1R_ENABLE (u32)(0x00000001)
324 324
325 325
326 /* structures */ 326 /* structures */
327 327
328 /* inbound list dynamic source entry */ 328 /* inbound list dynamic source entry */
329 struct esas2r_inbound_list_source_entry { 329 struct esas2r_inbound_list_source_entry {
330 u64 address; 330 u64 address;
331 u32 length; 331 u32 length;
332 #define HWILSE_INTERFACE_F0 0x00000000 332 #define HWILSE_INTERFACE_F0 0x00000000
333 u32 reserved; 333 u32 reserved;
334 }; 334 };
335 335
336 /* PCI data structure in expansion ROM images */ 336 /* PCI data structure in expansion ROM images */
337 struct __packed esas2r_boot_header { 337 struct __packed esas2r_boot_header {
338 char signature[4]; 338 char signature[4];
339 u16 vendor_id; 339 u16 vendor_id;
340 u16 device_id; 340 u16 device_id;
341 u16 VPD; 341 u16 VPD;
342 u16 struct_length; 342 u16 struct_length;
343 u8 struct_revision; 343 u8 struct_revision;
344 u8 class_code[3]; 344 u8 class_code[3];
345 u16 image_length; 345 u16 image_length;
346 u16 code_revision; 346 u16 code_revision;
347 u8 code_type; 347 u8 code_type;
348 #define CODE_TYPE_PC 0 348 #define CODE_TYPE_PC 0
349 #define CODE_TYPE_OPEN 1 349 #define CODE_TYPE_OPEN 1
350 #define CODE_TYPE_EFI 3 350 #define CODE_TYPE_EFI 3
351 u8 indicator; 351 u8 indicator;
352 #define INDICATOR_LAST 0x80 352 #define INDICATOR_LAST 0x80
353 u8 reserved[2]; 353 u8 reserved[2];
354 }; 354 };
355 355
356 struct __packed esas2r_boot_image { 356 struct __packed esas2r_boot_image {
357 u16 signature; 357 u16 signature;
358 u8 reserved[22]; 358 u8 reserved[22];
359 u16 header_offset; 359 u16 header_offset;
360 u16 pnp_offset; 360 u16 pnp_offset;
361 }; 361 };
362 362
363 struct __packed esas2r_pc_image { 363 struct __packed esas2r_pc_image {
364 u16 signature; 364 u16 signature;
365 u8 length; 365 u8 length;
366 u8 entry_point[3]; 366 u8 entry_point[3];
367 u8 checksum; 367 u8 checksum;
368 u16 image_end; 368 u16 image_end;
369 u16 min_size; 369 u16 min_size;
370 u8 rom_flags; 370 u8 rom_flags;
371 u8 reserved[12]; 371 u8 reserved[12];
372 u16 header_offset; 372 u16 header_offset;
373 u16 pnp_offset; 373 u16 pnp_offset;
374 struct esas2r_boot_header boot_image; 374 struct esas2r_boot_header boot_image;
375 }; 375 };
376 376
377 struct __packed esas2r_efi_image { 377 struct __packed esas2r_efi_image {
378 u16 signature; 378 u16 signature;
379 u16 length; 379 u16 length;
380 u32 efi_signature; 380 u32 efi_signature;
381 #define EFI_ROM_SIG 0x00000EF1 381 #define EFI_ROM_SIG 0x00000EF1
382 u16 image_type; 382 u16 image_type;
383 #define EFI_IMAGE_APP 10 383 #define EFI_IMAGE_APP 10
384 #define EFI_IMAGE_BSD 11 384 #define EFI_IMAGE_BSD 11
385 #define EFI_IMAGE_RTD 12 385 #define EFI_IMAGE_RTD 12
386 u16 machine_type; 386 u16 machine_type;
387 #define EFI_MACHINE_IA32 0x014c 387 #define EFI_MACHINE_IA32 0x014c
388 #define EFI_MACHINE_IA64 0x0200 388 #define EFI_MACHINE_IA64 0x0200
389 #define EFI_MACHINE_X64 0x8664 389 #define EFI_MACHINE_X64 0x8664
390 #define EFI_MACHINE_EBC 0x0EBC 390 #define EFI_MACHINE_EBC 0x0EBC
391 u16 compression; 391 u16 compression;
392 #define EFI_UNCOMPRESSED 0x0000 392 #define EFI_UNCOMPRESSED 0x0000
393 #define EFI_COMPRESSED 0x0001 393 #define EFI_COMPRESSED 0x0001
394 u8 reserved[8]; 394 u8 reserved[8];
395 u16 efi_offset; 395 u16 efi_offset;
396 u16 header_offset; 396 u16 header_offset;
397 u16 reserved2; 397 u16 reserved2;
398 struct esas2r_boot_header boot_image; 398 struct esas2r_boot_header boot_image;
399 }; 399 };
400 400
401 struct esas2r_adapter; 401 struct esas2r_adapter;
402 struct esas2r_sg_context; 402 struct esas2r_sg_context;
403 struct esas2r_request; 403 struct esas2r_request;
404 404
405 typedef void (*RQCALLBK) (struct esas2r_adapter *a, 405 typedef void (*RQCALLBK) (struct esas2r_adapter *a,
406 struct esas2r_request *rq); 406 struct esas2r_request *rq);
407 typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a, 407 typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
408 struct esas2r_sg_context *sgc); 408 struct esas2r_sg_context *sgc);
409 409
410 struct esas2r_component_header { 410 struct esas2r_component_header {
411 u8 img_type; 411 u8 img_type;
412 #define CH_IT_FW 0x00 412 #define CH_IT_FW 0x00
413 #define CH_IT_NVR 0x01 413 #define CH_IT_NVR 0x01
414 #define CH_IT_BIOS 0x02 414 #define CH_IT_BIOS 0x02
415 #define CH_IT_MAC 0x03 415 #define CH_IT_MAC 0x03
416 #define CH_IT_CFG 0x04 416 #define CH_IT_CFG 0x04
417 #define CH_IT_EFI 0x05 417 #define CH_IT_EFI 0x05
418 u8 status; 418 u8 status;
419 #define CH_STAT_PENDING 0xff 419 #define CH_STAT_PENDING 0xff
420 #define CH_STAT_FAILED 0x00 420 #define CH_STAT_FAILED 0x00
421 #define CH_STAT_SUCCESS 0x01 421 #define CH_STAT_SUCCESS 0x01
422 #define CH_STAT_RETRY 0x02 422 #define CH_STAT_RETRY 0x02
423 #define CH_STAT_INVALID 0x03 423 #define CH_STAT_INVALID 0x03
424 u8 pad[2]; 424 u8 pad[2];
425 u32 version; 425 u32 version;
426 u32 length; 426 u32 length;
427 u32 image_offset; 427 u32 image_offset;
428 }; 428 };
429 429
430 #define FI_REL_VER_SZ 16 430 #define FI_REL_VER_SZ 16
431 431
432 struct esas2r_flash_img_v0 { 432 struct esas2r_flash_img_v0 {
433 u8 fi_version; 433 u8 fi_version;
434 #define FI_VERSION_0 00 434 #define FI_VERSION_0 00
435 u8 status; 435 u8 status;
436 u8 adap_typ; 436 u8 adap_typ;
437 u8 action; 437 u8 action;
438 u32 length; 438 u32 length;
439 u16 checksum; 439 u16 checksum;
440 u16 driver_error; 440 u16 driver_error;
441 u16 flags; 441 u16 flags;
442 u16 num_comps; 442 u16 num_comps;
443 #define FI_NUM_COMPS_V0 5 443 #define FI_NUM_COMPS_V0 5
444 u8 rel_version[FI_REL_VER_SZ]; 444 u8 rel_version[FI_REL_VER_SZ];
445 struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0]; 445 struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
446 u8 scratch_buf[FM_BUF_SZ]; 446 u8 scratch_buf[FM_BUF_SZ];
447 }; 447 };
448 448
/*
 * Current (version 1) flash image header.  Same role as
 * esas2r_flash_img_v0 but with one more component slot and an expanded
 * set of status codes.
 */
struct esas2r_flash_img {
	u8 fi_version;		/* FI_VERSION_1 for this layout */
#define FI_VERSION_1 01
	u8 status;		/* result of the flash operation (FI_STAT_*) */
#define FI_STAT_SUCCESS  0x00
#define FI_STAT_FAILED   0x01
#define FI_STAT_REBOOT   0x02
#define FI_STAT_ADAPTYP  0x03
#define FI_STAT_INVALID  0x04
#define FI_STAT_CHKSUM   0x05
#define FI_STAT_LENGTH   0x06
#define FI_STAT_UNKNOWN  0x07
#define FI_STAT_IMG_VER  0x08
#define FI_STAT_BUSY     0x09
#define FI_STAT_DUAL     0x0A
#define FI_STAT_MISSING  0x0B
#define FI_STAT_UNSUPP   0x0C
#define FI_STAT_ERASE    0x0D
#define FI_STAT_FLASH    0x0E
#define FI_STAT_DEGRADED 0x0F
	u8 adap_typ;		/* adapter family the image targets (FI_AT_*) */
#define FI_AT_UNKNWN   0xFF
#define FI_AT_SUN_LAKE 0x0B
#define FI_AT_MV_9580  0x0F
	u8 action;		/* requested operation (FI_ACT_*) */
#define FI_ACT_DOWN  0x00
#define FI_ACT_UP    0x01
#define FI_ACT_UPSZ  0x02
#define FI_ACT_MAX   0x02
#define FI_ACT_DOWN1 0x80
	u32 length;
	u16 checksum;
	u16 driver_error;
	u16 flags;
#define FI_FLG_NVR_DEF 0x0001
	u16 num_comps;		/* count of valid entries in cmp_hdr[] */
#define FI_NUM_COMPS_V1 6
	u8 rel_version[FI_REL_VER_SZ];
	struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
	u8 scratch_buf[FM_BUF_SZ];
};
490 490
/* definitions for flash script (FS) commands */

/*
 * A single flash-script command: what to do (ESAS2R_FS_CMD_*), where in
 * flash to do it, and where in the caller's buffer the data lives.
 */
struct esas2r_ioctlfs_command {
	u8 command;
#define ESAS2R_FS_CMD_ERASE  0
#define ESAS2R_FS_CMD_READ   1
#define ESAS2R_FS_CMD_BEGINW 2
#define ESAS2R_FS_CMD_WRITE  3
#define ESAS2R_FS_CMD_COMMIT 4
#define ESAS2R_FS_CMD_CANCEL 5
	u8 checksum;
	u8 reserved[2];
	u32 flash_addr;		/* target offset within the flash part */
	u32 length;		/* byte count for the operation */
	u32 image_offset;	/* offset of the data within the image buffer */
};
506 506
/*
 * Flash-script ioctl envelope: versioning/status plus one embedded
 * command and a variable-length data payload (data[] is the classic
 * pre-C99 [1] flexible-array idiom; actual size depends on the command).
 */
struct esas2r_ioctl_fs {
	u8 version;
#define ESAS2R_FS_VER 0
	u8 status;
	u8 driver_error;
	u8 adap_type;		/* adapter family (ESAS2R_FS_AT_*) */
#define ESAS2R_FS_AT_ESASRAID2   3
#define ESAS2R_FS_AT_TSSASRAID2  4
#define ESAS2R_FS_AT_TSSASRAID2E 5
#define ESAS2R_FS_AT_TLSASHBA    6
	u8 driver_ver;
	u8 reserved[11];
	struct esas2r_ioctlfs_command command;
	u8 data[1];		/* start of variable-length payload */
};
522 522
/*
 * Persistent adapter settings as stored in SAS NVRAM.
 * NOTE(review): this appears to mirror an on-NVRAM layout — confirm
 * endianness/packing requirements before changing any field.
 */
struct esas2r_sas_nvram {
	u8 signature[4];
	u8 version;
#define SASNVR_VERSION_0 0x00
#define SASNVR_VERSION   SASNVR_VERSION_0
	u8 checksum;		/* seeded with SASNVR_CKSUM_SEED */
#define SASNVR_CKSUM_SEED 0x5A
	u8 max_lun_for_target;
	u8 pci_latency;
#define SASNVR_PCILAT_DIS 0x00
#define SASNVR_PCILAT_MIN 0x10
#define SASNVR_PCILAT_MAX 0xF8
	u8 options1;		/* bit flags, SASNVR1_* */
#define SASNVR1_BOOT_DRVR   0x01
#define SASNVR1_BOOT_SCAN   0x02
#define SASNVR1_DIS_PCI_MWI 0x04
#define SASNVR1_FORCE_ORD_Q 0x08
#define SASNVR1_CACHELINE_0 0x10
#define SASNVR1_DIS_DEVSORT 0x20
#define SASNVR1_PWR_MGT_EN  0x40
#define SASNVR1_WIDEPORT    0x80
	u8 options2;		/* bit flags, SASNVR2_* */
#define SASNVR2_SINGLE_BUS  0x01
#define SASNVR2_SLOT_BIND   0x02
#define SASNVR2_EXP_PROG    0x04
#define SASNVR2_CMDTHR_LUN  0x08
#define SASNVR2_HEARTBEAT   0x10
#define SASNVR2_INT_CONNECT 0x20
#define SASNVR2_SW_MUX_CTRL 0x40
#define SASNVR2_DISABLE_NCQ 0x80
	u8 int_coalescing;	/* SASNVR_COAL_* level */
#define SASNVR_COAL_DIS 0x00
#define SASNVR_COAL_LOW 0x01
#define SASNVR_COAL_MED 0x02
#define SASNVR_COAL_HI  0x03
	u8 cmd_throttle;
#define SASNVR_CMDTHR_NONE 0x00
	u8 dev_wait_time;
	u8 dev_wait_count;
	u8 spin_up_delay;
#define SASNVR_SPINUP_MAX 0x14
	u8 ssp_align_rate;
	u8 sas_addr[8];
	u8 phy_speed[16];	/* per-phy link rate, SASNVR_SPEED_* */
#define SASNVR_SPEED_AUTO  0x00
#define SASNVR_SPEED_1_5GB 0x01
#define SASNVR_SPEED_3GB   0x02
#define SASNVR_SPEED_6GB   0x03
#define SASNVR_SPEED_12GB  0x04
	u8 phy_mux[16];		/* per-phy mux rate, SASNVR_MUX_* */
#define SASNVR_MUX_DISABLED 0x00
#define SASNVR_MUX_1_5GB    0x01
#define SASNVR_MUX_3GB      0x02
#define SASNVR_MUX_6GB      0x03
	u8 phy_flags[16];	/* per-phy flags, SASNVR_PHF_* */
#define SASNVR_PHF_DISABLED 0x01
#define SASNVR_PHF_RD_ONLY  0x02
	u8 sort_type;		/* device sort policy, SASNVR_SORT_* */
#define SASNVR_SORT_SAS_ADDR  0x00
#define SASNVR_SORT_H308_CONN 0x01
#define SASNVR_SORT_PHY_ID    0x02
#define SASNVR_SORT_SLOT_ID   0x03
	u8 dpm_reqcmd_lmt;
	u8 dpm_stndby_time;
	u8 dpm_active_time;
	u8 phy_target_id[16];	/* per-phy target id, 0xFF = disabled */
#define SASNVR_PTI_DISABLED 0xFF
	u8 virt_ses_mode;
#define SASNVR_VSMH_DISABLED 0x00
	u8 read_write_mode;
#define SASNVR_RWM_DEFAULT 0x00
	u8 link_down_to;
	u8 reserved[0xA1];	/* pads the structure out to its fixed size */
};
597 597
/*
 * Callback used during SG-list construction: given the SG context it
 * stores a physical address through *addr and returns a u32 (presumably
 * the contiguous length at that address — confirm against callers).
 */
typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
599 599
/*
 * Working state for building a scatter/gather list for one request.
 * The union holds either 64-bit VDA SGEs (a64) or physical region
 * descriptors (prd), depending on the SGE mode in use.
 */
struct esas2r_sg_context {
	struct esas2r_adapter *adapter;
	struct esas2r_request *first_req;
	u32 length;			/* bytes remaining to map */
	u8 *cur_offset;
	PGETPHYSADDR get_phys_addr;	/* supplies the next physical address */
	union {
		struct {
			struct atto_vda_sge *curr;
			struct atto_vda_sge *last;
			struct atto_vda_sge *limit;
			struct atto_vda_sge *chain;
		} a64;
		struct {
			struct atto_physical_region_description *curr;
			struct atto_physical_region_description *chain;
			u32 sgl_max_cnt;
			u32 sge_cnt;
		} prd;
	} sge;
	struct scatterlist *cur_sgel;	/* current kernel SG element */
	u8 *exp_offset;
	int num_sgel;
	int sgel_count;
};
625 625
/*
 * Per-target entry in the adapter's target database.  Three state
 * fields exist so state changes can be staged (new_/buffered_) before
 * being applied to target_state.
 */
struct esas2r_target {
	u8 flags;
#define TF_PASS_THRU 0x01
#define TF_USED      0x02
	u8 new_target_state;
	u8 target_state;
	u8 buffered_target_state;
#define TS_NOT_PRESENT 0x00
#define TS_PRESENT     0x05
#define TS_LUN_CHANGE  0x06
#define TS_INVALID     0xFF
	u32 block_size;
	u32 inter_block;
	u32 inter_byte;
	u16 virt_targ_id;	/* virtual (RAID) target id */
	u16 phys_targ_id;	/* physical (pass-through) target id */
	u8 identifier_len;	/* valid bytes in identifier[] */
	u64 sas_addr;
	u8 identifier[60];
	struct atto_vda_ae_lu lu_event;	/* last LU async event for this target */
};
647 647
/*
 * Driver-side request object wrapping one VDA request (vrq).  Lives on
 * the adapter's avail/active/defer lists via req_list, and on a
 * completion list via comp_list.
 */
struct esas2r_request {
	struct list_head comp_list;	/* linkage for completion processing */
	struct list_head req_list;	/* linkage for avail/active/defer lists */
	union atto_vda_req *vrq;	/* the hardware request itself */
	struct esas2r_mem_desc *vrq_md;
	union {
		void *data_buf;
		union atto_vda_rsp_data *vda_rsp_data;
	};
	u8 *sense_buf;
	struct list_head sg_table_head;
	struct esas2r_mem_desc *sg_table;
	u32 timeout;			/* ms, with two reserved sentinels */
#define RQ_TIMEOUT_S1  0xFFFFFFFF
#define RQ_TIMEOUT_S2  0xFFFFFFFE
#define RQ_MAX_TIMEOUT 0xFFFFFFFD
	u16 target_id;
	u8 req_type;
#define RT_INI_REQ  0x01
#define RT_DISC_REQ 0x02
	u8 sense_len;
	union atto_vda_func_rsp func_rsp;
	RQCALLBK comp_cb;		/* completion callback */
	RQCALLBK interrupt_cb;		/* interrupt-time callback */
	void *interrupt_cx;		/* context for interrupt_cb */
	u8 flags;
#define RF_1ST_IBLK_BASE 0x04
#define RF_FAILURE_OK    0x08
	u8 req_stat;
	u16 vda_req_sz;
#define RQ_SIZE_DEFAULT 0
	u64 lba;
	RQCALLBK aux_req_cb;
	void *aux_req_cx;
	u32 blk_len;
	u32 max_blk_len;
	union {
		struct scsi_cmnd *cmd;	/* originating SCSI command, if any */
		u8 *task_management_status_ptr;
	};
};
689 689
/*
 * State carried across the multi-step flash task state machine
 * (FMTSK_*: erase boot block, read/write BIOS, MAC, EFI, CFG regions).
 */
struct esas2r_flash_context {
	struct esas2r_flash_img *fi;	/* image header being processed */
	RQCALLBK interrupt_cb;
	u8 *sgc_offset;
	u8 *scratch;
	u32 fi_hdr_len;
	u8 task;			/* current FMTSK_* step */
#define FMTSK_ERASE_BOOT 0
#define FMTSK_WRTBIOS    1
#define FMTSK_READBIOS   2
#define FMTSK_WRTMAC     3
#define FMTSK_READMAC    4
#define FMTSK_WRTEFI     5
#define FMTSK_READEFI    6
#define FMTSK_WRTCFG     7
#define FMTSK_READCFG    8
	u8 func;
	u16 num_comps;
	u32 cmp_len;
	u32 flsh_addr;			/* current flash address */
	u32 curr_len;
	u8 comp_typ;
	struct esas2r_sg_context sgc;	/* SG context for the data transfer */
};
714 714
/*
 * State for the device-discovery state machine; `state` walks through
 * the DCS_* steps until DCS_DISC_DONE.
 */
struct esas2r_disc_context {
	u8 disc_evt;			/* what triggered discovery (DCDE_*) */
#define DCDE_DEV_CHANGE 0x01
#define DCDE_DEV_SCAN   0x02
	u8 state;			/* current DCS_* step */
#define DCS_DEV_RMV        0x00
#define DCS_DEV_ADD        0x01
#define DCS_BLOCK_DEV_SCAN 0x02
#define DCS_RAID_GRP_INFO  0x03
#define DCS_PART_INFO      0x04
#define DCS_PT_DEV_INFO    0x05
#define DCS_PT_DEV_ADDR    0x06
#define DCS_DISC_DONE      0xFF
	u16 flags;
#define DCF_DEV_CHANGE 0x0001
#define DCF_DEV_SCAN   0x0002
#define DCF_POLLED     0x8000		/* discovery is being run polled */
	u32 interleave;
	u32 block_size;
	u16 dev_ix;
	u8 part_num;
	u8 raid_grp_ix;
	char raid_grp_name[16];
	struct esas2r_target *curr_targ;	/* target being processed */
	u16 curr_virt_id;
	u16 curr_phys_id;
	u8 scan_gen;
	u8 dev_addr_type;
	u64 sas_addr;
};
745 745
/*
 * Descriptor for a driver memory region: its virtual and DMA/physical
 * addresses plus driver-private data, kept on list heads via next_desc.
 */
struct esas2r_mem_desc {
	struct list_head next_desc;
	void *virt_addr;
	u64 phys_addr;
	void *pad;
	void *esas2r_data;	/* owner-defined payload */
	u32 esas2r_param;	/* owner-defined parameter */
	u32 size;		/* region size in bytes */
};
755 755
/* Kinds of firmware events dispatched through the fw_event workqueue. */
enum fw_event_type {
	fw_event_null,
	fw_event_lun_change,
	fw_event_present,
	fw_event_not_present,
	fw_event_vda_ae		/* VDA asynchronous event */
};
763 763
/*
 * A VDA asynchronous event, tagged with the PCI location of the adapter
 * that raised it.  The signature is ASCII "ATTO" (0x4154544F).
 */
struct esas2r_vda_ae {
	u32 signature;
#define ESAS2R_VDA_EVENT_SIG 0x4154544F
	u8 bus_number;		/* PCI bus of the reporting adapter */
	u8 devfn;		/* PCI device/function of the adapter */
	u8 pad[2];
	union atto_vda_ae vda_ae;	/* the event payload itself */
};
772 772
/*
 * Work item queued on the adapter's fw_event workqueue; `data` is large
 * enough to carry an esas2r_vda_ae when type == fw_event_vda_ae.
 */
struct esas2r_fw_event_work {
	struct list_head list;
	struct delayed_work work;
	struct esas2r_adapter *a;	/* adapter the event belongs to */
	enum fw_event_type type;
	u8 data[sizeof(struct esas2r_vda_ae)];
};
780 780
/* Firmware-download buffer state (see struct esas2r_firmware). */
enum state {
	FW_INVALID_ST,
	FW_STATUS_ST,
	FW_COMMAND_ST
};
786 786
/*
 * Staging area for a firmware image transfer: the image header, a data
 * buffer with its DMA address, and the current transfer state.
 */
struct esas2r_firmware {
	enum state state;
	struct esas2r_flash_img header;
	u8 *data;
	u64 phys;		/* DMA address of `data` */
	int orig_len;
	void *header_buff;
	u64 header_buff_phys;	/* DMA address of header_buff */
};
796 796
/*
 * Per-HBA state for one ESAS2R adapter: target database, register
 * windows, request lists and locks, firmware/flash/NVRAM/VDA API
 * staging buffers, and sysfs bookkeeping.  `flags`/`flags2` hold bit
 * NUMBERS (for set_bit/test_bit-style use), not masks.
 */
struct esas2r_adapter {
	struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
	struct esas2r_target *targetdb_end;	/* one past the last entry */
	unsigned char *regs;		/* mapped register space */
	unsigned char *data_window;	/* mapped data window */
	long flags;			/* AF_* bit numbers */
#define AF_PORT_CHANGE       0
#define AF_CHPRST_NEEDED     1
#define AF_CHPRST_PENDING    2
#define AF_CHPRST_DETECTED   3
#define AF_BUSRST_NEEDED     4
#define AF_BUSRST_PENDING    5
#define AF_BUSRST_DETECTED   6
#define AF_DISABLED          7
#define AF_FLASH_LOCK        8
#define AF_OS_RESET          9
#define AF_FLASHING          10
#define AF_POWER_MGT         11
#define AF_NVR_VALID         12
#define AF_DEGRADED_MODE     13
#define AF_DISC_PENDING      14
#define AF_TASKLET_SCHEDULED 15
#define AF_HEARTBEAT         16
#define AF_HEARTBEAT_ENB     17
#define AF_NOT_PRESENT       18
#define AF_CHPRST_STARTED    19
#define AF_FIRST_INIT        20
#define AF_POWER_DOWN        21
#define AF_DISC_IN_PROG      22
#define AF_COMM_LIST_TOGGLE  23
#define AF_LEGACY_SGE_MODE   24
#define AF_DISC_POLLED       25
	long flags2;			/* AF2_* bit numbers */
#define AF2_SERIAL_FLASH   0
#define AF2_DEV_SCAN       1
#define AF2_DEV_CNT_OK     2
#define AF2_COREDUMP_AVAIL 3
#define AF2_COREDUMP_SAVED 4
#define AF2_VDA_POWER_DOWN 5
#define AF2_THUNDERLINK    6
#define AF2_THUNDERBOLT    7
#define AF2_INIT_DONE      8
#define AF2_INT_PENDING    9
#define AF2_TIMER_TICK     10
#define AF2_IRQ_CLAIMED    11
#define AF2_MSI_ENABLED    12
	atomic_t disable_cnt;
	atomic_t dis_ints_cnt;
	u32 int_stat;
	u32 int_mask;
	u32 volatile *outbound_copy;
	/* request lists and the locks guarding them */
	struct list_head avail_request;
	spinlock_t request_lock;
	spinlock_t sg_list_lock;
	spinlock_t queue_lock;
	spinlock_t mem_lock;
	struct list_head free_sg_list_head;
	struct esas2r_mem_desc *sg_list_mds;
	struct list_head active_list;
	struct list_head defer_list;
	struct esas2r_request **req_table;
	union {
		u16 prev_dev_cnt;
		u32 heartbeat_time;	/* ms */
#define ESAS2R_HEARTBEAT_TIME (3000)
	};
	u32 chip_uptime;
#define ESAS2R_CHP_UPTIME_MAX (60000)
#define ESAS2R_CHP_UPTIME_CNT (20000)
	u64 uncached_phys;		/* DMA address of `uncached` */
	u8 *uncached;
	struct esas2r_sas_nvram *nvram;
	struct esas2r_request general_req;
	u8 init_msg;			/* init-message state machine step */
#define ESAS2R_INIT_MSG_START    1
#define ESAS2R_INIT_MSG_INIT     2
#define ESAS2R_INIT_MSG_GET_INIT 3
#define ESAS2R_INIT_MSG_REINIT   4
	u16 cmd_ref_no;
	u32 fw_version;
	u32 fw_build;
	u32 chip_init_time;
#define ESAS2R_CHPRST_TIME      (180000)
#define ESAS2R_CHPRST_WAIT_TIME (2000)
	u32 last_tick_time;
	u32 window_base;
	RQBUILDSGL build_sgl;		/* SG-list build routine for this adapter */
	struct esas2r_request *first_ae_req;
	u32 list_size;
	u32 last_write;
	u32 last_read;
	u16 max_vdareq_size;
	u16 disc_wait_cnt;
	struct esas2r_mem_desc inbound_list_md;
	struct esas2r_mem_desc outbound_list_md;
	struct esas2r_disc_context disc_ctx;
	u8 *disc_buffer;
	u32 disc_start_time;
	u32 disc_wait_time;
	u32 flash_ver;
	char flash_rev[16];
	char fw_rev[16];
	char image_type[16];
	struct esas2r_flash_context flash_context;
	u32 num_targets_backend;
	u32 ioctl_tunnel;
	struct tasklet_struct tasklet;
	struct pci_dev *pcid;
	struct Scsi_Host *host;
	unsigned int index;
	char name[32];
	struct timer_list timer;
	struct esas2r_firmware firmware;
	/* waiters + completion flags for the synchronous API paths */
	wait_queue_head_t nvram_waiter;
	int nvram_command_done;
	wait_queue_head_t fm_api_waiter;
	int fm_api_command_done;
	wait_queue_head_t vda_waiter;
	int vda_command_done;
	u8 *vda_buffer;
	u64 ppvda_buffer;		/* DMA address of vda_buffer */
#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
#define VDA_MAX_BUFFER_SIZE  (0x40000 + VDA_BUFFER_HEADER_SZ)
	wait_queue_head_t fs_api_waiter;
	int fs_api_command_done;
	u64 ppfs_api_buffer;		/* DMA address of fs_api_buffer */
	u8 *fs_api_buffer;
	u32 fs_api_buffer_size;
	wait_queue_head_t buffered_ioctl_waiter;
	int buffered_ioctl_done;
	int uncached_size;
	struct workqueue_struct *fw_event_q;
	struct list_head fw_event_list;
	spinlock_t fw_event_lock;
	u8 fw_events_off; /* if '1', then ignore events */
	char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
	/*
	 * intr_mode stores the interrupt mode currently being used by this
	 * adapter. it is based on the interrupt_mode module parameter, but
	 * can be changed based on the ability (or not) to utilize the
	 * mode requested by the parameter.
	 */
	int intr_mode;
#define INTR_MODE_LEGACY 0
#define INTR_MODE_MSI    1
#define INTR_MODE_MSIX   2
	struct esas2r_sg_context fm_api_sgc;
	u8 *save_offset;
	struct list_head vrq_mds_head;
	struct esas2r_mem_desc *vrq_mds;
	int num_vrqs;
	struct semaphore fm_api_semaphore;
	struct semaphore fs_api_semaphore;
	struct semaphore nvram_semaphore;
	struct atto_ioctl *local_atto_ioctl;
	u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
	/* which sysfs binary attributes were successfully created */
	unsigned int sysfs_fw_created:1;
	unsigned int sysfs_fs_created:1;
	unsigned int sysfs_vda_created:1;
	unsigned int sysfs_hw_created:1;
	unsigned int sysfs_live_nvram_created:1;
	unsigned int sysfs_default_nvram_created:1;
};
960 960
/*
 * Function Declarations
 * SCSI functions
 */
int esas2r_release(struct Scsi_Host *);
const char *esas2r_info(struct Scsi_Host *);
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data);
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba);
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
int esas2r_slave_alloc(struct scsi_device *dev);
int esas2r_slave_configure(struct scsi_device *dev);
void esas2r_slave_destroy(struct scsi_device *dev);
int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
int esas2r_change_queue_type(struct scsi_device *dev, int type);
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

/* SCSI error handler (eh) functions */
int esas2r_eh_abort(struct scsi_cmnd *cmd);
int esas2r_device_reset(struct scsi_cmnd *cmd);
int esas2r_host_reset(struct scsi_cmnd *cmd);
int esas2r_bus_reset(struct scsi_cmnd *cmd);
int esas2r_target_reset(struct scsi_cmnd *cmd);

/* Internal functions */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
			int index);
int esas2r_cleanup(struct Scsi_Host *host);
/* sysfs binary-attribute read/write backends */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count);
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count);
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count);
void esas2r_adapter_tasklet(unsigned long context);
/* interrupt handlers (legacy and MSI) */
irqreturn_t esas2r_interrupt(int irq, void *dev_id);
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
void esas2r_kickoff_timer(struct esas2r_adapter *a);
int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
int esas2r_resume(struct pci_dev *pcid);
void esas2r_fw_event_off(struct esas2r_adapter *a);
void esas2r_fw_event_on(struct esas2r_adapter *a);
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *nvram);
void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
			       struct esas2r_sas_nvram *nvram);
void esas2r_complete_request_cb(struct esas2r_adapter *a,
				struct esas2r_request *rq);
void esas2r_reset_detected(struct esas2r_adapter *a);
void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
				 u8 state);
int esas2r_req_status_to_error(u8 req_stat);
void esas2r_kill_adapter(int i);
/* request pool allocation/free */
void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
				void **uncached_area);
bool esas2r_check_adapter(struct esas2r_adapter *a);
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1029 bool esas2r_send_task_mgmt(struct esas2r_adapter *a, 1029 bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
1030 struct esas2r_request *rqaux, u8 task_mgt_func); 1030 struct esas2r_request *rqaux, u8 task_mgt_func);
1031 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a); 1031 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
1032 void esas2r_adapter_interrupt(struct esas2r_adapter *a); 1032 void esas2r_adapter_interrupt(struct esas2r_adapter *a);
1033 void esas2r_do_deferred_processes(struct esas2r_adapter *a); 1033 void esas2r_do_deferred_processes(struct esas2r_adapter *a);
1034 void esas2r_reset_bus(struct esas2r_adapter *a); 1034 void esas2r_reset_bus(struct esas2r_adapter *a);
1035 void esas2r_reset_adapter(struct esas2r_adapter *a); 1035 void esas2r_reset_adapter(struct esas2r_adapter *a);
1036 void esas2r_timer_tick(struct esas2r_adapter *a); 1036 void esas2r_timer_tick(struct esas2r_adapter *a);
1037 const char *esas2r_get_model_name(struct esas2r_adapter *a); 1037 const char *esas2r_get_model_name(struct esas2r_adapter *a);
1038 const char *esas2r_get_model_name_short(struct esas2r_adapter *a); 1038 const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
1039 u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time, 1039 u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
1040 u32 *delay); 1040 u32 *delay);
1041 void esas2r_build_flash_req(struct esas2r_adapter *a, 1041 void esas2r_build_flash_req(struct esas2r_adapter *a,
1042 struct esas2r_request *rq, 1042 struct esas2r_request *rq,
1043 u8 sub_func, 1043 u8 sub_func,
1044 u8 cksum, 1044 u8 cksum,
1045 u32 addr, 1045 u32 addr,
1046 u32 length); 1046 u32 length);
1047 void esas2r_build_mgt_req(struct esas2r_adapter *a, 1047 void esas2r_build_mgt_req(struct esas2r_adapter *a,
1048 struct esas2r_request *rq, 1048 struct esas2r_request *rq,
1049 u8 sub_func, 1049 u8 sub_func,
1050 u8 scan_gen, 1050 u8 scan_gen,
1051 u16 dev_index, 1051 u16 dev_index,
1052 u32 length, 1052 u32 length,
1053 void *data); 1053 void *data);
1054 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); 1054 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1055 void esas2r_build_cli_req(struct esas2r_adapter *a, 1055 void esas2r_build_cli_req(struct esas2r_adapter *a,
1056 struct esas2r_request *rq, 1056 struct esas2r_request *rq,
1057 u32 length, 1057 u32 length,
1058 u32 cmd_rsp_len); 1058 u32 cmd_rsp_len);
1059 void esas2r_build_ioctl_req(struct esas2r_adapter *a, 1059 void esas2r_build_ioctl_req(struct esas2r_adapter *a,
1060 struct esas2r_request *rq, 1060 struct esas2r_request *rq,
1061 u32 length, 1061 u32 length,
1062 u8 sub_func); 1062 u8 sub_func);
1063 void esas2r_build_cfg_req(struct esas2r_adapter *a, 1063 void esas2r_build_cfg_req(struct esas2r_adapter *a,
1064 struct esas2r_request *rq, 1064 struct esas2r_request *rq,
1065 u8 sub_func, 1065 u8 sub_func,
1066 u32 length, 1066 u32 length,
1067 void *data); 1067 void *data);
1068 void esas2r_power_down(struct esas2r_adapter *a); 1068 void esas2r_power_down(struct esas2r_adapter *a);
1069 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll); 1069 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
1070 void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq); 1070 void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1071 u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo); 1071 u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
1072 bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, 1072 bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
1073 struct esas2r_ioctl_fs *fs, 1073 struct esas2r_ioctl_fs *fs,
1074 struct esas2r_request *rq, 1074 struct esas2r_request *rq,
1075 struct esas2r_sg_context *sgc); 1075 struct esas2r_sg_context *sgc);
1076 bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, 1076 bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
1077 u32 size); 1077 u32 size);
1078 bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, 1078 bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
1079 u32 size); 1079 u32 size);
1080 bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, 1080 bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
1081 struct esas2r_request *rq, struct esas2r_sg_context *sgc); 1081 struct esas2r_request *rq, struct esas2r_sg_context *sgc);
1082 void esas2r_force_interrupt(struct esas2r_adapter *a); 1082 void esas2r_force_interrupt(struct esas2r_adapter *a);
1083 void esas2r_local_start_request(struct esas2r_adapter *a, 1083 void esas2r_local_start_request(struct esas2r_adapter *a,
1084 struct esas2r_request *rq); 1084 struct esas2r_request *rq);
1085 void esas2r_process_adapter_reset(struct esas2r_adapter *a); 1085 void esas2r_process_adapter_reset(struct esas2r_adapter *a);
1086 void esas2r_complete_request(struct esas2r_adapter *a, 1086 void esas2r_complete_request(struct esas2r_adapter *a,
1087 struct esas2r_request *rq); 1087 struct esas2r_request *rq);
1088 void esas2r_dummy_complete(struct esas2r_adapter *a, 1088 void esas2r_dummy_complete(struct esas2r_adapter *a,
1089 struct esas2r_request *rq); 1089 struct esas2r_request *rq);
1090 void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq); 1090 void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
1091 void esas2r_start_vda_request(struct esas2r_adapter *a, 1091 void esas2r_start_vda_request(struct esas2r_adapter *a,
1092 struct esas2r_request *rq); 1092 struct esas2r_request *rq);
1093 bool esas2r_read_flash_rev(struct esas2r_adapter *a); 1093 bool esas2r_read_flash_rev(struct esas2r_adapter *a);
1094 bool esas2r_read_image_type(struct esas2r_adapter *a); 1094 bool esas2r_read_image_type(struct esas2r_adapter *a);
1095 bool esas2r_nvram_read_direct(struct esas2r_adapter *a); 1095 bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
1096 bool esas2r_nvram_validate(struct esas2r_adapter *a); 1096 bool esas2r_nvram_validate(struct esas2r_adapter *a);
1097 void esas2r_nvram_set_defaults(struct esas2r_adapter *a); 1097 void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
1098 bool esas2r_print_flash_rev(struct esas2r_adapter *a); 1098 bool esas2r_print_flash_rev(struct esas2r_adapter *a);
1099 void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt); 1099 void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
1100 bool esas2r_init_msgs(struct esas2r_adapter *a); 1100 bool esas2r_init_msgs(struct esas2r_adapter *a);
1101 bool esas2r_is_adapter_present(struct esas2r_adapter *a); 1101 bool esas2r_is_adapter_present(struct esas2r_adapter *a);
1102 void esas2r_nuxi_mgt_data(u8 function, void *data); 1102 void esas2r_nuxi_mgt_data(u8 function, void *data);
1103 void esas2r_nuxi_cfg_data(u8 function, void *data); 1103 void esas2r_nuxi_cfg_data(u8 function, void *data);
1104 void esas2r_nuxi_ae_data(union atto_vda_ae *ae); 1104 void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
1105 void esas2r_reset_chip(struct esas2r_adapter *a); 1105 void esas2r_reset_chip(struct esas2r_adapter *a);
1106 void esas2r_log_request_failure(struct esas2r_adapter *a, 1106 void esas2r_log_request_failure(struct esas2r_adapter *a,
1107 struct esas2r_request *rq); 1107 struct esas2r_request *rq);
1108 void esas2r_polled_interrupt(struct esas2r_adapter *a); 1108 void esas2r_polled_interrupt(struct esas2r_adapter *a);
1109 bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, 1109 bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
1110 u8 status); 1110 u8 status);
1111 bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, 1111 bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
1112 struct esas2r_sg_context *sgc); 1112 struct esas2r_sg_context *sgc);
1113 bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, 1113 bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
1114 struct esas2r_sg_context *sgc); 1114 struct esas2r_sg_context *sgc);
1115 void esas2r_targ_db_initialize(struct esas2r_adapter *a); 1115 void esas2r_targ_db_initialize(struct esas2r_adapter *a);
1116 void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify); 1116 void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
1117 void esas2r_targ_db_report_changes(struct esas2r_adapter *a); 1117 void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
1118 struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, 1118 struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
1119 struct esas2r_disc_context *dc); 1119 struct esas2r_disc_context *dc);
1120 struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, 1120 struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
1121 struct esas2r_disc_context *dc, 1121 struct esas2r_disc_context *dc,
1122 u8 *ident, 1122 u8 *ident,
1123 u8 ident_len); 1123 u8 ident_len);
1124 void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t); 1124 void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
1125 struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, 1125 struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
1126 u64 *sas_addr); 1126 u64 *sas_addr);
1127 struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, 1127 struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
1128 void *identifier, 1128 void *identifier,
1129 u8 ident_len); 1129 u8 ident_len);
1130 u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id); 1130 u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
1131 struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, 1131 struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
1132 u16 virt_id); 1132 u16 virt_id);
1133 u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a); 1133 u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
1134 void esas2r_disc_initialize(struct esas2r_adapter *a); 1134 void esas2r_disc_initialize(struct esas2r_adapter *a);
1135 void esas2r_disc_start_waiting(struct esas2r_adapter *a); 1135 void esas2r_disc_start_waiting(struct esas2r_adapter *a);
1136 void esas2r_disc_check_for_work(struct esas2r_adapter *a); 1136 void esas2r_disc_check_for_work(struct esas2r_adapter *a);
1137 void esas2r_disc_check_complete(struct esas2r_adapter *a); 1137 void esas2r_disc_check_complete(struct esas2r_adapter *a);
1138 void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt); 1138 void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
1139 bool esas2r_disc_start_port(struct esas2r_adapter *a); 1139 bool esas2r_disc_start_port(struct esas2r_adapter *a);
1140 void esas2r_disc_local_start_request(struct esas2r_adapter *a, 1140 void esas2r_disc_local_start_request(struct esas2r_adapter *a,
1141 struct esas2r_request *rq); 1141 struct esas2r_request *rq);
1142 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str); 1142 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
1143 bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, 1143 bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
1144 struct atto_ioctl_vda *vi, 1144 struct atto_ioctl_vda *vi,
1145 struct esas2r_request *rq, 1145 struct esas2r_request *rq,
1146 struct esas2r_sg_context *sgc); 1146 struct esas2r_sg_context *sgc);
1147 void esas2r_queue_fw_event(struct esas2r_adapter *a, 1147 void esas2r_queue_fw_event(struct esas2r_adapter *a,
1148 enum fw_event_type type, 1148 enum fw_event_type type,
1149 void *data, 1149 void *data,
1150 int data_sz); 1150 int data_sz);
1151 1151
1152 /* Inline functions */ 1152 /* Inline functions */
1153 1153
1154 /* Allocate a chip scatter/gather list entry */ 1154 /* Allocate a chip scatter/gather list entry */
1155 static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) 1155 static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
1156 { 1156 {
1157 unsigned long flags; 1157 unsigned long flags;
1158 struct list_head *sgl; 1158 struct list_head *sgl;
1159 struct esas2r_mem_desc *result = NULL; 1159 struct esas2r_mem_desc *result = NULL;
1160 1160
1161 spin_lock_irqsave(&a->sg_list_lock, flags); 1161 spin_lock_irqsave(&a->sg_list_lock, flags);
1162 if (likely(!list_empty(&a->free_sg_list_head))) { 1162 if (likely(!list_empty(&a->free_sg_list_head))) {
1163 sgl = a->free_sg_list_head.next; 1163 sgl = a->free_sg_list_head.next;
1164 result = list_entry(sgl, struct esas2r_mem_desc, next_desc); 1164 result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
1165 list_del_init(sgl); 1165 list_del_init(sgl);
1166 } 1166 }
1167 spin_unlock_irqrestore(&a->sg_list_lock, flags); 1167 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1168 1168
1169 return result; 1169 return result;
1170 } 1170 }
1171 1171
1172 /* Initialize a scatter/gather context */ 1172 /* Initialize a scatter/gather context */
1173 static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc, 1173 static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
1174 struct esas2r_adapter *a, 1174 struct esas2r_adapter *a,
1175 struct esas2r_request *rq, 1175 struct esas2r_request *rq,
1176 struct atto_vda_sge *first) 1176 struct atto_vda_sge *first)
1177 { 1177 {
1178 sgc->adapter = a; 1178 sgc->adapter = a;
1179 sgc->first_req = rq; 1179 sgc->first_req = rq;
1180 1180
1181 /* 1181 /*
1182 * set the limit pointer such that an SGE pointer above this value 1182 * set the limit pointer such that an SGE pointer above this value
1183 * would be the first one to overflow the SGL. 1183 * would be the first one to overflow the SGL.
1184 */ 1184 */
1185 sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq 1185 sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
1186 + (sizeof(union 1186 + (sizeof(union
1187 atto_vda_req) / 1187 atto_vda_req) /
1188 8) 1188 8)
1189 - sizeof(struct 1189 - sizeof(struct
1190 atto_vda_sge)); 1190 atto_vda_sge));
1191 if (first) { 1191 if (first) {
1192 sgc->sge.a64.last = 1192 sgc->sge.a64.last =
1193 sgc->sge.a64.curr = first; 1193 sgc->sge.a64.curr = first;
1194 rq->vrq->scsi.sg_list_offset = (u8) 1194 rq->vrq->scsi.sg_list_offset = (u8)
1195 ((u8 *)first - 1195 ((u8 *)first -
1196 (u8 *)rq->vrq); 1196 (u8 *)rq->vrq);
1197 } else { 1197 } else {
1198 sgc->sge.a64.last = 1198 sgc->sge.a64.last =
1199 sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0]; 1199 sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
1200 rq->vrq->scsi.sg_list_offset = 1200 rq->vrq->scsi.sg_list_offset =
1201 (u8)offsetof(struct atto_vda_scsi_req, u.sge); 1201 (u8)offsetof(struct atto_vda_scsi_req, u.sge);
1202 } 1202 }
1203 sgc->sge.a64.chain = NULL; 1203 sgc->sge.a64.chain = NULL;
1204 } 1204 }
1205 1205
1206 static inline void esas2r_rq_init_request(struct esas2r_request *rq, 1206 static inline void esas2r_rq_init_request(struct esas2r_request *rq,
1207 struct esas2r_adapter *a) 1207 struct esas2r_adapter *a)
1208 { 1208 {
1209 union atto_vda_req *vrq = rq->vrq; 1209 union atto_vda_req *vrq = rq->vrq;
1210 u32 handle;
1211 1210
1212 INIT_LIST_HEAD(&rq->sg_table_head); 1211 INIT_LIST_HEAD(&rq->sg_table_head);
1213 rq->data_buf = (void *)(vrq + 1); 1212 rq->data_buf = (void *)(vrq + 1);
1214 rq->interrupt_cb = NULL; 1213 rq->interrupt_cb = NULL;
1215 rq->comp_cb = esas2r_complete_request_cb; 1214 rq->comp_cb = esas2r_complete_request_cb;
1216 rq->flags = 0; 1215 rq->flags = 0;
1217 rq->timeout = 0; 1216 rq->timeout = 0;
1218 rq->req_stat = RS_PENDING; 1217 rq->req_stat = RS_PENDING;
1219 rq->req_type = RT_INI_REQ; 1218 rq->req_type = RT_INI_REQ;
1220 1219
1221 /* clear the outbound response */ 1220 /* clear the outbound response */
1222 rq->func_rsp.dwords[0] = 0; 1221 rq->func_rsp.dwords[0] = 0;
1223 rq->func_rsp.dwords[1] = 0; 1222 rq->func_rsp.dwords[1] = 0;
1224 1223
1225 /* 1224 /*
1226 * clear the size of the VDA request. esas2r_build_sg_list() will 1225 * clear the size of the VDA request. esas2r_build_sg_list() will
1227 * only allow the size of the request to grow. there are some 1226 * only allow the size of the request to grow. there are some
1228 * management requests that go through there twice and the second 1227 * management requests that go through there twice and the second
1229 * time through sets a smaller request size. if this is not modified 1228 * time through sets a smaller request size. if this is not modified
1230 * at all we'll set it to the size of the entire VDA request. 1229 * at all we'll set it to the size of the entire VDA request.
1231 */ 1230 */
1232 rq->vda_req_sz = RQ_SIZE_DEFAULT; 1231 rq->vda_req_sz = RQ_SIZE_DEFAULT;
1233 1232
1234 /* req_table entry should be NULL at this point - if not, halt */ 1233 /* req_table entry should be NULL at this point - if not, halt */
1235 1234
1236 if (a->req_table[LOWORD(vrq->scsi.handle)]) 1235 if (a->req_table[LOWORD(vrq->scsi.handle)])
1237 esas2r_bugon(); 1236 esas2r_bugon();
1238 1237
1239 /* fill in the table for this handle so we can get back to the 1238 /* fill in the table for this handle so we can get back to the
1240 * request. 1239 * request.
1241 */ 1240 */
1242 a->req_table[LOWORD(vrq->scsi.handle)] = rq; 1241 a->req_table[LOWORD(vrq->scsi.handle)] = rq;
1243 1242
1244 /* 1243 /*
1245 * add a reference number to the handle to make it unique (until it 1244 * add a reference number to the handle to make it unique (until it
1246 * wraps of course) while preserving the upper word 1245 * wraps of course) while preserving the least significant word
1247 */ 1246 */
1248 1247 vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
1249 handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
1250 vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
1251 1248
1252 /* 1249 /*
1253 * the following formats a SCSI request. the caller can override as 1250 * the following formats a SCSI request. the caller can override as
1254 * necessary. clear_vda_request can be called to clear the VDA 1251 * necessary. clear_vda_request can be called to clear the VDA
1255 * request for another type of request. 1252 * request for another type of request.
1256 */ 1253 */
1257 vrq->scsi.function = VDA_FUNC_SCSI; 1254 vrq->scsi.function = VDA_FUNC_SCSI;
1258 vrq->scsi.sense_len = SENSE_DATA_SZ; 1255 vrq->scsi.sense_len = SENSE_DATA_SZ;
1259 1256
1260 /* clear out sg_list_offset and chain_offset */ 1257 /* clear out sg_list_offset and chain_offset */
1261 vrq->scsi.sg_list_offset = 0; 1258 vrq->scsi.sg_list_offset = 0;
1262 vrq->scsi.chain_offset = 0; 1259 vrq->scsi.chain_offset = 0;
1263 vrq->scsi.flags = 0; 1260 vrq->scsi.flags = 0;
1264 vrq->scsi.reserved = 0; 1261 vrq->scsi.reserved = 0;
1265 1262
1266 /* set the sense buffer to be the data payload buffer */ 1263 /* set the sense buffer to be the data payload buffer */
1267 vrq->scsi.ppsense_buf 1264 vrq->scsi.ppsense_buf
1268 = cpu_to_le64(rq->vrq_md->phys_addr + 1265 = cpu_to_le64(rq->vrq_md->phys_addr +
1269 sizeof(union atto_vda_req)); 1266 sizeof(union atto_vda_req));
1270 } 1267 }
1271 1268
1272 static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq, 1269 static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
1273 struct esas2r_adapter *a) 1270 struct esas2r_adapter *a)
1274 { 1271 {
1275 unsigned long flags; 1272 unsigned long flags;
1276 1273
1277 if (list_empty(&rq->sg_table_head)) 1274 if (list_empty(&rq->sg_table_head))
1278 return; 1275 return;
1279 1276
1280 spin_lock_irqsave(&a->sg_list_lock, flags); 1277 spin_lock_irqsave(&a->sg_list_lock, flags);
1281 list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head); 1278 list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
1282 spin_unlock_irqrestore(&a->sg_list_lock, flags); 1279 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1283 } 1280 }
1284 1281
1285 static inline void esas2r_rq_destroy_request(struct esas2r_request *rq, 1282 static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
1286 struct esas2r_adapter *a) 1283 struct esas2r_adapter *a)
1287 1284
1288 { 1285 {
1289 esas2r_rq_free_sg_lists(rq, a); 1286 esas2r_rq_free_sg_lists(rq, a);
1290 a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL; 1287 a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
1291 rq->data_buf = NULL; 1288 rq->data_buf = NULL;
1292 } 1289 }
1293 1290
1294 static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) 1291 static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
1295 { 1292 {
1296 1293
1297 return test_bit(AF_BUSRST_NEEDED, &a->flags) || 1294 return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
1298 test_bit(AF_BUSRST_DETECTED, &a->flags) || 1295 test_bit(AF_BUSRST_DETECTED, &a->flags) ||
1299 test_bit(AF_CHPRST_NEEDED, &a->flags) || 1296 test_bit(AF_CHPRST_NEEDED, &a->flags) ||
1300 test_bit(AF_CHPRST_DETECTED, &a->flags) || 1297 test_bit(AF_CHPRST_DETECTED, &a->flags) ||
1301 test_bit(AF_PORT_CHANGE, &a->flags); 1298 test_bit(AF_PORT_CHANGE, &a->flags);
1302 1299
1303 } 1300 }
1304 1301
1305 /* 1302 /*
1306 * Build the scatter/gather list for an I/O request according to the 1303 * Build the scatter/gather list for an I/O request according to the
1307 * specifications placed in the esas2r_sg_context. The caller must initialize 1304 * specifications placed in the esas2r_sg_context. The caller must initialize
1308 * struct esas2r_sg_context prior to the initial call by calling 1305 * struct esas2r_sg_context prior to the initial call by calling
1309 * esas2r_sgc_init() 1306 * esas2r_sgc_init()
1310 */ 1307 */
1311 static inline bool esas2r_build_sg_list(struct esas2r_adapter *a, 1308 static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
1312 struct esas2r_request *rq, 1309 struct esas2r_request *rq,
1313 struct esas2r_sg_context *sgc) 1310 struct esas2r_sg_context *sgc)
1314 { 1311 {
1315 if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0)) 1312 if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
1316 return true; 1313 return true;
1317 1314
1318 return (*a->build_sgl)(a, sgc); 1315 return (*a->build_sgl)(a, sgc);
1319 } 1316 }
1320 1317
1321 static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a) 1318 static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
1322 { 1319 {
1323 if (atomic_inc_return(&a->dis_ints_cnt) == 1) 1320 if (atomic_inc_return(&a->dis_ints_cnt) == 1)
1324 esas2r_write_register_dword(a, MU_INT_MASK_OUT, 1321 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1325 ESAS2R_INT_DIS_MASK); 1322 ESAS2R_INT_DIS_MASK);
1326 } 1323 }
1327 1324
1328 static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a) 1325 static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
1329 { 1326 {
1330 if (atomic_dec_return(&a->dis_ints_cnt) == 0) 1327 if (atomic_dec_return(&a->dis_ints_cnt) == 0)
1331 esas2r_write_register_dword(a, MU_INT_MASK_OUT, 1328 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1332 ESAS2R_INT_ENB_MASK); 1329 ESAS2R_INT_ENB_MASK);
1333 } 1330 }
1334 1331
1335 /* Schedule a TASKLET to perform non-interrupt tasks that may require delays 1332 /* Schedule a TASKLET to perform non-interrupt tasks that may require delays
1336 * or long completion times. 1333 * or long completion times.
1337 */ 1334 */
1338 static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) 1335 static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
1339 { 1336 {
1340 /* make sure we don't schedule twice */ 1337 /* make sure we don't schedule twice */
1341 if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags)) 1338 if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
1342 tasklet_hi_schedule(&a->tasklet); 1339 tasklet_hi_schedule(&a->tasklet);
1343 } 1340 }
1344 1341
1345 static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) 1342 static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
1346 { 1343 {
1347 if (!test_bit(AF_DEGRADED_MODE, &a->flags) && 1344 if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
1348 !test_bit(AF_CHPRST_PENDING, &a->flags) && 1345 !test_bit(AF_CHPRST_PENDING, &a->flags) &&
1349 (a->nvram->options2 & SASNVR2_HEARTBEAT)) 1346 (a->nvram->options2 & SASNVR2_HEARTBEAT))
1350 set_bit(AF_HEARTBEAT_ENB, &a->flags); 1347 set_bit(AF_HEARTBEAT_ENB, &a->flags);
1351 else 1348 else
1352 clear_bit(AF_HEARTBEAT_ENB, &a->flags); 1349 clear_bit(AF_HEARTBEAT_ENB, &a->flags);
1353 } 1350 }
1354 1351
1355 static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) 1352 static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
1356 { 1353 {
1357 clear_bit(AF_HEARTBEAT_ENB, &a->flags); 1354 clear_bit(AF_HEARTBEAT_ENB, &a->flags);
1358 clear_bit(AF_HEARTBEAT, &a->flags); 1355 clear_bit(AF_HEARTBEAT, &a->flags);
1359 } 1356 }
1360 1357
1361 /* Set the initial state for resetting the adapter on the next pass through 1358 /* Set the initial state for resetting the adapter on the next pass through
1362 * esas2r_do_deferred. 1359 * esas2r_do_deferred.
1363 */ 1360 */
1364 static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a) 1361 static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
1365 { 1362 {
1366 esas2r_disable_heartbeat(a); 1363 esas2r_disable_heartbeat(a);
1367 1364
1368 set_bit(AF_CHPRST_NEEDED, &a->flags); 1365 set_bit(AF_CHPRST_NEEDED, &a->flags);
1369 set_bit(AF_CHPRST_PENDING, &a->flags); 1366 set_bit(AF_CHPRST_PENDING, &a->flags);
1370 set_bit(AF_DISC_PENDING, &a->flags); 1367 set_bit(AF_DISC_PENDING, &a->flags);
1371 } 1368 }
1372 1369
1373 /* See if an interrupt is pending on the adapter. */ 1370 /* See if an interrupt is pending on the adapter. */
1374 static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a) 1371 static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
1375 { 1372 {
1376 u32 intstat; 1373 u32 intstat;
1377 1374
1378 if (a->int_mask == 0) 1375 if (a->int_mask == 0)
1379 return false; 1376 return false;
1380 1377
1381 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); 1378 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
1382 1379
1383 if ((intstat & a->int_mask) == 0) 1380 if ((intstat & a->int_mask) == 0)
1384 return false; 1381 return false;
1385 1382
1386 esas2r_disable_chip_interrupts(a); 1383 esas2r_disable_chip_interrupts(a);
1387 1384
1388 a->int_stat = intstat; 1385 a->int_stat = intstat;
1389 a->int_mask = 0; 1386 a->int_mask = 0;
1390 1387
1391 return true; 1388 return true;
1392 } 1389 }
1393 1390
1394 static inline u16 esas2r_targ_get_id(struct esas2r_target *t, 1391 static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
1395 struct esas2r_adapter *a) 1392 struct esas2r_adapter *a)
1396 { 1393 {
1397 return (u16)(uintptr_t)(t - a->targetdb); 1394 return (u16)(uintptr_t)(t - a->targetdb);
1398 } 1395 }
1399 1396
1400 /* Build and start an asynchronous event request */ 1397 /* Build and start an asynchronous event request */
1401 static inline void esas2r_start_ae_request(struct esas2r_adapter *a, 1398 static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
1402 struct esas2r_request *rq) 1399 struct esas2r_request *rq)
1403 { 1400 {
1404 unsigned long flags; 1401 unsigned long flags;
1405 1402
1406 esas2r_build_ae_req(a, rq); 1403 esas2r_build_ae_req(a, rq);
1407 1404
1408 spin_lock_irqsave(&a->queue_lock, flags); 1405 spin_lock_irqsave(&a->queue_lock, flags);
1409 esas2r_start_vda_request(a, rq); 1406 esas2r_start_vda_request(a, rq);
1410 spin_unlock_irqrestore(&a->queue_lock, flags); 1407 spin_unlock_irqrestore(&a->queue_lock, flags);
1411 } 1408 }
1412 1409
1413 static inline void esas2r_comp_list_drain(struct esas2r_adapter *a, 1410 static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
1414 struct list_head *comp_list) 1411 struct list_head *comp_list)
1415 { 1412 {
1416 struct esas2r_request *rq; 1413 struct esas2r_request *rq;
1417 struct list_head *element, *next; 1414 struct list_head *element, *next;
1418 1415
1419 list_for_each_safe(element, next, comp_list) { 1416 list_for_each_safe(element, next, comp_list) {
1420 rq = list_entry(element, struct esas2r_request, comp_list); 1417 rq = list_entry(element, struct esas2r_request, comp_list);
1421 list_del_init(element); 1418 list_del_init(element);
1422 esas2r_complete_request(a, rq); 1419 esas2r_complete_request(a, rq);
1423 } 1420 }
1424 } 1421 }
1425 1422
1426 /* sysfs handlers */ 1423 /* sysfs handlers */
1427 extern struct bin_attribute bin_attr_fw; 1424 extern struct bin_attribute bin_attr_fw;
1428 extern struct bin_attribute bin_attr_fs; 1425 extern struct bin_attribute bin_attr_fs;
1429 extern struct bin_attribute bin_attr_vda; 1426 extern struct bin_attribute bin_attr_vda;
1430 extern struct bin_attribute bin_attr_hw; 1427 extern struct bin_attribute bin_attr_hw;
1431 extern struct bin_attribute bin_attr_live_nvram; 1428 extern struct bin_attribute bin_attr_live_nvram;
1432 extern struct bin_attribute bin_attr_default_nvram; 1429 extern struct bin_attribute bin_attr_default_nvram;
1433 1430
1434 #endif /* ESAS2R_H */ 1431 #endif /* ESAS2R_H */
1435 1432
drivers/scsi/esas2r/esas2r_init.c
1 /* 1 /*
2 * linux/drivers/scsi/esas2r/esas2r_init.c 2 * linux/drivers/scsi/esas2r/esas2r_init.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers 3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 * 4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc. 5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag. 6 * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag.
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2 10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version. 11 * of the License, or (at your option) any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * NO WARRANTY 18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and 23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its 24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to 25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data, 26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations. 27 * programs or equipment, and unavailability or interruption of operations.
28 * 28 *
29 * DISCLAIMER OF LIABILITY 29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 * 37 *
38 * You should have received a copy of the GNU General Public License 38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software 39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA. 41 * USA.
42 */ 42 */
43 43
44 #include "esas2r.h" 44 #include "esas2r.h"
45 45
46 static bool esas2r_initmem_alloc(struct esas2r_adapter *a, 46 static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
47 struct esas2r_mem_desc *mem_desc, 47 struct esas2r_mem_desc *mem_desc,
48 u32 align) 48 u32 align)
49 { 49 {
50 mem_desc->esas2r_param = mem_desc->size + align; 50 mem_desc->esas2r_param = mem_desc->size + align;
51 mem_desc->virt_addr = NULL; 51 mem_desc->virt_addr = NULL;
52 mem_desc->phys_addr = 0; 52 mem_desc->phys_addr = 0;
53 mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev, 53 mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
54 (size_t)mem_desc-> 54 (size_t)mem_desc->
55 esas2r_param, 55 esas2r_param,
56 (dma_addr_t *)&mem_desc-> 56 (dma_addr_t *)&mem_desc->
57 phys_addr, 57 phys_addr,
58 GFP_KERNEL); 58 GFP_KERNEL);
59 59
60 if (mem_desc->esas2r_data == NULL) { 60 if (mem_desc->esas2r_data == NULL) {
61 esas2r_log(ESAS2R_LOG_CRIT, 61 esas2r_log(ESAS2R_LOG_CRIT,
62 "failed to allocate %lu bytes of consistent memory!", 62 "failed to allocate %lu bytes of consistent memory!",
63 (long 63 (long
64 unsigned 64 unsigned
65 int)mem_desc->esas2r_param); 65 int)mem_desc->esas2r_param);
66 return false; 66 return false;
67 } 67 }
68 68
69 mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align); 69 mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
70 mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align); 70 mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
71 memset(mem_desc->virt_addr, 0, mem_desc->size); 71 memset(mem_desc->virt_addr, 0, mem_desc->size);
72 return true; 72 return true;
73 } 73 }
74 74
/*
 * Free a buffer previously obtained from esas2r_initmem_alloc().
 * A NULL virt_addr means nothing was allocated, so there is nothing
 * to do.
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* Bytes skipped at the front of the allocation by alignment. */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		/*
		 * NOTE(review): a zero phys_addr is treated as a non-DMA
		 * allocation and freed with kfree() -- presumably for
		 * descriptors filled in by another path; confirm callers.
		 */
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
102 102
103 static bool alloc_vda_req(struct esas2r_adapter *a, 103 static bool alloc_vda_req(struct esas2r_adapter *a,
104 struct esas2r_request *rq) 104 struct esas2r_request *rq)
105 { 105 {
106 struct esas2r_mem_desc *memdesc = kzalloc( 106 struct esas2r_mem_desc *memdesc = kzalloc(
107 sizeof(struct esas2r_mem_desc), GFP_KERNEL); 107 sizeof(struct esas2r_mem_desc), GFP_KERNEL);
108 108
109 if (memdesc == NULL) { 109 if (memdesc == NULL) {
110 esas2r_hdebug("could not alloc mem for vda request memdesc\n"); 110 esas2r_hdebug("could not alloc mem for vda request memdesc\n");
111 return false; 111 return false;
112 } 112 }
113 113
114 memdesc->size = sizeof(union atto_vda_req) + 114 memdesc->size = sizeof(union atto_vda_req) +
115 ESAS2R_DATA_BUF_LEN; 115 ESAS2R_DATA_BUF_LEN;
116 116
117 if (!esas2r_initmem_alloc(a, memdesc, 256)) { 117 if (!esas2r_initmem_alloc(a, memdesc, 256)) {
118 esas2r_hdebug("could not alloc mem for vda request\n"); 118 esas2r_hdebug("could not alloc mem for vda request\n");
119 kfree(memdesc); 119 kfree(memdesc);
120 return false; 120 return false;
121 } 121 }
122 122
123 a->num_vrqs++; 123 a->num_vrqs++;
124 list_add(&memdesc->next_desc, &a->vrq_mds_head); 124 list_add(&memdesc->next_desc, &a->vrq_mds_head);
125 125
126 rq->vrq_md = memdesc; 126 rq->vrq_md = memdesc;
127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr; 127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
128 rq->vrq->scsi.handle = a->num_vrqs; 128 rq->vrq->scsi.handle = a->num_vrqs;
129 129
130 return true; 130 return true;
131 } 131 }
132 132
/*
 * Tear down both PCI BAR mappings used by the adapter: BAR 2 (register
 * space) and BAR 0 (data window).  The iounmap calls are guarded by a
 * NULL check, but NOTE(review): pci_release_region() is invoked
 * unconditionally for both BARs -- this presumes the regions were
 * previously requested; confirm all callers reach here only after
 * esas2r_map_regions() acquired them.
 */
static void esas2r_unmap_regions(struct esas2r_adapter *a)
{
	if (a->regs)
		iounmap((void __iomem *)a->regs);

	a->regs = NULL;

	pci_release_region(a->pcid, 2);

	if (a->data_window)
		iounmap((void __iomem *)a->data_window);

	a->data_window = NULL;

	pci_release_region(a->pcid, 0);
}
149 149
150 static int esas2r_map_regions(struct esas2r_adapter *a) 150 static int esas2r_map_regions(struct esas2r_adapter *a)
151 { 151 {
152 int error; 152 int error;
153 153
154 a->regs = NULL; 154 a->regs = NULL;
155 a->data_window = NULL; 155 a->data_window = NULL;
156 156
157 error = pci_request_region(a->pcid, 2, a->name); 157 error = pci_request_region(a->pcid, 2, a->name);
158 if (error != 0) { 158 if (error != 0) {
159 esas2r_log(ESAS2R_LOG_CRIT, 159 esas2r_log(ESAS2R_LOG_CRIT,
160 "pci_request_region(2) failed, error %d", 160 "pci_request_region(2) failed, error %d",
161 error); 161 error);
162 162
163 return error; 163 return error;
164 } 164 }
165 165
166 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), 166 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
167 pci_resource_len(a->pcid, 2)); 167 pci_resource_len(a->pcid, 2));
168 if (a->regs == NULL) { 168 if (a->regs == NULL) {
169 esas2r_log(ESAS2R_LOG_CRIT, 169 esas2r_log(ESAS2R_LOG_CRIT,
170 "ioremap failed for regs mem region\n"); 170 "ioremap failed for regs mem region\n");
171 pci_release_region(a->pcid, 2); 171 pci_release_region(a->pcid, 2);
172 return -EFAULT; 172 return -EFAULT;
173 } 173 }
174 174
175 error = pci_request_region(a->pcid, 0, a->name); 175 error = pci_request_region(a->pcid, 0, a->name);
176 if (error != 0) { 176 if (error != 0) {
177 esas2r_log(ESAS2R_LOG_CRIT, 177 esas2r_log(ESAS2R_LOG_CRIT,
178 "pci_request_region(2) failed, error %d", 178 "pci_request_region(2) failed, error %d",
179 error); 179 error);
180 esas2r_unmap_regions(a); 180 esas2r_unmap_regions(a);
181 return error; 181 return error;
182 } 182 }
183 183
184 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, 184 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
185 0), 185 0),
186 pci_resource_len(a->pcid, 0)); 186 pci_resource_len(a->pcid, 0));
187 if (a->data_window == NULL) { 187 if (a->data_window == NULL) {
188 esas2r_log(ESAS2R_LOG_CRIT, 188 esas2r_log(ESAS2R_LOG_CRIT,
189 "ioremap failed for data_window mem region\n"); 189 "ioremap failed for data_window mem region\n");
190 esas2r_unmap_regions(a); 190 esas2r_unmap_regions(a);
191 return -EFAULT; 191 return -EFAULT;
192 } 192 }
193 193
194 return 0; 194 return 0;
195 } 195 }
196 196
197 static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) 197 static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
198 { 198 {
199 int i; 199 int i;
200 200
201 /* Set up interrupt mode based on the requested value */ 201 /* Set up interrupt mode based on the requested value */
202 switch (intr_mode) { 202 switch (intr_mode) {
203 case INTR_MODE_LEGACY: 203 case INTR_MODE_LEGACY:
204 use_legacy_interrupts: 204 use_legacy_interrupts:
205 a->intr_mode = INTR_MODE_LEGACY; 205 a->intr_mode = INTR_MODE_LEGACY;
206 break; 206 break;
207 207
208 case INTR_MODE_MSI: 208 case INTR_MODE_MSI:
209 i = pci_enable_msi(a->pcid); 209 i = pci_enable_msi(a->pcid);
210 if (i != 0) { 210 if (i != 0) {
211 esas2r_log(ESAS2R_LOG_WARN, 211 esas2r_log(ESAS2R_LOG_WARN,
212 "failed to enable MSI for adapter %d, " 212 "failed to enable MSI for adapter %d, "
213 "falling back to legacy interrupts " 213 "falling back to legacy interrupts "
214 "(err=%d)", a->index, 214 "(err=%d)", a->index,
215 i); 215 i);
216 goto use_legacy_interrupts; 216 goto use_legacy_interrupts;
217 } 217 }
218 a->intr_mode = INTR_MODE_MSI; 218 a->intr_mode = INTR_MODE_MSI;
219 set_bit(AF2_MSI_ENABLED, &a->flags2); 219 set_bit(AF2_MSI_ENABLED, &a->flags2);
220 break; 220 break;
221 221
222 222
223 default: 223 default:
224 esas2r_log(ESAS2R_LOG_WARN, 224 esas2r_log(ESAS2R_LOG_WARN,
225 "unknown interrupt_mode %d requested, " 225 "unknown interrupt_mode %d requested, "
226 "falling back to legacy interrupt", 226 "falling back to legacy interrupt",
227 interrupt_mode); 227 interrupt_mode);
228 goto use_legacy_interrupts; 228 goto use_legacy_interrupts;
229 } 229 }
230 } 230 }
231 231
232 static void esas2r_claim_interrupts(struct esas2r_adapter *a) 232 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
233 { 233 {
234 unsigned long flags = IRQF_DISABLED; 234 unsigned long flags = IRQF_DISABLED;
235 235
236 if (a->intr_mode == INTR_MODE_LEGACY) 236 if (a->intr_mode == INTR_MODE_LEGACY)
237 flags |= IRQF_SHARED; 237 flags |= IRQF_SHARED;
238 238
239 esas2r_log(ESAS2R_LOG_INFO, 239 esas2r_log(ESAS2R_LOG_INFO,
240 "esas2r_claim_interrupts irq=%d (%p, %s, %x)", 240 "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
241 a->pcid->irq, a, a->name, flags); 241 a->pcid->irq, a, a->name, flags);
242 242
243 if (request_irq(a->pcid->irq, 243 if (request_irq(a->pcid->irq,
244 (a->intr_mode == 244 (a->intr_mode ==
245 INTR_MODE_LEGACY) ? esas2r_interrupt : 245 INTR_MODE_LEGACY) ? esas2r_interrupt :
246 esas2r_msi_interrupt, 246 esas2r_msi_interrupt,
247 flags, 247 flags,
248 a->name, 248 a->name,
249 a)) { 249 a)) {
250 esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", 250 esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
251 a->pcid->irq); 251 a->pcid->irq);
252 return; 252 return;
253 } 253 }
254 254
255 set_bit(AF2_IRQ_CLAIMED, &a->flags2); 255 set_bit(AF2_IRQ_CLAIMED, &a->flags2);
256 esas2r_log(ESAS2R_LOG_INFO, 256 esas2r_log(ESAS2R_LOG_INFO,
257 "claimed IRQ %d flags: 0x%lx", 257 "claimed IRQ %d flags: 0x%lx",
258 a->pcid->irq, flags); 258 a->pcid->irq, flags);
259 } 259 }
260 260
/*
 * Bring up one adapter instance: validate the slot index, configure DMA
 * masks, initialize locks/queues/lists, allocate the per-request VDA
 * buffers and the uncached DMA region, map the PCI BARs, and finally
 * initialize the hardware and claim the IRQ.
 *
 * Returns 1 on success, 0 on any failure (the partially initialized
 * adapter is torn down via esas2r_kill_adapter on the error paths).
 */
int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
			int index)
{
	struct esas2r_adapter *a;
	u64 bus_addr = 0;
	int i;
	void *next_uncached;
	struct esas2r_request *first_request, *last_request;

	if (index >= MAX_ADAPTERS) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init invalid adapter index %u!",
			   index);
		return 0;
	}

	if (esas2r_adapters[index]) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "tried to init existing adapter index %u!",
			   index);
		return 0;
	}

	/* Adapter state lives in the Scsi_Host's hostdata area. */
	a = (struct esas2r_adapter *)host->hostdata;
	memset(a, 0, sizeof(struct esas2r_adapter));
	a->pcid = pcid;
	a->host = host;

	/* Prefer 64-bit DMA when dma_addr_t is wide enough and the
	 * platform actually needs/supports it; otherwise fall back to
	 * 32-bit addressing. */
	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask = dma_get_required_mask
						       (&pcid->dev);
		if (required_mask > DMA_BIT_MASK(32)
		    && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
		    && !pci_set_consistent_dma_mask(pcid,
						    DMA_BIT_MASK(64))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "64-bit PCI addressing enabled\n");
		} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
			   && !pci_set_consistent_dma_mask(pcid,
							   DMA_BIT_MASK(32))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "32-bit PCI addressing enabled\n");
		} else {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to set DMA mask");
			esas2r_kill_adapter(index);
			return 0;
		}
	} else {
		if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
		    && !pci_set_consistent_dma_mask(pcid,
						    DMA_BIT_MASK(32))) {
			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->pcid->dev),
				       "32-bit PCI addressing enabled\n");
		} else {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to set DMA mask");
			esas2r_kill_adapter(index);
			return 0;
		}
	}
	esas2r_adapters[index] = a;
	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
	esas2r_debug("new adapter %p, name %s", a, a->name);
	spin_lock_init(&a->request_lock);
	spin_lock_init(&a->fw_event_lock);
	sema_init(&a->fm_api_semaphore, 1);
	sema_init(&a->fs_api_semaphore, 1);
	sema_init(&a->nvram_semaphore, 1);

	/* Firmware events stay off until initialization completes. */
	esas2r_fw_event_off(a);
	snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
		 a->index);
	a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);

	init_waitqueue_head(&a->buffered_ioctl_waiter);
	init_waitqueue_head(&a->nvram_waiter);
	init_waitqueue_head(&a->fm_api_waiter);
	init_waitqueue_head(&a->fs_api_waiter);
	init_waitqueue_head(&a->vda_waiter);

	INIT_LIST_HEAD(&a->general_req.req_list);
	INIT_LIST_HEAD(&a->active_list);
	INIT_LIST_HEAD(&a->defer_list);
	INIT_LIST_HEAD(&a->free_sg_list_head);
	INIT_LIST_HEAD(&a->avail_request);
	INIT_LIST_HEAD(&a->vrq_mds_head);
	INIT_LIST_HEAD(&a->fw_event_list);

	/* The request pool was allocated contiguously after the adapter
	 * struct; give each request a VDA buffer. */
	first_request = (struct esas2r_request *)((u8 *)(a + 1));

	for (last_request = first_request, i = 1; i < num_requests;
	     last_request++, i++) {
		INIT_LIST_HEAD(&last_request->req_list);
		list_add_tail(&last_request->comp_list, &a->avail_request);
		if (!alloc_vda_req(a, last_request)) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "failed to allocate a VDA request!");
			esas2r_kill_adapter(index);
			return 0;
		}
	}

	esas2r_debug("requests: %p to %p (%d, %d)", first_request,
		     last_request,
		     sizeof(*first_request),
		     num_requests);

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
		esas2r_kill_adapter(index);
		return 0;
	}

	a->index = index;

	/* interrupts will be disabled until we are done with init */
	atomic_inc(&a->dis_ints_cnt);
	atomic_inc(&a->disable_cnt);
	set_bit(AF_CHPRST_PENDING, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_FIRST_INIT, &a->flags);
	set_bit(AF_LEGACY_SGE_MODE, &a->flags);

	a->init_msg = ESAS2R_INIT_MSG_START;
	a->max_vdareq_size = 128;
	a->build_sgl = esas2r_build_sg_list_sge;

	esas2r_setup_interrupts(a, interrupt_mode);

	/* One coherent region holds all "uncached" shared structures. */
	a->uncached_size = esas2r_get_uncached_size(a);
	a->uncached = dma_alloc_coherent(&pcid->dev,
					 (size_t)a->uncached_size,
					 (dma_addr_t *)&bus_addr,
					 GFP_KERNEL);
	if (a->uncached == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %d bytes of consistent memory!",
			   a->uncached_size);
		esas2r_kill_adapter(index);
		return 0;
	}

	a->uncached_phys = bus_addr;

	esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
		     a->uncached_size,
		     a->uncached,
		     upper_32_bits(bus_addr),
		     lower_32_bits(bus_addr));
	memset(a->uncached, 0, a->uncached_size);
	next_uncached = a->uncached;

	if (!esas2r_init_adapter_struct(a,
					&next_uncached)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to initialize adapter structure (2)!");
		esas2r_kill_adapter(index);
		return 0;
	}

	tasklet_init(&a->tasklet,
		     esas2r_adapter_tasklet,
		     (unsigned long)a);

	/*
	 * Disable chip interrupts to prevent spurious interrupts
	 * until we claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_check_adapter(a);

	if (!esas2r_init_adapter_hw(a, true))
		esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
	else
		esas2r_debug("esas2r_init_adapter ok");

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
		esas2r_enable_chip_interrupts(a);

	set_bit(AF2_INIT_DONE, &a->flags2);
	if (!test_bit(AF_DEGRADED_MODE, &a->flags))
		esas2r_kickoff_timer(a);
	esas2r_debug("esas2r_init_adapter done for %p (%d)",
		     a, a->disable_cnt);

	return 1;
}
454 454
/*
 * Quiesce and release an adapter's resources.  When @power_management
 * is zero this is a full shutdown (timer and tasklet are stopped);
 * otherwise only the power-down sequence runs.  Also removes all sysfs
 * binary attributes, frees the IRQ/MSI, and releases every DMA buffer
 * tracked by the adapter.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	if ((test_bit(AF2_INIT_DONE, &a->flags2))
	    &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
	}

	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
		pci_disable_msi(a->pcid);
		clear_bit(AF2_MSI_ENABLED, &a->flags2);
		esas2r_debug("MSI disabled");
	}

	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	/* Free the scatter/gather list buffers. */
	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
561 561
/* Release/free allocated resources for specified adapters. */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);

		/*
		 * Stop firmware event delivery and power the adapter down
		 * before any of its backing memory is released.
		 */
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);

		/*
		 * The buffered-ioctl area is a driver-global shared by all
		 * adapters; only free it if this adapter owns it.
		 */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		/* free(NULL) is a no-op, so no guard is needed here */
		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/*
		 * Detach the firmware event workqueue under the lock so no
		 * new work can be queued, then destroy it outside the lock
		 * (destroy_workqueue() may sleep).
		 */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		/*
		 * The uncached area backs the NVRAM image, discovery buffer
		 * and communication lists (see esas2r_init_adapter_struct).
		 */
		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called. msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		/* unregister from the global table before tearing down SCSI */
		esas2r_adapters[i] = NULL;

		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
			clear_bit(AF2_INIT_DONE, &a->flags2);

			/* keep late I/O submissions from touching hardware */
			set_bit(AF_DEGRADED_MODE, &a->flags);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			/* drops the last reference; 'a' lives in hostdata */
			scsi_host_put(a->host);
		}
	}
}
663 663
664 int esas2r_cleanup(struct Scsi_Host *host) 664 int esas2r_cleanup(struct Scsi_Host *host)
665 { 665 {
666 struct esas2r_adapter *a; 666 struct esas2r_adapter *a;
667 int index; 667 int index;
668 668
669 if (host == NULL) { 669 if (host == NULL) {
670 int i; 670 int i;
671 671
672 esas2r_debug("esas2r_cleanup everything"); 672 esas2r_debug("esas2r_cleanup everything");
673 for (i = 0; i < MAX_ADAPTERS; i++) 673 for (i = 0; i < MAX_ADAPTERS; i++)
674 esas2r_kill_adapter(i); 674 esas2r_kill_adapter(i);
675 return -1; 675 return -1;
676 } 676 }
677 677
678 esas2r_debug("esas2r_cleanup called for host %p", host); 678 esas2r_debug("esas2r_cleanup called for host %p", host);
679 a = (struct esas2r_adapter *)host->hostdata; 679 a = (struct esas2r_adapter *)host->hostdata;
680 index = a->index; 680 index = a->index;
681 esas2r_kill_adapter(index); 681 esas2r_kill_adapter(index);
682 return index; 682 return index;
683 } 683 }
684 684
685 int esas2r_suspend(struct pci_dev *pdev, pm_message_t state) 685 int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
686 { 686 {
687 struct Scsi_Host *host = pci_get_drvdata(pdev); 687 struct Scsi_Host *host = pci_get_drvdata(pdev);
688 u32 device_state; 688 u32 device_state;
689 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; 689 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
690 690
691 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()"); 691 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
692 if (!a) 692 if (!a)
693 return -ENODEV; 693 return -ENODEV;
694 694
695 esas2r_adapter_power_down(a, 1); 695 esas2r_adapter_power_down(a, 1);
696 device_state = pci_choose_state(pdev, state); 696 device_state = pci_choose_state(pdev, state);
697 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 697 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
698 "pci_save_state() called"); 698 "pci_save_state() called");
699 pci_save_state(pdev); 699 pci_save_state(pdev);
700 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 700 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
701 "pci_disable_device() called"); 701 "pci_disable_device() called");
702 pci_disable_device(pdev); 702 pci_disable_device(pdev);
703 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 703 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
704 "pci_set_power_state() called"); 704 "pci_set_power_state() called");
705 pci_set_power_state(pdev, device_state); 705 pci_set_power_state(pdev, device_state);
706 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0"); 706 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
707 return 0; 707 return 0;
708 } 708 }
709 709
710 int esas2r_resume(struct pci_dev *pdev) 710 int esas2r_resume(struct pci_dev *pdev)
711 { 711 {
712 struct Scsi_Host *host = pci_get_drvdata(pdev); 712 struct Scsi_Host *host = pci_get_drvdata(pdev);
713 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; 713 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
714 int rez; 714 int rez;
715 715
716 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()"); 716 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
717 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 717 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
718 "pci_set_power_state(PCI_D0) " 718 "pci_set_power_state(PCI_D0) "
719 "called"); 719 "called");
720 pci_set_power_state(pdev, PCI_D0); 720 pci_set_power_state(pdev, PCI_D0);
721 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 721 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
722 "pci_enable_wake(PCI_D0, 0) " 722 "pci_enable_wake(PCI_D0, 0) "
723 "called"); 723 "called");
724 pci_enable_wake(pdev, PCI_D0, 0); 724 pci_enable_wake(pdev, PCI_D0, 0);
725 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 725 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
726 "pci_restore_state() called"); 726 "pci_restore_state() called");
727 pci_restore_state(pdev); 727 pci_restore_state(pdev);
728 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 728 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
729 "pci_enable_device() called"); 729 "pci_enable_device() called");
730 rez = pci_enable_device(pdev); 730 rez = pci_enable_device(pdev);
731 pci_set_master(pdev); 731 pci_set_master(pdev);
732 732
733 if (!a) { 733 if (!a) {
734 rez = -ENODEV; 734 rez = -ENODEV;
735 goto error_exit; 735 goto error_exit;
736 } 736 }
737 737
738 if (esas2r_map_regions(a) != 0) { 738 if (esas2r_map_regions(a) != 0) {
739 esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!"); 739 esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
740 rez = -ENOMEM; 740 rez = -ENOMEM;
741 goto error_exit; 741 goto error_exit;
742 } 742 }
743 743
744 /* Set up interupt mode */ 744 /* Set up interupt mode */
745 esas2r_setup_interrupts(a, a->intr_mode); 745 esas2r_setup_interrupts(a, a->intr_mode);
746 746
747 /* 747 /*
748 * Disable chip interrupts to prevent spurious interrupts until we 748 * Disable chip interrupts to prevent spurious interrupts until we
749 * claim the IRQ. 749 * claim the IRQ.
750 */ 750 */
751 esas2r_disable_chip_interrupts(a); 751 esas2r_disable_chip_interrupts(a);
752 if (!esas2r_power_up(a, true)) { 752 if (!esas2r_power_up(a, true)) {
753 esas2r_debug("yikes, esas2r_power_up failed"); 753 esas2r_debug("yikes, esas2r_power_up failed");
754 rez = -ENOMEM; 754 rez = -ENOMEM;
755 goto error_exit; 755 goto error_exit;
756 } 756 }
757 757
758 esas2r_claim_interrupts(a); 758 esas2r_claim_interrupts(a);
759 759
760 if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { 760 if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
761 /* 761 /*
762 * Now that system interrupt(s) are claimed, we can enable 762 * Now that system interrupt(s) are claimed, we can enable
763 * chip interrupts. 763 * chip interrupts.
764 */ 764 */
765 esas2r_enable_chip_interrupts(a); 765 esas2r_enable_chip_interrupts(a);
766 esas2r_kickoff_timer(a); 766 esas2r_kickoff_timer(a);
767 } else { 767 } else {
768 esas2r_debug("yikes, unable to claim IRQ"); 768 esas2r_debug("yikes, unable to claim IRQ");
769 esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!"); 769 esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
770 rez = -ENOMEM; 770 rez = -ENOMEM;
771 goto error_exit; 771 goto error_exit;
772 } 772 }
773 773
774 error_exit: 774 error_exit:
775 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d", 775 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
776 rez); 776 rez);
777 return rez; 777 return rez;
778 } 778 }
779 779
780 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) 780 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
781 { 781 {
782 set_bit(AF_DEGRADED_MODE, &a->flags); 782 set_bit(AF_DEGRADED_MODE, &a->flags);
783 esas2r_log(ESAS2R_LOG_CRIT, 783 esas2r_log(ESAS2R_LOG_CRIT,
784 "setting adapter to degraded mode: %s\n", error_str); 784 "setting adapter to degraded mode: %s\n", error_str);
785 return false; 785 return false;
786 } 786 }
787 787
788 u32 esas2r_get_uncached_size(struct esas2r_adapter *a) 788 u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
789 { 789 {
790 return sizeof(struct esas2r_sas_nvram) 790 return sizeof(struct esas2r_sas_nvram)
791 + ALIGN(ESAS2R_DISC_BUF_LEN, 8) 791 + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
792 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */ 792 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
793 + 8 793 + 8
794 + (num_sg_lists * (u16)sgl_page_size) 794 + (num_sg_lists * (u16)sgl_page_size)
795 + ALIGN((num_requests + num_ae_requests + 1 + 795 + ALIGN((num_requests + num_ae_requests + 1 +
796 ESAS2R_LIST_EXTRA) * 796 ESAS2R_LIST_EXTRA) *
797 sizeof(struct esas2r_inbound_list_source_entry), 797 sizeof(struct esas2r_inbound_list_source_entry),
798 8) 798 8)
799 + ALIGN((num_requests + num_ae_requests + 1 + 799 + ALIGN((num_requests + num_ae_requests + 1 +
800 ESAS2R_LIST_EXTRA) * 800 ESAS2R_LIST_EXTRA) *
801 sizeof(struct atto_vda_ob_rsp), 8) 801 sizeof(struct atto_vda_ob_rsp), 8)
802 + 256; /* VDA request and buffer align */ 802 + 256; /* VDA request and buffer align */
803 } 803 }
804 804
805 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) 805 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
806 { 806 {
807 int pcie_cap_reg; 807 int pcie_cap_reg;
808 808
809 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); 809 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
810 if (0xffff & pcie_cap_reg) { 810 if (0xffff & pcie_cap_reg) {
811 u16 devcontrol; 811 u16 devcontrol;
812 812
813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, 813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
814 &devcontrol); 814 &devcontrol);
815 815
816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) { 816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
817 esas2r_log(ESAS2R_LOG_INFO, 817 esas2r_log(ESAS2R_LOG_INFO,
818 "max read request size > 512B"); 818 "max read request size > 512B");
819 819
820 devcontrol &= ~PCI_EXP_DEVCTL_READRQ; 820 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
821 devcontrol |= 0x2000; 821 devcontrol |= 0x2000;
822 pci_write_config_word(a->pcid, 822 pci_write_config_word(a->pcid,
823 pcie_cap_reg + PCI_EXP_DEVCTL, 823 pcie_cap_reg + PCI_EXP_DEVCTL,
824 devcontrol); 824 devcontrol);
825 } 825 }
826 } 826 }
827 } 827 }
828 828
829 /* 829 /*
830 * Determine the organization of the uncached data area and 830 * Determine the organization of the uncached data area and
831 * finish initializing the adapter structure 831 * finish initializing the adapter structure
832 */ 832 */
833 bool esas2r_init_adapter_struct(struct esas2r_adapter *a, 833 bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
834 void **uncached_area) 834 void **uncached_area)
835 { 835 {
836 u32 i; 836 u32 i;
837 u8 *high; 837 u8 *high;
838 struct esas2r_inbound_list_source_entry *element; 838 struct esas2r_inbound_list_source_entry *element;
839 struct esas2r_request *rq; 839 struct esas2r_request *rq;
840 struct esas2r_mem_desc *sgl; 840 struct esas2r_mem_desc *sgl;
841 841
842 spin_lock_init(&a->sg_list_lock); 842 spin_lock_init(&a->sg_list_lock);
843 spin_lock_init(&a->mem_lock); 843 spin_lock_init(&a->mem_lock);
844 spin_lock_init(&a->queue_lock); 844 spin_lock_init(&a->queue_lock);
845 845
846 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS]; 846 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
847 847
848 if (!alloc_vda_req(a, &a->general_req)) { 848 if (!alloc_vda_req(a, &a->general_req)) {
849 esas2r_hdebug( 849 esas2r_hdebug(
850 "failed to allocate a VDA request for the general req!"); 850 "failed to allocate a VDA request for the general req!");
851 return false; 851 return false;
852 } 852 }
853 853
854 /* allocate requests for asynchronous events */ 854 /* allocate requests for asynchronous events */
855 a->first_ae_req = 855 a->first_ae_req =
856 kzalloc(num_ae_requests * sizeof(struct esas2r_request), 856 kzalloc(num_ae_requests * sizeof(struct esas2r_request),
857 GFP_KERNEL); 857 GFP_KERNEL);
858 858
859 if (a->first_ae_req == NULL) { 859 if (a->first_ae_req == NULL) {
860 esas2r_log(ESAS2R_LOG_CRIT, 860 esas2r_log(ESAS2R_LOG_CRIT,
861 "failed to allocate memory for asynchronous events"); 861 "failed to allocate memory for asynchronous events");
862 return false; 862 return false;
863 } 863 }
864 864
865 /* allocate the S/G list memory descriptors */ 865 /* allocate the S/G list memory descriptors */
866 a->sg_list_mds = kzalloc( 866 a->sg_list_mds = kzalloc(
867 num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL); 867 num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
868 868
869 if (a->sg_list_mds == NULL) { 869 if (a->sg_list_mds == NULL) {
870 esas2r_log(ESAS2R_LOG_CRIT, 870 esas2r_log(ESAS2R_LOG_CRIT,
871 "failed to allocate memory for s/g list descriptors"); 871 "failed to allocate memory for s/g list descriptors");
872 return false; 872 return false;
873 } 873 }
874 874
875 /* allocate the request table */ 875 /* allocate the request table */
876 a->req_table = 876 a->req_table =
877 kzalloc((num_requests + num_ae_requests + 877 kzalloc((num_requests + num_ae_requests +
878 1) * sizeof(struct esas2r_request *), GFP_KERNEL); 878 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
879 879
880 if (a->req_table == NULL) { 880 if (a->req_table == NULL) {
881 esas2r_log(ESAS2R_LOG_CRIT, 881 esas2r_log(ESAS2R_LOG_CRIT,
882 "failed to allocate memory for the request table"); 882 "failed to allocate memory for the request table");
883 return false; 883 return false;
884 } 884 }
885 885
886 /* initialize PCI configuration space */ 886 /* initialize PCI configuration space */
887 esas2r_init_pci_cfg_space(a); 887 esas2r_init_pci_cfg_space(a);
888 888
889 /* 889 /*
890 * the thunder_stream boards all have a serial flash part that has a 890 * the thunder_stream boards all have a serial flash part that has a
891 * different base address on the AHB bus. 891 * different base address on the AHB bus.
892 */ 892 */
893 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID) 893 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
894 && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) 894 && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
895 a->flags2 |= AF2_THUNDERBOLT; 895 a->flags2 |= AF2_THUNDERBOLT;
896 896
897 if (test_bit(AF2_THUNDERBOLT, &a->flags2)) 897 if (test_bit(AF2_THUNDERBOLT, &a->flags2))
898 a->flags2 |= AF2_SERIAL_FLASH; 898 a->flags2 |= AF2_SERIAL_FLASH;
899 899
900 if (a->pcid->subsystem_device == ATTO_TLSH_1068) 900 if (a->pcid->subsystem_device == ATTO_TLSH_1068)
901 a->flags2 |= AF2_THUNDERLINK; 901 a->flags2 |= AF2_THUNDERLINK;
902 902
903 /* Uncached Area */ 903 /* Uncached Area */
904 high = (u8 *)*uncached_area; 904 high = (u8 *)*uncached_area;
905 905
906 /* initialize the scatter/gather table pages */ 906 /* initialize the scatter/gather table pages */
907 907
908 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) { 908 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
909 sgl->size = sgl_page_size; 909 sgl->size = sgl_page_size;
910 910
911 list_add_tail(&sgl->next_desc, &a->free_sg_list_head); 911 list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
912 912
913 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { 913 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
914 /* Allow the driver to load if the minimum count met. */ 914 /* Allow the driver to load if the minimum count met. */
915 if (i < NUM_SGL_MIN) 915 if (i < NUM_SGL_MIN)
916 return false; 916 return false;
917 break; 917 break;
918 } 918 }
919 } 919 }
920 920
921 /* compute the size of the lists */ 921 /* compute the size of the lists */
922 a->list_size = num_requests + ESAS2R_LIST_EXTRA; 922 a->list_size = num_requests + ESAS2R_LIST_EXTRA;
923 923
924 /* allocate the inbound list */ 924 /* allocate the inbound list */
925 a->inbound_list_md.size = a->list_size * 925 a->inbound_list_md.size = a->list_size *
926 sizeof(struct 926 sizeof(struct
927 esas2r_inbound_list_source_entry); 927 esas2r_inbound_list_source_entry);
928 928
929 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { 929 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
930 esas2r_hdebug("failed to allocate IB list"); 930 esas2r_hdebug("failed to allocate IB list");
931 return false; 931 return false;
932 } 932 }
933 933
934 /* allocate the outbound list */ 934 /* allocate the outbound list */
935 a->outbound_list_md.size = a->list_size * 935 a->outbound_list_md.size = a->list_size *
936 sizeof(struct atto_vda_ob_rsp); 936 sizeof(struct atto_vda_ob_rsp);
937 937
938 if (!esas2r_initmem_alloc(a, &a->outbound_list_md, 938 if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
939 ESAS2R_LIST_ALIGN)) { 939 ESAS2R_LIST_ALIGN)) {
940 esas2r_hdebug("failed to allocate IB list"); 940 esas2r_hdebug("failed to allocate IB list");
941 return false; 941 return false;
942 } 942 }
943 943
944 /* allocate the NVRAM structure */ 944 /* allocate the NVRAM structure */
945 a->nvram = (struct esas2r_sas_nvram *)high; 945 a->nvram = (struct esas2r_sas_nvram *)high;
946 high += sizeof(struct esas2r_sas_nvram); 946 high += sizeof(struct esas2r_sas_nvram);
947 947
948 /* allocate the discovery buffer */ 948 /* allocate the discovery buffer */
949 a->disc_buffer = high; 949 a->disc_buffer = high;
950 high += ESAS2R_DISC_BUF_LEN; 950 high += ESAS2R_DISC_BUF_LEN;
951 high = PTR_ALIGN(high, 8); 951 high = PTR_ALIGN(high, 8);
952 952
953 /* allocate the outbound list copy pointer */ 953 /* allocate the outbound list copy pointer */
954 a->outbound_copy = (u32 volatile *)high; 954 a->outbound_copy = (u32 volatile *)high;
955 high += sizeof(u32); 955 high += sizeof(u32);
956 956
957 if (!test_bit(AF_NVR_VALID, &a->flags)) 957 if (!test_bit(AF_NVR_VALID, &a->flags))
958 esas2r_nvram_set_defaults(a); 958 esas2r_nvram_set_defaults(a);
959 959
960 /* update the caller's uncached memory area pointer */ 960 /* update the caller's uncached memory area pointer */
961 *uncached_area = (void *)high; 961 *uncached_area = (void *)high;
962 962
963 /* initialize the allocated memory */ 963 /* initialize the allocated memory */
964 if (test_bit(AF_FIRST_INIT, &a->flags)) { 964 if (test_bit(AF_FIRST_INIT, &a->flags)) {
965 memset(a->req_table, 0, 965 memset(a->req_table, 0,
966 (num_requests + num_ae_requests + 966 (num_requests + num_ae_requests +
967 1) * sizeof(struct esas2r_request *)); 967 1) * sizeof(struct esas2r_request *));
968 968
969 esas2r_targ_db_initialize(a); 969 esas2r_targ_db_initialize(a);
970 970
971 /* prime parts of the inbound list */ 971 /* prime parts of the inbound list */
972 element = 972 element =
973 (struct esas2r_inbound_list_source_entry *)a-> 973 (struct esas2r_inbound_list_source_entry *)a->
974 inbound_list_md. 974 inbound_list_md.
975 virt_addr; 975 virt_addr;
976 976
977 for (i = 0; i < a->list_size; i++) { 977 for (i = 0; i < a->list_size; i++) {
978 element->address = 0; 978 element->address = 0;
979 element->reserved = 0; 979 element->reserved = 0;
980 element->length = cpu_to_le32(HWILSE_INTERFACE_F0 980 element->length = cpu_to_le32(HWILSE_INTERFACE_F0
981 | (sizeof(union 981 | (sizeof(union
982 atto_vda_req) 982 atto_vda_req)
983 / 983 /
984 sizeof(u32))); 984 sizeof(u32)));
985 element++; 985 element++;
986 } 986 }
987 987
988 /* init the AE requests */ 988 /* init the AE requests */
989 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, 989 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
990 i++) { 990 i++) {
991 INIT_LIST_HEAD(&rq->req_list); 991 INIT_LIST_HEAD(&rq->req_list);
992 if (!alloc_vda_req(a, rq)) { 992 if (!alloc_vda_req(a, rq)) {
993 esas2r_hdebug( 993 esas2r_hdebug(
994 "failed to allocate a VDA request!"); 994 "failed to allocate a VDA request!");
995 return false; 995 return false;
996 } 996 }
997 997
998 esas2r_rq_init_request(rq, a); 998 esas2r_rq_init_request(rq, a);
999 999
1000 /* override the completion function */ 1000 /* override the completion function */
1001 rq->comp_cb = esas2r_ae_complete; 1001 rq->comp_cb = esas2r_ae_complete;
1002 } 1002 }
1003 } 1003 }
1004 1004
1005 return true; 1005 return true;
1006 } 1006 }
1007 1007
1008 /* This code will verify that the chip is operational. */ 1008 /* This code will verify that the chip is operational. */
1009 bool esas2r_check_adapter(struct esas2r_adapter *a) 1009 bool esas2r_check_adapter(struct esas2r_adapter *a)
1010 { 1010 {
1011 u32 starttime; 1011 u32 starttime;
1012 u32 doorbell; 1012 u32 doorbell;
1013 u64 ppaddr; 1013 u64 ppaddr;
1014 u32 dw; 1014 u32 dw;
1015 1015
1016 /* 1016 /*
1017 * if the chip reset detected flag is set, we can bypass a bunch of 1017 * if the chip reset detected flag is set, we can bypass a bunch of
1018 * stuff. 1018 * stuff.
1019 */ 1019 */
1020 if (test_bit(AF_CHPRST_DETECTED, &a->flags)) 1020 if (test_bit(AF_CHPRST_DETECTED, &a->flags))
1021 goto skip_chip_reset; 1021 goto skip_chip_reset;
1022 1022
1023 /* 1023 /*
1024 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver 1024 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
1025 * may have left them enabled or we may be recovering from a fault. 1025 * may have left them enabled or we may be recovering from a fault.
1026 */ 1026 */
1027 esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); 1027 esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
1028 esas2r_flush_register_dword(a, MU_INT_MASK_OUT); 1028 esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
1029 1029
1030 /* 1030 /*
1031 * wait for the firmware to become ready by forcing an interrupt and 1031 * wait for the firmware to become ready by forcing an interrupt and
1032 * waiting for a response. 1032 * waiting for a response.
1033 */ 1033 */
1034 starttime = jiffies_to_msecs(jiffies); 1034 starttime = jiffies_to_msecs(jiffies);
1035 1035
1036 while (true) { 1036 while (true) {
1037 esas2r_force_interrupt(a); 1037 esas2r_force_interrupt(a);
1038 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1038 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1039 if (doorbell == 0xFFFFFFFF) { 1039 if (doorbell == 0xFFFFFFFF) {
1040 /* 1040 /*
1041 * Give the firmware up to two seconds to enable 1041 * Give the firmware up to two seconds to enable
1042 * register access after a reset. 1042 * register access after a reset.
1043 */ 1043 */
1044 if ((jiffies_to_msecs(jiffies) - starttime) > 2000) 1044 if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
1045 return esas2r_set_degraded_mode(a, 1045 return esas2r_set_degraded_mode(a,
1046 "unable to access registers"); 1046 "unable to access registers");
1047 } else if (doorbell & DRBL_FORCE_INT) { 1047 } else if (doorbell & DRBL_FORCE_INT) {
1048 u32 ver = (doorbell & DRBL_FW_VER_MSK); 1048 u32 ver = (doorbell & DRBL_FW_VER_MSK);
1049 1049
1050 /* 1050 /*
1051 * This driver supports version 0 and version 1 of 1051 * This driver supports version 0 and version 1 of
1052 * the API 1052 * the API
1053 */ 1053 */
1054 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1054 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1055 doorbell); 1055 doorbell);
1056 1056
1057 if (ver == DRBL_FW_VER_0) { 1057 if (ver == DRBL_FW_VER_0) {
1058 set_bit(AF_LEGACY_SGE_MODE, &a->flags); 1058 set_bit(AF_LEGACY_SGE_MODE, &a->flags);
1059 1059
1060 a->max_vdareq_size = 128; 1060 a->max_vdareq_size = 128;
1061 a->build_sgl = esas2r_build_sg_list_sge; 1061 a->build_sgl = esas2r_build_sg_list_sge;
1062 } else if (ver == DRBL_FW_VER_1) { 1062 } else if (ver == DRBL_FW_VER_1) {
1063 clear_bit(AF_LEGACY_SGE_MODE, &a->flags); 1063 clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
1064 1064
1065 a->max_vdareq_size = 1024; 1065 a->max_vdareq_size = 1024;
1066 a->build_sgl = esas2r_build_sg_list_prd; 1066 a->build_sgl = esas2r_build_sg_list_prd;
1067 } else { 1067 } else {
1068 return esas2r_set_degraded_mode(a, 1068 return esas2r_set_degraded_mode(a,
1069 "unknown firmware version"); 1069 "unknown firmware version");
1070 } 1070 }
1071 break; 1071 break;
1072 } 1072 }
1073 1073
1074 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1074 schedule_timeout_interruptible(msecs_to_jiffies(100));
1075 1075
1076 if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { 1076 if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
1077 esas2r_hdebug("FW ready TMO"); 1077 esas2r_hdebug("FW ready TMO");
1078 esas2r_bugon(); 1078 esas2r_bugon();
1079 1079
1080 return esas2r_set_degraded_mode(a, 1080 return esas2r_set_degraded_mode(a,
1081 "firmware start has timed out"); 1081 "firmware start has timed out");
1082 } 1082 }
1083 } 1083 }
1084 1084
1085 /* purge any asynchronous events since we will repost them later */ 1085 /* purge any asynchronous events since we will repost them later */
1086 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); 1086 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
1087 starttime = jiffies_to_msecs(jiffies); 1087 starttime = jiffies_to_msecs(jiffies);
1088 1088
1089 while (true) { 1089 while (true) {
1090 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1090 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1091 if (doorbell & DRBL_MSG_IFC_DOWN) { 1091 if (doorbell & DRBL_MSG_IFC_DOWN) {
1092 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1092 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1093 doorbell); 1093 doorbell);
1094 break; 1094 break;
1095 } 1095 }
1096 1096
1097 schedule_timeout_interruptible(msecs_to_jiffies(50)); 1097 schedule_timeout_interruptible(msecs_to_jiffies(50));
1098 1098
1099 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { 1099 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1100 esas2r_hdebug("timeout waiting for interface down"); 1100 esas2r_hdebug("timeout waiting for interface down");
1101 break; 1101 break;
1102 } 1102 }
1103 } 1103 }
1104 skip_chip_reset: 1104 skip_chip_reset:
1105 /* 1105 /*
1106 * first things first, before we go changing any of these registers 1106 * first things first, before we go changing any of these registers
1107 * disable the communication lists. 1107 * disable the communication lists.
1108 */ 1108 */
1109 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); 1109 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1110 dw &= ~MU_ILC_ENABLE; 1110 dw &= ~MU_ILC_ENABLE;
1111 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); 1111 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1112 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); 1112 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1113 dw &= ~MU_OLC_ENABLE; 1113 dw &= ~MU_OLC_ENABLE;
1114 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); 1114 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1115 1115
1116 /* configure the communication list addresses */ 1116 /* configure the communication list addresses */
1117 ppaddr = a->inbound_list_md.phys_addr; 1117 ppaddr = a->inbound_list_md.phys_addr;
1118 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, 1118 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
1119 lower_32_bits(ppaddr)); 1119 lower_32_bits(ppaddr));
1120 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, 1120 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
1121 upper_32_bits(ppaddr)); 1121 upper_32_bits(ppaddr));
1122 ppaddr = a->outbound_list_md.phys_addr; 1122 ppaddr = a->outbound_list_md.phys_addr;
1123 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, 1123 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
1124 lower_32_bits(ppaddr)); 1124 lower_32_bits(ppaddr));
1125 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, 1125 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
1126 upper_32_bits(ppaddr)); 1126 upper_32_bits(ppaddr));
1127 ppaddr = a->uncached_phys + 1127 ppaddr = a->uncached_phys +
1128 ((u8 *)a->outbound_copy - a->uncached); 1128 ((u8 *)a->outbound_copy - a->uncached);
1129 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, 1129 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
1130 lower_32_bits(ppaddr)); 1130 lower_32_bits(ppaddr));
1131 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, 1131 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
1132 upper_32_bits(ppaddr)); 1132 upper_32_bits(ppaddr));
1133 1133
1134 /* reset the read and write pointers */ 1134 /* reset the read and write pointers */
1135 *a->outbound_copy = 1135 *a->outbound_copy =
1136 a->last_write = 1136 a->last_write =
1137 a->last_read = a->list_size - 1; 1137 a->last_read = a->list_size - 1;
1138 set_bit(AF_COMM_LIST_TOGGLE, &a->flags); 1138 set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
1139 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | 1139 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
1140 a->last_write); 1140 a->last_write);
1141 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | 1141 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
1142 a->last_write); 1142 a->last_write);
1143 esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | 1143 esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
1144 a->last_write); 1144 a->last_write);
1145 esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, 1145 esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
1146 MU_OLW_TOGGLE | a->last_write); 1146 MU_OLW_TOGGLE | a->last_write);
1147 1147
1148 /* configure the interface select fields */ 1148 /* configure the interface select fields */
1149 dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); 1149 dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
1150 dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); 1150 dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
1151 esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, 1151 esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
1152 (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); 1152 (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
1153 dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); 1153 dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
1154 dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); 1154 dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
1155 esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, 1155 esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
1156 (dw | MU_OLIC_LIST_F0 | 1156 (dw | MU_OLIC_LIST_F0 |
1157 MU_OLIC_SOURCE_DDR)); 1157 MU_OLIC_SOURCE_DDR));
1158 1158
1159 /* finish configuring the communication lists */ 1159 /* finish configuring the communication lists */
1160 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); 1160 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1161 dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK); 1161 dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
1162 dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC 1162 dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
1163 | (a->list_size << MU_ILC_NUMBER_SHIFT); 1163 | (a->list_size << MU_ILC_NUMBER_SHIFT);
1164 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); 1164 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1165 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); 1165 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1166 dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); 1166 dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
1167 dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); 1167 dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
1168 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); 1168 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1169 1169
1170 /* 1170 /*
1171 * notify the firmware that we're done setting up the communication 1171 * notify the firmware that we're done setting up the communication
1172 * list registers. wait here until the firmware is done configuring 1172 * list registers. wait here until the firmware is done configuring
1173 * its lists. it will signal that it is done by enabling the lists. 1173 * its lists. it will signal that it is done by enabling the lists.
1174 */ 1174 */
1175 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); 1175 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
1176 starttime = jiffies_to_msecs(jiffies); 1176 starttime = jiffies_to_msecs(jiffies);
1177 1177
1178 while (true) { 1178 while (true) {
1179 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1179 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1180 if (doorbell & DRBL_MSG_IFC_INIT) { 1180 if (doorbell & DRBL_MSG_IFC_INIT) {
1181 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1181 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1182 doorbell); 1182 doorbell);
1183 break; 1183 break;
1184 } 1184 }
1185 1185
1186 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1186 schedule_timeout_interruptible(msecs_to_jiffies(100));
1187 1187
1188 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { 1188 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1189 esas2r_hdebug( 1189 esas2r_hdebug(
1190 "timeout waiting for communication list init"); 1190 "timeout waiting for communication list init");
1191 esas2r_bugon(); 1191 esas2r_bugon();
1192 return esas2r_set_degraded_mode(a, 1192 return esas2r_set_degraded_mode(a,
1193 "timeout waiting for communication list init"); 1193 "timeout waiting for communication list init");
1194 } 1194 }
1195 } 1195 }
1196 1196
1197 /* 1197 /*
1198 * flag whether the firmware supports the power down doorbell. we 1198 * flag whether the firmware supports the power down doorbell. we
1199 * determine this by reading the inbound doorbell enable mask. 1199 * determine this by reading the inbound doorbell enable mask.
1200 */ 1200 */
1201 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); 1201 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
1202 if (doorbell & DRBL_POWER_DOWN) 1202 if (doorbell & DRBL_POWER_DOWN)
1203 set_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1203 set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1204 else 1204 else
1205 clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1205 clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
1206 1206
1207 /* 1207 /*
1208 * enable assertion of outbound queue and doorbell interrupts in the 1208 * enable assertion of outbound queue and doorbell interrupts in the
1209 * main interrupt cause register. 1209 * main interrupt cause register.
1210 */ 1210 */
1211 esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); 1211 esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
1212 esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); 1212 esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
1213 return true; 1213 return true;
1214 } 1214 }
1215 1215
/*
 * Process the initialization message just completed and format the next one.
 *
 * Drives the three-step VDA config handshake state machine kept in
 * a->init_msg: START/REINIT -> INIT -> GET_INIT.  Returns true when a new
 * request has been formatted in *rq and should be issued; returns false
 * (and forces rq->req_stat to RS_SUCCESS) when the sequence is finished.
 *
 * All multi-byte fields exchanged with the firmware are little-endian on
 * the wire, hence the cpu_to_le32()/le16_to_cpu() conversions (required
 * for correct operation on big-endian hosts such as PPC64).
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	/* consume the current state; each case re-arms it as needed */
	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		struct timeval now;
		do_gettimeofday(&now);
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		/* firmware expects little-endian fields */
		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
		ci->epoch_time = cpu_to_le32(now.tv_sec);
		/* old firmware may reject this request; that is tolerated */
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;
			u16 fw_release;

			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			/* convert before splitting into major/minor bytes */
			fw_release = le16_to_cpu(
				rq->func_rsp.cfg_rsp.fw_release);
			major = LOBYTE(fw_release);
			minor = HIBYTE(fw_release);
			/* pack release into the upper 16 bits of fw_version */
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 */

		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			/* describe the response buffer via a single PRDE */
			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		/* fall through to finish when GET_INIT2 is not issued */

	case ESAS2R_INIT_MSG_GET_INIT:
		/* guard: this case may be reached by fall-through from INIT */
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
	/* fall through */

	default:
		/* sequence complete; report overall success to the caller */
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
1307 1310
/*
 * Perform initialization messages via the request queue.  Messages are
 * performed with interrupts disabled.
 *
 * Repeatedly asks esas2r_format_init_msg() for the next handshake request,
 * issues it under the queue lock, and busy-waits for completion.  Returns
 * true when the whole init-message sequence succeeded.  On failure the
 * state machine is rewound to ESAS2R_INIT_MSG_START so a later retry
 * restarts from the beginning.
 */
bool esas2r_init_msgs(struct esas2r_adapter *a)
{
	bool success = true;
	struct esas2r_request *rq = &a->general_req;

	esas2r_rq_init_request(rq, a);
	/* completions are polled below, so the callback does nothing */
	rq->comp_cb = esas2r_dummy_complete;

	if (a->init_msg == 0)
		a->init_msg = ESAS2R_INIT_MSG_REINIT;

	while (a->init_msg) {
		if (esas2r_format_init_msg(a, rq)) {
			unsigned long flags;
			/* re-issue until the request actually completes */
			while (true) {
				spin_lock_irqsave(&a->queue_lock, flags);
				esas2r_start_vda_request(a, rq);
				spin_unlock_irqrestore(&a->queue_lock, flags);
				esas2r_wait_request(a, rq);
				if (rq->req_stat != RS_PENDING)
					break;
			}
		}

		/* tolerate non-timeout failures when RF_FAILURE_OK is set */
		if (rq->req_stat == RS_SUCCESS
		    || ((rq->flags & RF_FAILURE_OK)
			&& rq->req_stat != RS_TIMEOUT))
			continue;

		esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
			   a->init_msg, rq->req_stat, rq->flags);
		a->init_msg = ESAS2R_INIT_MSG_START;
		success = false;
		break;
	}

	esas2r_rq_destroy_request(rq, a);
	return success;
}
1351 1354
/*
 * Initialize the adapter chip.
 *
 * Runs the firmware init-message handshake, posts async event requests,
 * reads flash/image/firmware revision info and optionally (init_poll)
 * performs polled discovery with simulated timer ticks.  Returns true on
 * success; on any failure the adapter is left in degraded mode with the
 * pending-reset/discovery flags cleared as appropriate.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		goto exit;

	if (!test_bit(AF_NVR_VALID, &a->flags)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	clear_bit(AF_DEGRADED_MODE, &a->flags);
	clear_bit(AF_CHPRST_PENDING, &a->flags);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	/* format "major.minor" from the packed upper word of fw_version */
	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/*
	 * A chip reset during first init: just re-enable interrupts and
	 * return; the original polled discovery continues elsewhere.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
		set_bit(AF_DISC_POLLED, &a->flags);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_dec(&a->disable_cnt);

		while (test_bit(AF_DISC_PENDING, &a->flags)) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {

				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);

		}

		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_inc(&a->disable_cnt);

		clear_bit(AF_DISC_POLLED, &a->flags);
		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
	}


	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */

	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
	    test_bit(AF_FIRST_INIT, &a->flags)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			clear_bit(AF_CHPRST_PENDING, &a->flags);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			clear_bit(AF_CHPRST_PENDING, &a->flags);
			clear_bit(AF_DISC_PENDING, &a->flags);
		}


		/* Enable deferred processing after the first initialization. */
		if (test_bit(AF_FIRST_INIT, &a->flags)) {
			clear_bit(AF_FIRST_INIT, &a->flags);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
1530 1533
/*
 * Request a full adapter reset: mark the reset as OS-initiated, perform
 * the local reset, then schedule the tasklet to run deferred recovery work.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	set_bit(AF_OS_RESET, &a->flags);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
1537 1540
/*
 * Hard-reset the controller chip.
 *
 * Saves the VDA firmware core dump (if one is available and not already
 * saved) before issuing the reset, since the reset destroys the on-chip
 * SRAM contents.  The reset register differs for Frey B2 revisions.
 */
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA core
	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
	 * to not overwrite a previous crash that was saved.
	 */
	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);

		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
	}

	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);

	/* Reset the chip */
	if (a->pcid->revision == MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);


	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}
1572 1575
/*
 * Tell the firmware the driver is powering down and wait (up to 30s) for
 * it to acknowledge via the outbound doorbell.  Only called for firmware
 * that advertises DRBL_POWER_DOWN support (AF2_VDA_POWER_DOWN).
 */
static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;

	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_POWER_DOWN) {
			/* ack the doorbell by writing it back */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
			esas2r_hdebug("Timeout waiting for power down");
			break;
		}
	}
}
1597 1600
/*
 * Perform power management processing including managing device states,
 * adapter states, interrupts, and I/O.
 *
 * Quiesces the firmware interface (doorbell handshake, up to 3s), notifies
 * capable firmware of the power-down, then suspends I/O by flagging a
 * pending reset/discovery, cleans up outstanding requests and removes all
 * target devices.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	set_bit(AF_POWER_MGT, &a->flags);
	set_bit(AF_POWER_DOWN, &a->flags);

	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				/* ack the doorbell by writing it back */
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	set_bit(AF_OS_RESET, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_CHPRST_PENDING, &a->flags);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
1662 1665
1663 /* 1666 /*
1664 * Perform power management processing including managing device states, adapter 1667 * Perform power management processing including managing device states, adapter
1665 * states, interrupts, and I/O. 1668 * states, interrupts, and I/O.
1666 */ 1669 */
1667 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) 1670 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
1668 { 1671 {
1669 bool ret; 1672 bool ret;
1670 1673
1671 clear_bit(AF_POWER_DOWN, &a->flags); 1674 clear_bit(AF_POWER_DOWN, &a->flags);
1672 esas2r_init_pci_cfg_space(a); 1675 esas2r_init_pci_cfg_space(a);
1673 set_bit(AF_FIRST_INIT, &a->flags); 1676 set_bit(AF_FIRST_INIT, &a->flags);
1674 atomic_inc(&a->disable_cnt); 1677 atomic_inc(&a->disable_cnt);
1675 1678
1676 /* reinitialize the adapter */ 1679 /* reinitialize the adapter */
1677 ret = esas2r_check_adapter(a); 1680 ret = esas2r_check_adapter(a);
1678 if (!esas2r_init_adapter_hw(a, init_poll)) 1681 if (!esas2r_init_adapter_hw(a, init_poll))
1679 ret = false; 1682 ret = false;
1680 1683
1681 /* send the reset asynchronous event */ 1684 /* send the reset asynchronous event */
1682 esas2r_send_reset_ae(a, true); 1685 esas2r_send_reset_ae(a, true);
1683 1686
1684 /* clear this flag after initialization. */ 1687 /* clear this flag after initialization. */
1685 clear_bit(AF_POWER_MGT, &a->flags); 1688 clear_bit(AF_POWER_MGT, &a->flags);
1686 return ret; 1689 return ret;
1687 } 1690 }
1688 1691
1689 bool esas2r_is_adapter_present(struct esas2r_adapter *a) 1692 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1690 { 1693 {
1691 if (test_bit(AF_NOT_PRESENT, &a->flags)) 1694 if (test_bit(AF_NOT_PRESENT, &a->flags))
1692 return false; 1695 return false;
1693 1696
1694 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { 1697 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1695 set_bit(AF_NOT_PRESENT, &a->flags); 1698 set_bit(AF_NOT_PRESENT, &a->flags);
1696 1699
1697 return false; 1700 return false;
1698 } 1701 }
1699 return true; 1702 return true;
1700 } 1703 }
1701 1704
1702 const char *esas2r_get_model_name(struct esas2r_adapter *a) 1705 const char *esas2r_get_model_name(struct esas2r_adapter *a)
1703 { 1706 {
1704 switch (a->pcid->subsystem_device) { 1707 switch (a->pcid->subsystem_device) {
1705 case ATTO_ESAS_R680: 1708 case ATTO_ESAS_R680:
1706 return "ATTO ExpressSAS R680"; 1709 return "ATTO ExpressSAS R680";
1707 1710
1708 case ATTO_ESAS_R608: 1711 case ATTO_ESAS_R608:
1709 return "ATTO ExpressSAS R608"; 1712 return "ATTO ExpressSAS R608";
1710 1713
1711 case ATTO_ESAS_R60F: 1714 case ATTO_ESAS_R60F:
1712 return "ATTO ExpressSAS R60F"; 1715 return "ATTO ExpressSAS R60F";
1713 1716
1714 case ATTO_ESAS_R6F0: 1717 case ATTO_ESAS_R6F0:
1715 return "ATTO ExpressSAS R6F0"; 1718 return "ATTO ExpressSAS R6F0";
1716 1719
1717 case ATTO_ESAS_R644: 1720 case ATTO_ESAS_R644:
1718 return "ATTO ExpressSAS R644"; 1721 return "ATTO ExpressSAS R644";
1719 1722
1720 case ATTO_ESAS_R648: 1723 case ATTO_ESAS_R648:
1721 return "ATTO ExpressSAS R648"; 1724 return "ATTO ExpressSAS R648";
1722 1725
1723 case ATTO_TSSC_3808: 1726 case ATTO_TSSC_3808:
1724 return "ATTO ThunderStream SC 3808D"; 1727 return "ATTO ThunderStream SC 3808D";
1725 1728
1726 case ATTO_TSSC_3808E: 1729 case ATTO_TSSC_3808E:
1727 return "ATTO ThunderStream SC 3808E"; 1730 return "ATTO ThunderStream SC 3808E";
1728 1731
1729 case ATTO_TLSH_1068: 1732 case ATTO_TLSH_1068:
1730 return "ATTO ThunderLink SH 1068"; 1733 return "ATTO ThunderLink SH 1068";
1731 } 1734 }
1732 1735
1733 return "ATTO SAS Controller"; 1736 return "ATTO SAS Controller";
1734 } 1737 }
1735 1738
1736 const char *esas2r_get_model_name_short(struct esas2r_adapter *a) 1739 const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1737 { 1740 {
1738 switch (a->pcid->subsystem_device) { 1741 switch (a->pcid->subsystem_device) {
1739 case ATTO_ESAS_R680: 1742 case ATTO_ESAS_R680:
1740 return "R680"; 1743 return "R680";
1741 1744
1742 case ATTO_ESAS_R608: 1745 case ATTO_ESAS_R608:
1743 return "R608"; 1746 return "R608";
1744 1747
1745 case ATTO_ESAS_R60F: 1748 case ATTO_ESAS_R60F:
1746 return "R60F"; 1749 return "R60F";
1747 1750
1748 case ATTO_ESAS_R6F0: 1751 case ATTO_ESAS_R6F0:
1749 return "R6F0"; 1752 return "R6F0";
1750 1753
1751 case ATTO_ESAS_R644: 1754 case ATTO_ESAS_R644:
1752 return "R644"; 1755 return "R644";
1753 1756
1754 case ATTO_ESAS_R648: 1757 case ATTO_ESAS_R648:
1755 return "R648"; 1758 return "R648";
1756 1759
1757 case ATTO_TSSC_3808: 1760 case ATTO_TSSC_3808:
1758 return "SC 3808D"; 1761 return "SC 3808D";
1759 1762
1760 case ATTO_TSSC_3808E: 1763 case ATTO_TSSC_3808E:
1761 return "SC 3808E"; 1764 return "SC 3808E";
1762 1765
1763 case ATTO_TLSH_1068: 1766 case ATTO_TLSH_1068:
1764 return "SH 1068"; 1767 return "SH 1068";
1765 } 1768 }
1766 1769
1767 return "unknown"; 1770 return "unknown";
1768 } 1771 }
1769 1772