Commit 92dbf273921eb53a9d5b760a8f3b32eefd776b1b

Authored by Giridhar Malavali
Committed by James Bottomley
1 parent ed0de87ce6

[SCSI] qla2xxx: Limit the logs in case device state does not change for ISP82xx.

Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Madhuranath Iyengar <Madhu.Iyengar@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Showing 1 changed file with 14 additions and 4 deletions (inline diff).

drivers/scsi/qla2xxx/qla_nx.c
1 /* 1 /*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2011 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 #include "qla_def.h" 7 #include "qla_def.h"
8 #include <linux/delay.h> 8 #include <linux/delay.h>
9 #include <linux/pci.h> 9 #include <linux/pci.h>
10 #include <scsi/scsi_tcq.h> 10 #include <scsi/scsi_tcq.h>
11 11
/* Field-extraction helpers for translating 128M-map addresses. */
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define MS_WIN(addr) (addr & 0x0ffc0000)
/* Base offsets of the MN / MS / OCM0 regions within the 2M PCI map. */
#define QLA82XX_PCI_MN_2M (0)
#define QLA82XX_PCI_MS_2M (0x80000)
#define QLA82XX_PCI_OCM0_2M (0xc0000)
/* OCM addresses whose low bits hit 0x3f800 are reserved/invalid. */
#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
#define BLOCK_PROTECT_BITS 0x0F

/* CRB window related */
#define CRB_BLK(off) ((off >> 20) & 0x3f)	/* block index: bits 25:20 */
#define CRB_SUBBLK(off) ((off >> 16) & 0xf)	/* sub-block index: bits 19:16 */
#define CRB_WINDOW_2M (0x130060)		/* window-select register offset */
#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
/* Upper window bits: hub/agent id (from table below) plus bits 19:16. */
#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
	((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define CRB_INDIRECT_2M (0x1e0000UL)

#define MAX_CRB_XFORM 60
/* Per-agent CRB base addresses, filled by qla82xx_crb_addr_transform_setup(). */
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
/* Non-zero once crb_addr_xform[] has been populated. */
int qla82xx_crb_table_initialized;

/* Record the hub/agent CRB base for agent <name> in crb_addr_xform[]. */
#define qla82xx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
42 42
/*
 * Populate crb_addr_xform[] with the CRB base address of every hardware
 * agent, then mark the table initialized.  Must run before any code that
 * consults crb_addr_xform[].
 */
static void qla82xx_crb_addr_transform_setup(void)
{
	qla82xx_crb_addr_transform(XDMA);
	qla82xx_crb_addr_transform(TIMR);
	qla82xx_crb_addr_transform(SRE);
	qla82xx_crb_addr_transform(SQN3);
	qla82xx_crb_addr_transform(SQN2);
	qla82xx_crb_addr_transform(SQN1);
	qla82xx_crb_addr_transform(SQN0);
	qla82xx_crb_addr_transform(SQS3);
	qla82xx_crb_addr_transform(SQS2);
	qla82xx_crb_addr_transform(SQS1);
	qla82xx_crb_addr_transform(SQS0);
	qla82xx_crb_addr_transform(RPMX7);
	qla82xx_crb_addr_transform(RPMX6);
	qla82xx_crb_addr_transform(RPMX5);
	qla82xx_crb_addr_transform(RPMX4);
	qla82xx_crb_addr_transform(RPMX3);
	qla82xx_crb_addr_transform(RPMX2);
	qla82xx_crb_addr_transform(RPMX1);
	qla82xx_crb_addr_transform(RPMX0);
	qla82xx_crb_addr_transform(ROMUSB);
	qla82xx_crb_addr_transform(SN);
	qla82xx_crb_addr_transform(QMN);
	qla82xx_crb_addr_transform(QMS);
	qla82xx_crb_addr_transform(PGNI);
	qla82xx_crb_addr_transform(PGND);
	qla82xx_crb_addr_transform(PGN3);
	qla82xx_crb_addr_transform(PGN2);
	qla82xx_crb_addr_transform(PGN1);
	qla82xx_crb_addr_transform(PGN0);
	qla82xx_crb_addr_transform(PGSI);
	qla82xx_crb_addr_transform(PGSD);
	qla82xx_crb_addr_transform(PGS3);
	qla82xx_crb_addr_transform(PGS2);
	qla82xx_crb_addr_transform(PGS1);
	qla82xx_crb_addr_transform(PGS0);
	qla82xx_crb_addr_transform(PS);
	qla82xx_crb_addr_transform(PH);
	qla82xx_crb_addr_transform(NIU);
	qla82xx_crb_addr_transform(I2Q);
	qla82xx_crb_addr_transform(EG);
	qla82xx_crb_addr_transform(MN);
	qla82xx_crb_addr_transform(MS);
	qla82xx_crb_addr_transform(CAS2);
	qla82xx_crb_addr_transform(CAS1);
	qla82xx_crb_addr_transform(CAS0);
	qla82xx_crb_addr_transform(CAM);
	qla82xx_crb_addr_transform(C2C1);
	qla82xx_crb_addr_transform(C2C0);
	qla82xx_crb_addr_transform(SMB);
	qla82xx_crb_addr_transform(OCM0);
	/*
	 * Used only in P3 just define it for P2 also.
	 */
	qla82xx_crb_addr_transform(I2C0);

	qla82xx_crb_table_initialized = 1;
}
102 102
/*
 * Direct-map table from 128M-map CRB offsets to 2M-map offsets.
 * Indexed by CRB_BLK(off); each block holds up to 16 sub-blocks of
 * {valid, start_128M, end_128M, start_2M}.  Entries with valid == 0
 * are not direct-mappable and fall back to the CRB window path
 * (see qla82xx_pci_get_crb_addr_2M()).
 */
struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0, 0, 0} } },
	{{{1, 0x0100000, 0x0102000, 0x120000},
	{1, 0x0110000, 0x0120000, 0x130000},
	{1, 0x0120000, 0x0122000, 0x124000},
	{1, 0x0130000, 0x0132000, 0x126000},
	{1, 0x0140000, 0x0142000, 0x128000},
	{1, 0x0150000, 0x0152000, 0x12a000},
	{1, 0x0160000, 0x0170000, 0x110000},
	{1, 0x0170000, 0x0172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x01e0000, 0x01e0800, 0x122000},
	{0, 0x0000000, 0x0000000, 0x000000} } } ,
	{{{1, 0x0200000, 0x0210000, 0x180000} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x0400000, 0x0401000, 0x169000} } },
	{{{1, 0x0500000, 0x0510000, 0x140000} } },
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
	{{{1, 0x0800000, 0x0802000, 0x170000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
	{{{1, 0x1100000, 0x1101000, 0x160000} } },
	{{{1, 0x1200000, 0x1201000, 0x161000} } },
	{{{1, 0x1300000, 0x1301000, 0x162000} } },
	{{{1, 0x1400000, 0x1401000, 0x163000} } },
	{{{1, 0x1500000, 0x1501000, 0x165000} } },
	{{{1, 0x1600000, 0x1601000, 0x166000} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
	{{{0} } },
	{{{1, 0x2100000, 0x2102000, 0x120000},
	{1, 0x2110000, 0x2120000, 0x130000},
	{1, 0x2120000, 0x2122000, 0x124000},
	{1, 0x2130000, 0x2132000, 0x126000},
	{1, 0x2140000, 0x2142000, 0x128000},
	{1, 0x2150000, 0x2152000, 0x12a000},
	{1, 0x2160000, 0x2170000, 0x110000},
	{1, 0x2170000, 0x2172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
	{{{0} } },
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
};
259 259
/*
 * top 12 bits of crb internal address (hub, agent)
 *
 * Indexed by CRB_BLK(off); consumed by CRB_HI() to build the value
 * programmed into the CRB window register.  Zero entries are blocks
 * with no associated agent.
 */
unsigned qla82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
329 329
/* Device states */
/*
 * Human-readable names for the ISP82xx IDC device states, indexed by
 * the numeric state value read from the device-state register.
 * NOTE(review): presumably matches the QLA82XX_DEV_* enumeration order
 * declared in a header outside this file — verify before reordering.
 */
char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};
341 341
342 /* 342 /*
343 * In: 'off' is offset from CRB space in 128M pci map 343 * In: 'off' is offset from CRB space in 128M pci map
344 * Out: 'off' is 2M pci map addr 344 * Out: 'off' is 2M pci map addr
345 * side effect: lock crb window 345 * side effect: lock crb window
346 */ 346 */
347 static void 347 static void
348 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) 348 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349 { 349 {
350 u32 win_read; 350 u32 win_read;
351 351
352 ha->crb_win = CRB_HI(*off); 352 ha->crb_win = CRB_HI(*off);
353 writel(ha->crb_win, 353 writel(ha->crb_win,
354 (void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 354 (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
355 355
356 /* Read back value to make sure write has gone through before trying 356 /* Read back value to make sure write has gone through before trying
357 * to use it. 357 * to use it.
358 */ 358 */
359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360 if (win_read != ha->crb_win) { 360 if (win_read != ha->crb_win) {
361 DEBUG2(qla_printk(KERN_INFO, ha, 361 DEBUG2(qla_printk(KERN_INFO, ha,
362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " 362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); 363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
364 } 364 }
365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366 } 366 }
367 367
/*
 * Translate a 128M-map CRB offset to the address usable in the current
 * window, warning on addresses outside any known region.  Returns the
 * (possibly rebased) offset; never fails.
 */
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIE regs are
		 * accessible in both windows.
		 */
		return off;
	}

	/* NOTE(review): if QLA82XX_CRB_PCIX_HOST2 <= QLA82XX_CRB_DDR_NET,
	 * this branch is shadowed by the one above — confirm the macro
	 * ordering in the header. */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		/* NOTE(review): returning when curr_window != 1 (i.e. NOT in
		 * the second window) looks inverted relative to the comment
		 * above — verify against the intended window protocol before
		 * changing. */
		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
			(off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	qla_printk(KERN_WARNING, ha,
	    "%s: Warning: unm_nic_pci_set_crbwindow called with"
	    " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
	return off;
}
406 406
/*
 * Translate *off from a 128M-map CRB offset to a 2M-map virtual address.
 *
 * Returns:
 *   -1  — address is out of range (caller BUG()s on this);
 *    0  — *off rewritten to a directly usable virtual address;
 *    1  — no direct mapping; caller must go through the CRB window
 *         (qla82xx_pci_set_crbwindow_2M) under the hardware semaphore.
 *
 * The CAMQM check must precede the CRBSPACE check: CAMQM addresses are
 * handled specially before the generic rebase below.
 */
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	/* Rebase to a CRB-space-relative offset for the table lookup. */
	*off -= QLA82XX_PCI_CRBSPACE;

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}
436 436
437 #define CRB_WIN_LOCK_TIMEOUT 100000000 437 #define CRB_WIN_LOCK_TIMEOUT 100000000
438 static int qla82xx_crb_win_lock(struct qla_hw_data *ha) 438 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
439 { 439 {
440 int done = 0, timeout = 0; 440 int done = 0, timeout = 0;
441 441
442 while (!done) { 442 while (!done) {
443 /* acquire semaphore3 from PCI HW block */ 443 /* acquire semaphore3 from PCI HW block */
444 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); 444 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
445 if (done == 1) 445 if (done == 1)
446 break; 446 break;
447 if (timeout >= CRB_WIN_LOCK_TIMEOUT) 447 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
448 return -1; 448 return -1;
449 timeout++; 449 timeout++;
450 } 450 }
451 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); 451 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
452 return 0; 452 return 0;
453 } 453 }
454 454
/*
 * Write a 32-bit value to a CRB register given its 128M-map offset.
 *
 * Direct-mapped offsets (rv == 0) are written straight through.  Offsets
 * that need the shared CRB window (rv == 1) are bracketed by the hw_lock
 * rwlock plus the SEM7 hardware semaphore, since the window register is
 * shared with firmware/other functions.  Out-of-range offsets (rv == -1)
 * are a driver bug, hence BUG_ON.  Always returns 0.
 */
int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	/* May rewrite 'off' into a directly usable virtual address. */
	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		/* Reading SEM7_UNLOCK releases the hardware semaphore. */
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return 0;
}
479 479
/*
 * qla82xx_rd_32() - Read a 32-bit CRB register.
 * @ha:  HBA context
 * @off: CRB offset; translated in place to a CPU-mapped address
 *
 * Mirror of qla82xx_wr_32(): when the translation reports rv == 1 the
 * CRB window is repositioned under hw_lock and the SEM7 hardware
 * semaphore around the access; rv == -1 (untranslatable offset) is
 * treated as a driver bug.
 *
 * Returns the register value.
 */
int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = RD_REG_DWORD((void __iomem *)off);

	if (rv == 1) {
		/* A read of the UNLOCK register releases SEM7. */
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}
504 504
505 #define IDC_LOCK_TIMEOUT 100000000 505 #define IDC_LOCK_TIMEOUT 100000000
506 int qla82xx_idc_lock(struct qla_hw_data *ha) 506 int qla82xx_idc_lock(struct qla_hw_data *ha)
507 { 507 {
508 int i; 508 int i;
509 int done = 0, timeout = 0; 509 int done = 0, timeout = 0;
510 510
511 while (!done) { 511 while (!done) {
512 /* acquire semaphore5 from PCI HW block */ 512 /* acquire semaphore5 from PCI HW block */
513 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); 513 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
514 if (done == 1) 514 if (done == 1)
515 break; 515 break;
516 if (timeout >= IDC_LOCK_TIMEOUT) 516 if (timeout >= IDC_LOCK_TIMEOUT)
517 return -1; 517 return -1;
518 518
519 timeout++; 519 timeout++;
520 520
521 /* Yield CPU */ 521 /* Yield CPU */
522 if (!in_interrupt()) 522 if (!in_interrupt())
523 schedule(); 523 schedule();
524 else { 524 else {
525 for (i = 0; i < 20; i++) 525 for (i = 0; i < 20; i++)
526 cpu_relax(); 526 cpu_relax();
527 } 527 }
528 } 528 }
529 529
530 return 0; 530 return 0;
531 } 531 }
532 532
/*
 * qla82xx_idc_unlock() - Release the IDC hardware semaphore; the PCIe
 * block drops SEM5 when its UNLOCK register is read.
 */
void qla82xx_idc_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}
537 537
538 /* PCI Windowing for DDR regions. */ 538 /* PCI Windowing for DDR regions. */
539 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ 539 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
540 (((addr) <= (high)) && ((addr) >= (low))) 540 (((addr) <= (high)) && ((addr) >= (low)))
541 /* 541 /*
542 * check memory access boundary. 542 * check memory access boundary.
543 * used by test agent. support ddr access only for now 543 * used by test agent. support ddr access only for now
544 */ 544 */
545 static unsigned long 545 static unsigned long
546 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, 546 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
547 unsigned long long addr, int size) 547 unsigned long long addr, int size)
548 { 548 {
549 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 549 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
550 QLA82XX_ADDR_DDR_NET_MAX) || 550 QLA82XX_ADDR_DDR_NET_MAX) ||
551 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET, 551 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
552 QLA82XX_ADDR_DDR_NET_MAX) || 552 QLA82XX_ADDR_DDR_NET_MAX) ||
553 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) 553 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
554 return 0; 554 return 0;
555 else 555 else
556 return 1; 556 return 1;
557 } 557 }
558 558
/* Rate-limit counter for "unknown address" warnings below. */
int qla82xx_pci_set_window_warning_count;

/*
 * qla82xx_pci_set_window() - Position the appropriate PCI memory
 * window (DDR/MN, OCM0 or QDR/MS) so that @addr becomes reachable
 * through BAR0, and translate it to the window-relative address.
 *
 * Each branch writes the window register, reads it back and warns on a
 * mismatch.  The written window is cached in ha->ddr_mn_window /
 * ha->qdr_sn_window for qla82xx_pci_is_same_window().
 *
 * Returns the translated address, or -1UL for an address outside all
 * known ranges (or an unhandled QM access inside OCM0).
 */
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX)) {
		unsigned int temp1;
		if ((addr & 0x00ff800) == 0xff800) {
			/* QM window region is not supported. */
			qla_printk(KERN_WARNING, ha,
			    "%s: QM access not handled.\n", __func__);
			addr = -1UL;
		}
		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		/* Fold the window value into the layout the readback
		 * register reports before comparing. */
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
			    __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla82xx_wr_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist,
		 * this limits the chit chat so debugging isn't slowed down.
		 */
		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
		    (qla82xx_pci_set_window_warning_count%64 == 0)) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Warning:%s Unknown address range!\n", __func__,
			    QLA2XXX_DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}
635 635
/* check if address is in the same windows as the previous access */
/*
 * Returns 1 when @addr can be reached without moving any window
 * (OCM0/OCM1 always; QDR only if the cached window matches),
 * 0 otherwise.  A DDR address here is a driver bug: DDR accesses
 * must go through qla82xx_pci_set_window().
 */
static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
	unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	/* DDR network side */
	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX))
		BUG();
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
		QLA82XX_ADDR_OCM1_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
		/* QDR network side: same window only if the cached
		 * window number matches this address's window. */
		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}
	return 0;
}
663 663
664 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, 664 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
665 u64 off, void *data, int size) 665 u64 off, void *data, int size)
666 { 666 {
667 unsigned long flags; 667 unsigned long flags;
668 void *addr = NULL; 668 void *addr = NULL;
669 int ret = 0; 669 int ret = 0;
670 u64 start; 670 u64 start;
671 uint8_t *mem_ptr = NULL; 671 uint8_t *mem_ptr = NULL;
672 unsigned long mem_base; 672 unsigned long mem_base;
673 unsigned long mem_page; 673 unsigned long mem_page;
674 674
675 write_lock_irqsave(&ha->hw_lock, flags); 675 write_lock_irqsave(&ha->hw_lock, flags);
676 676
677 /* 677 /*
678 * If attempting to access unknown address or straddle hw windows, 678 * If attempting to access unknown address or straddle hw windows,
679 * do not access. 679 * do not access.
680 */ 680 */
681 start = qla82xx_pci_set_window(ha, off); 681 start = qla82xx_pci_set_window(ha, off);
682 if ((start == -1UL) || 682 if ((start == -1UL) ||
683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684 write_unlock_irqrestore(&ha->hw_lock, flags); 684 write_unlock_irqrestore(&ha->hw_lock, flags);
685 qla_printk(KERN_ERR, ha, 685 qla_printk(KERN_ERR, ha,
686 "%s out of bound pci memory access. " 686 "%s out of bound pci memory access. "
687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
688 return -1; 688 return -1;
689 } 689 }
690 690
691 write_unlock_irqrestore(&ha->hw_lock, flags); 691 write_unlock_irqrestore(&ha->hw_lock, flags);
692 mem_base = pci_resource_start(ha->pdev, 0); 692 mem_base = pci_resource_start(ha->pdev, 0);
693 mem_page = start & PAGE_MASK; 693 mem_page = start & PAGE_MASK;
694 /* Map two pages whenever user tries to access addresses in two 694 /* Map two pages whenever user tries to access addresses in two
695 * consecutive pages. 695 * consecutive pages.
696 */ 696 */
697 if (mem_page != ((start + size - 1) & PAGE_MASK)) 697 if (mem_page != ((start + size - 1) & PAGE_MASK))
698 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); 698 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
699 else 699 else
700 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 700 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
701 if (mem_ptr == 0UL) { 701 if (mem_ptr == 0UL) {
702 *(u8 *)data = 0; 702 *(u8 *)data = 0;
703 return -1; 703 return -1;
704 } 704 }
705 addr = mem_ptr; 705 addr = mem_ptr;
706 addr += start & (PAGE_SIZE - 1); 706 addr += start & (PAGE_SIZE - 1);
707 write_lock_irqsave(&ha->hw_lock, flags); 707 write_lock_irqsave(&ha->hw_lock, flags);
708 708
709 switch (size) { 709 switch (size) {
710 case 1: 710 case 1:
711 *(u8 *)data = readb(addr); 711 *(u8 *)data = readb(addr);
712 break; 712 break;
713 case 2: 713 case 2:
714 *(u16 *)data = readw(addr); 714 *(u16 *)data = readw(addr);
715 break; 715 break;
716 case 4: 716 case 4:
717 *(u32 *)data = readl(addr); 717 *(u32 *)data = readl(addr);
718 break; 718 break;
719 case 8: 719 case 8:
720 *(u64 *)data = readq(addr); 720 *(u64 *)data = readq(addr);
721 break; 721 break;
722 default: 722 default:
723 ret = -1; 723 ret = -1;
724 break; 724 break;
725 } 725 }
726 write_unlock_irqrestore(&ha->hw_lock, flags); 726 write_unlock_irqrestore(&ha->hw_lock, flags);
727 727
728 if (mem_ptr) 728 if (mem_ptr)
729 iounmap(mem_ptr); 729 iounmap(mem_ptr);
730 return ret; 730 return ret;
731 } 731 }
732 732
733 static int 733 static int
734 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, 734 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
735 u64 off, void *data, int size) 735 u64 off, void *data, int size)
736 { 736 {
737 unsigned long flags; 737 unsigned long flags;
738 void *addr = NULL; 738 void *addr = NULL;
739 int ret = 0; 739 int ret = 0;
740 u64 start; 740 u64 start;
741 uint8_t *mem_ptr = NULL; 741 uint8_t *mem_ptr = NULL;
742 unsigned long mem_base; 742 unsigned long mem_base;
743 unsigned long mem_page; 743 unsigned long mem_page;
744 744
745 write_lock_irqsave(&ha->hw_lock, flags); 745 write_lock_irqsave(&ha->hw_lock, flags);
746 746
747 /* 747 /*
748 * If attempting to access unknown address or straddle hw windows, 748 * If attempting to access unknown address or straddle hw windows,
749 * do not access. 749 * do not access.
750 */ 750 */
751 start = qla82xx_pci_set_window(ha, off); 751 start = qla82xx_pci_set_window(ha, off);
752 if ((start == -1UL) || 752 if ((start == -1UL) ||
753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754 write_unlock_irqrestore(&ha->hw_lock, flags); 754 write_unlock_irqrestore(&ha->hw_lock, flags);
755 qla_printk(KERN_ERR, ha, 755 qla_printk(KERN_ERR, ha,
756 "%s out of bound pci memory access. " 756 "%s out of bound pci memory access. "
757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
758 return -1; 758 return -1;
759 } 759 }
760 760
761 write_unlock_irqrestore(&ha->hw_lock, flags); 761 write_unlock_irqrestore(&ha->hw_lock, flags);
762 mem_base = pci_resource_start(ha->pdev, 0); 762 mem_base = pci_resource_start(ha->pdev, 0);
763 mem_page = start & PAGE_MASK; 763 mem_page = start & PAGE_MASK;
764 /* Map two pages whenever user tries to access addresses in two 764 /* Map two pages whenever user tries to access addresses in two
765 * consecutive pages. 765 * consecutive pages.
766 */ 766 */
767 if (mem_page != ((start + size - 1) & PAGE_MASK)) 767 if (mem_page != ((start + size - 1) & PAGE_MASK))
768 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); 768 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
769 else 769 else
770 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 770 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
771 if (mem_ptr == 0UL) 771 if (mem_ptr == 0UL)
772 return -1; 772 return -1;
773 773
774 addr = mem_ptr; 774 addr = mem_ptr;
775 addr += start & (PAGE_SIZE - 1); 775 addr += start & (PAGE_SIZE - 1);
776 write_lock_irqsave(&ha->hw_lock, flags); 776 write_lock_irqsave(&ha->hw_lock, flags);
777 777
778 switch (size) { 778 switch (size) {
779 case 1: 779 case 1:
780 writeb(*(u8 *)data, addr); 780 writeb(*(u8 *)data, addr);
781 break; 781 break;
782 case 2: 782 case 2:
783 writew(*(u16 *)data, addr); 783 writew(*(u16 *)data, addr);
784 break; 784 break;
785 case 4: 785 case 4:
786 writel(*(u32 *)data, addr); 786 writel(*(u32 *)data, addr);
787 break; 787 break;
788 case 8: 788 case 8:
789 writeq(*(u64 *)data, addr); 789 writeq(*(u64 *)data, addr);
790 break; 790 break;
791 default: 791 default:
792 ret = -1; 792 ret = -1;
793 break; 793 break;
794 } 794 }
795 write_unlock_irqrestore(&ha->hw_lock, flags); 795 write_unlock_irqrestore(&ha->hw_lock, flags);
796 if (mem_ptr) 796 if (mem_ptr)
797 iounmap(mem_ptr); 797 iounmap(mem_ptr);
798 return ret; 798 return ret;
799 } 799 }
800 800
#define MTU_FUDGE_FACTOR 100
/*
 * qla82xx_decode_crb_addr() - Translate a CRB address by looking up
 * its top 12 bits (the block) in crb_addr_xform[] and keeping the low
 * 20 bits as the offset.  Builds the transform table on first use.
 *
 * Returns the translated address, or ADDR_ERROR if the block is not
 * in the table.
 */
static unsigned long
qla82xx_decode_crb_addr(unsigned long addr)
{
	int i;
	unsigned long base_addr, offset, pci_base;

	if (!qla82xx_crb_table_initialized)
		qla82xx_crb_addr_transform_setup();

	pci_base = ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	/* Linear scan for the block whose transformed base matches. */
	for (i = 0; i < MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == ADDR_ERROR)
		return pci_base;
	return pci_base + offset;
}
825 825
/* Poll budgets for ROM status waits and for taking the ROM lock. */
static long rom_max_timeout = 100;
static long qla82xx_rom_lock_timeout = 100;

/*
 * qla82xx_rom_lock() - Try to take the flash/ROM hardware semaphore
 * (SEM2) and stamp it with the driver's lock ID.
 *
 * Returns 0 on success, -1 if the semaphore was not granted within
 * qla82xx_rom_lock_timeout polls.
 */
static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla82xx_rom_lock_timeout)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}
846 846
847 static int 847 static int
848 qla82xx_wait_rom_busy(struct qla_hw_data *ha) 848 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
849 { 849 {
850 long timeout = 0; 850 long timeout = 0;
851 long done = 0 ; 851 long done = 0 ;
852 852
853 while (done == 0) { 853 while (done == 0) {
854 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 854 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
855 done &= 4; 855 done &= 4;
856 timeout++; 856 timeout++;
857 if (timeout >= rom_max_timeout) { 857 if (timeout >= rom_max_timeout) {
858 DEBUG(qla_printk(KERN_INFO, ha, 858 DEBUG(qla_printk(KERN_INFO, ha,
859 "%s: Timeout reached waiting for rom busy", 859 "%s: Timeout reached waiting for rom busy",
860 QLA2XXX_DRIVER_NAME)); 860 QLA2XXX_DRIVER_NAME));
861 return -1; 861 return -1;
862 } 862 }
863 } 863 }
864 return 0; 864 return 0;
865 } 865 }
866 866
867 static int 867 static int
868 qla82xx_wait_rom_done(struct qla_hw_data *ha) 868 qla82xx_wait_rom_done(struct qla_hw_data *ha)
869 { 869 {
870 long timeout = 0; 870 long timeout = 0;
871 long done = 0 ; 871 long done = 0 ;
872 872
873 while (done == 0) { 873 while (done == 0) {
874 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 874 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
875 done &= 2; 875 done &= 2;
876 timeout++; 876 timeout++;
877 if (timeout >= rom_max_timeout) { 877 if (timeout >= rom_max_timeout) {
878 DEBUG(qla_printk(KERN_INFO, ha, 878 DEBUG(qla_printk(KERN_INFO, ha,
879 "%s: Timeout reached waiting for rom done", 879 "%s: Timeout reached waiting for rom done",
880 QLA2XXX_DRIVER_NAME)); 880 QLA2XXX_DRIVER_NAME));
881 return -1; 881 return -1;
882 } 882 }
883 } 883 }
884 return 0; 884 return 0;
885 } 885 }
886 886
/*
 * qla82xx_do_rom_fast_read() - Issue a flash fast-read (opcode 0xb)
 * for the dword at @addr.  Caller must already hold the ROM lock
 * (SEM2) — see qla82xx_rom_fast_read().
 *
 * Returns 0 with the dword stored in *@valp, or -1 if the ROM
 * interface did not signal completion in time.
 */
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
			"%s: Error waiting for rom done\n",
			QLA2XXX_DRIVER_NAME);
		return -1;
	}
	/* Reset abyte_cnt and dummy_byte_cnt */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	cond_resched();
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	*valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}
909 909
910 static int 910 static int
911 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 911 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
912 { 912 {
913 int ret, loops = 0; 913 int ret, loops = 0;
914 914
915 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 915 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
916 udelay(100); 916 udelay(100);
917 schedule(); 917 schedule();
918 loops++; 918 loops++;
919 } 919 }
920 if (loops >= 50000) { 920 if (loops >= 50000) {
921 qla_printk(KERN_INFO, ha, 921 qla_printk(KERN_INFO, ha,
922 "%s: qla82xx_rom_lock failed\n", 922 "%s: qla82xx_rom_lock failed\n",
923 QLA2XXX_DRIVER_NAME); 923 QLA2XXX_DRIVER_NAME);
924 return -1; 924 return -1;
925 } 925 }
926 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 926 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
927 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 927 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
928 return ret; 928 return ret;
929 } 929 }
930 930
/*
 * qla82xx_read_status_reg() - Issue an M25P RDSR command and read the
 * flash status register into *@val.
 *
 * Returns 0 on success, -1 if the ROM interface did not complete.
 */
static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}
944 944
/*
 * qla82xx_flash_wait_write_finish() - Poll the flash status register
 * until bit 0 (write-in-progress) clears.
 *
 * Returns 0 once the part is idle, -1 on a status-read error, or -1
 * after 50000 polls (10us apart) without the bit clearing.
 */
static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
	long timeout = 0;
	uint32_t done = 1 ;
	uint32_t val;
	int ret = 0;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	/* Loop also exits early if the status read itself fails
	 * (ret != 0), in which case that error is returned. */
	while ((done != 0) && (ret == 0)) {
		ret = qla82xx_read_status_reg(ha, &val);
		done = val & 1;
		timeout++;
		udelay(10);
		cond_resched();
		if (timeout >= 50000) {
			qla_printk(KERN_WARNING, ha,
			    "Timeout reached waiting for write finish");
			return -1;
		}
	}
	return ret;
}
968 968
/*
 * qla82xx_flash_set_write_enable() - Issue an M25P WREN command and
 * verify that the write-enable latch (status bit 1) is set.
 *
 * Returns 0 on success, -1 on command timeout, status-read failure or
 * if the latch did not set.
 */
static int
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
	uint32_t val;
	qla82xx_wait_rom_busy(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha))
		return -1;
	if (qla82xx_read_status_reg(ha, &val) != 0)
		return -1;
	/* Bit 1 is the write-enable latch; it must be set now. */
	if ((val & 2) != 2)
		return -1;
	return 0;
}
985 985
/*
 * qla82xx_write_status_reg() - Write @val to the flash status
 * register (opcode 0x1, WRSR): enable writes, issue the command and
 * wait for the write cycle to finish.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
	if (qla82xx_flash_set_write_enable(ha))
		return -1;
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return qla82xx_flash_wait_write_finish(ha);
}
1000 1000
/*
 * qla82xx_write_disable_flash() - Issue an M25P WRDI command to clear
 * the flash write-enable latch.
 *
 * Returns 0 on success, -1 if the command did not complete in time.
 */
static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return 0;
}
1012 1012
1013 static int 1013 static int
1014 ql82xx_rom_lock_d(struct qla_hw_data *ha) 1014 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1015 { 1015 {
1016 int loops = 0; 1016 int loops = 0;
1017 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1017 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1018 udelay(100); 1018 udelay(100);
1019 cond_resched(); 1019 cond_resched();
1020 loops++; 1020 loops++;
1021 } 1021 }
1022 if (loops >= 50000) { 1022 if (loops >= 50000) {
1023 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1023 qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
1024 return -1; 1024 return -1;
1025 } 1025 }
1026 return 0;; 1026 return 0;;
1027 } 1027 }
1028 1028
1029 static int 1029 static int
1030 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, 1030 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1031 uint32_t data) 1031 uint32_t data)
1032 { 1032 {
1033 int ret = 0; 1033 int ret = 0;
1034 1034
1035 ret = ql82xx_rom_lock_d(ha); 1035 ret = ql82xx_rom_lock_d(ha);
1036 if (ret < 0) { 1036 if (ret < 0) {
1037 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1037 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
1038 return ret; 1038 return ret;
1039 } 1039 }
1040 1040
1041 if (qla82xx_flash_set_write_enable(ha)) 1041 if (qla82xx_flash_set_write_enable(ha))
1042 goto done_write; 1042 goto done_write;
1043 1043
1044 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); 1044 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1045 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); 1045 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1046 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 1046 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1047 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1047 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1048 qla82xx_wait_rom_busy(ha); 1048 qla82xx_wait_rom_busy(ha);
1049 if (qla82xx_wait_rom_done(ha)) { 1049 if (qla82xx_wait_rom_done(ha)) {
1050 qla_printk(KERN_WARNING, ha, 1050 qla_printk(KERN_WARNING, ha,
1051 "Error waiting for rom done\n"); 1051 "Error waiting for rom done\n");
1052 ret = -1; 1052 ret = -1;
1053 goto done_write; 1053 goto done_write;
1054 } 1054 }
1055 1055
1056 ret = qla82xx_flash_wait_write_finish(ha); 1056 ret = qla82xx_flash_wait_write_finish(ha);
1057 1057
1058 done_write: 1058 done_write:
1059 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1059 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1060 return ret; 1060 return ret;
1061 } 1061 }
1062 1062
/* This routine does CRB initialize sequence
 * to put the ISP into operational state.
 *
 * It halts all processing engines and interrupt sources, pulses the
 * global software reset, then replays the addr/value CRB init pairs
 * stored in flash (signature 0xcafecafe at offset 0, table location
 * and entry count packed into the dword at offset 4).
 *
 * Returns 0 on success, -1 on flash-read or allocation failure.
 */
static int
qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
{
	int addr, val;
	int i ;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;
	struct qla_hw_data *ha = vha->hw;

	/* One CRB init entry as read back from flash below: the value
	 * dword precedes the address dword in the flash layout. */
	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the indiviual PEGs and other blocks of the ISP */
	qla82xx_rom_lock(ha);

	/* disable all I2Q */
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);

	/* disable all niu interrupts */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
	/* disable xge rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
	/* disable xg1 rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
	/* disable sideband mac */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
	/* disable ap0 mac */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
	/* disable ap1 mac */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);

	/* halt sre */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));

	/* halt epg */
	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);

	/* halt timers */
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);

	/* halt pegs */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
	msleep(20);

	/* big hammer */
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		/* don't reset CAM block on reset */
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	/* reset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val |= (1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	/* unreset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val &= ~(1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	/* reading SEM2_UNLOCK releases the hw semaphore taken by
	 * qla82xx_rom_lock() above */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));

	/* Read the signature value from the flash.
	 * Offset 0: Contain signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * that present in CRB initialize sequence
	 */
	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of enteries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pair should not exceed 1024 enteries */
	if (n >= 1024) {
		qla_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    QLA2XXX_DRIVER_NAME, __func__, n);
		return -1;
	}

	qla_printk(KERN_INFO, ha,
	    "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);

	/* n < 1024 (checked above), so this multiply cannot overflow */
	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}

	/* Each entry is 8 bytes (value dword, then address dword),
	 * starting at dword offset 'offset' in flash. */
	for (i = 0; i < n; i++) {
		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pair to be written,
		 * some of them are skipped
		 */

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			qla_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    QLA2XXX_DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires millisec delay between
		 * successive CRB register updation
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
	return 0;
}
1271 1271
/* Write @size bytes (1, 2, 4 or 8) from @data to ISP memory at @off
 * through the MIU test agent.
 *
 * Per the comment below, QDR (MS) addresses use the agent at
 * QLA82XX_CRB_QDR_NET; other addresses fall to the DDR (MN) agent,
 * except that DDR addresses passing the bound check are handed off to
 * qla82xx_pci_mem_write_direct().  The agent transfers 16-byte lines,
 * so the affected line(s) are read first and the new bytes spliced in
 * (read-modify-write).
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_write_direct(ha,
			    off, data, size);
	}

	/* sz[0]/sz[1]: bytes landing in the first / following 8-byte word */
	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	/* 16-byte aligned base and number of 16-byte lines touched */
	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	/* NOTE(review): word[] has only 2 elements, but word[i * scale]
	 * with loop == 2, and word[startword + 1] with startword == 1,
	 * index word[2]/word[3] for writes crossing a 16-byte line.
	 * Presumably callers only issue 8-byte-aligned 8-byte writes
	 * (e.g. the fw-load path), which keep loop == 1 -- TODO confirm.
	 */
	for (i = 0; i < loop; i++) {
		if (qla82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	/* widen the caller's value to a 64-bit working copy */
	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	/* splice the new bytes into the cached word(s) */
	if (sz[0] == 8) {
		word[startword] = tmpw;
	} else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}
	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	for (i = 0; i < loop; i++) {
		/* program the 64-bit target address ... */
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		/* ... then the 16 bytes of write data */
		temp = word[i * scale] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);

		/* two-step kick: enable first, then enable|start */
		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		/* poll until the agent clears BUSY */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to write through agent\n");
			ret = -1;
			break;
		}
	}

	return ret;
}
1375 1375
1376 static int 1376 static int
1377 qla82xx_fw_load_from_flash(struct qla_hw_data *ha) 1377 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1378 { 1378 {
1379 int i; 1379 int i;
1380 long size = 0; 1380 long size = 0;
1381 long flashaddr = ha->flt_region_bootload << 2; 1381 long flashaddr = ha->flt_region_bootload << 2;
1382 long memaddr = BOOTLD_START; 1382 long memaddr = BOOTLD_START;
1383 u64 data; 1383 u64 data;
1384 u32 high, low; 1384 u32 high, low;
1385 size = (IMAGE_START - BOOTLD_START) / 8; 1385 size = (IMAGE_START - BOOTLD_START) / 8;
1386 1386
1387 for (i = 0; i < size; i++) { 1387 for (i = 0; i < size; i++) {
1388 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || 1388 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1389 (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { 1389 (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1390 return -1; 1390 return -1;
1391 } 1391 }
1392 data = ((u64)high << 32) | low ; 1392 data = ((u64)high << 32) | low ;
1393 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); 1393 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1394 flashaddr += 8; 1394 flashaddr += 8;
1395 memaddr += 8; 1395 memaddr += 8;
1396 1396
1397 if (i % 0x1000 == 0) 1397 if (i % 0x1000 == 0)
1398 msleep(1); 1398 msleep(1);
1399 } 1399 }
1400 udelay(100); 1400 udelay(100);
1401 read_lock(&ha->hw_lock); 1401 read_lock(&ha->hw_lock);
1402 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1402 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1403 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1403 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1404 read_unlock(&ha->hw_lock); 1404 read_unlock(&ha->hw_lock);
1405 return 0; 1405 return 0;
1406 } 1406 }
1407 1407
/* Read @size bytes (1, 2, 4 or 8) of ISP memory at @off into @data
 * through the MIU test agent.
 *
 * Per the comment below, QDR (MS) addresses use the agent at
 * QLA82XX_CRB_QDR_NET; other addresses fall to the DDR (MN) agent,
 * except that DDR addresses passing the bound check are handed off to
 * qla82xx_pci_mem_read_direct().  The agent returns 16-byte lines;
 * the requested bytes are extracted and packed into a host value.
 *
 * Returns 0 on success, -1 if the agent never clears BUSY.
 */
int
qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */

	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_read_direct(ha,
			    off, data, size);
	}

	/* 16-byte aligned base; sz[0]/sz[1] split the requested bytes
	 * between the first and (possible) second 16-byte line */
	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	for (i = 0; i < loop; i++) {
		/* program the 64-bit source address */
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		/* two-step kick: enable first, then enable|start */
		temp = MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		/* poll until the agent clears BUSY */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to read through agent\n");
			break;
		}

		/* gather only the 32-bit registers that overlap the
		 * requested byte range of this line */
		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla82xx_rd_32(ha,
			    mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	/* j left at MAX_CTL_CHECK means the poll above timed out */
	if (j >= MAX_CTL_CHECK)
		return -1;

	/* splice the two partial words into one host value */
	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		    ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	/* store at the caller's width */
	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}
1496 1496
1497 1497
1498 static struct qla82xx_uri_table_desc * 1498 static struct qla82xx_uri_table_desc *
1499 qla82xx_get_table_desc(const u8 *unirom, int section) 1499 qla82xx_get_table_desc(const u8 *unirom, int section)
1500 { 1500 {
1501 uint32_t i; 1501 uint32_t i;
1502 struct qla82xx_uri_table_desc *directory = 1502 struct qla82xx_uri_table_desc *directory =
1503 (struct qla82xx_uri_table_desc *)&unirom[0]; 1503 (struct qla82xx_uri_table_desc *)&unirom[0];
1504 __le32 offset; 1504 __le32 offset;
1505 __le32 tab_type; 1505 __le32 tab_type;
1506 __le32 entries = cpu_to_le32(directory->num_entries); 1506 __le32 entries = cpu_to_le32(directory->num_entries);
1507 1507
1508 for (i = 0; i < entries; i++) { 1508 for (i = 0; i < entries; i++) {
1509 offset = cpu_to_le32(directory->findex) + 1509 offset = cpu_to_le32(directory->findex) +
1510 (i * cpu_to_le32(directory->entry_size)); 1510 (i * cpu_to_le32(directory->entry_size));
1511 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8)); 1511 tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1512 1512
1513 if (tab_type == section) 1513 if (tab_type == section)
1514 return (struct qla82xx_uri_table_desc *)&unirom[offset]; 1514 return (struct qla82xx_uri_table_desc *)&unirom[offset];
1515 } 1515 }
1516 1516
1517 return NULL; 1517 return NULL;
1518 } 1518 }
1519 1519
1520 static struct qla82xx_uri_data_desc * 1520 static struct qla82xx_uri_data_desc *
1521 qla82xx_get_data_desc(struct qla_hw_data *ha, 1521 qla82xx_get_data_desc(struct qla_hw_data *ha,
1522 u32 section, u32 idx_offset) 1522 u32 section, u32 idx_offset)
1523 { 1523 {
1524 const u8 *unirom = ha->hablob->fw->data; 1524 const u8 *unirom = ha->hablob->fw->data;
1525 int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset)); 1525 int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1526 struct qla82xx_uri_table_desc *tab_desc = NULL; 1526 struct qla82xx_uri_table_desc *tab_desc = NULL;
1527 __le32 offset; 1527 __le32 offset;
1528 1528
1529 tab_desc = qla82xx_get_table_desc(unirom, section); 1529 tab_desc = qla82xx_get_table_desc(unirom, section);
1530 if (!tab_desc) 1530 if (!tab_desc)
1531 return NULL; 1531 return NULL;
1532 1532
1533 offset = cpu_to_le32(tab_desc->findex) + 1533 offset = cpu_to_le32(tab_desc->findex) +
1534 (cpu_to_le32(tab_desc->entry_size) * idx); 1534 (cpu_to_le32(tab_desc->entry_size) * idx);
1535 1535
1536 return (struct qla82xx_uri_data_desc *)&unirom[offset]; 1536 return (struct qla82xx_uri_data_desc *)&unirom[offset];
1537 } 1537 }
1538 1538
1539 static u8 * 1539 static u8 *
1540 qla82xx_get_bootld_offset(struct qla_hw_data *ha) 1540 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1541 { 1541 {
1542 u32 offset = BOOTLD_START; 1542 u32 offset = BOOTLD_START;
1543 struct qla82xx_uri_data_desc *uri_desc = NULL; 1543 struct qla82xx_uri_data_desc *uri_desc = NULL;
1544 1544
1545 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { 1545 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1546 uri_desc = qla82xx_get_data_desc(ha, 1546 uri_desc = qla82xx_get_data_desc(ha,
1547 QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); 1547 QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
1548 if (uri_desc) 1548 if (uri_desc)
1549 offset = cpu_to_le32(uri_desc->findex); 1549 offset = cpu_to_le32(uri_desc->findex);
1550 } 1550 }
1551 1551
1552 return (u8 *)&ha->hablob->fw->data[offset]; 1552 return (u8 *)&ha->hablob->fw->data[offset];
1553 } 1553 }
1554 1554
1555 static __le32 1555 static __le32
1556 qla82xx_get_fw_size(struct qla_hw_data *ha) 1556 qla82xx_get_fw_size(struct qla_hw_data *ha)
1557 { 1557 {
1558 struct qla82xx_uri_data_desc *uri_desc = NULL; 1558 struct qla82xx_uri_data_desc *uri_desc = NULL;
1559 1559
1560 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { 1560 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1561 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, 1561 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1562 QLA82XX_URI_FIRMWARE_IDX_OFF); 1562 QLA82XX_URI_FIRMWARE_IDX_OFF);
1563 if (uri_desc) 1563 if (uri_desc)
1564 return cpu_to_le32(uri_desc->size); 1564 return cpu_to_le32(uri_desc->size);
1565 } 1565 }
1566 1566
1567 return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); 1567 return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1568 } 1568 }
1569 1569
1570 static u8 * 1570 static u8 *
1571 qla82xx_get_fw_offs(struct qla_hw_data *ha) 1571 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1572 { 1572 {
1573 u32 offset = IMAGE_START; 1573 u32 offset = IMAGE_START;
1574 struct qla82xx_uri_data_desc *uri_desc = NULL; 1574 struct qla82xx_uri_data_desc *uri_desc = NULL;
1575 1575
1576 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { 1576 if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1577 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, 1577 uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1578 QLA82XX_URI_FIRMWARE_IDX_OFF); 1578 QLA82XX_URI_FIRMWARE_IDX_OFF);
1579 if (uri_desc) 1579 if (uri_desc)
1580 offset = cpu_to_le32(uri_desc->findex); 1580 offset = cpu_to_le32(uri_desc->findex);
1581 } 1581 }
1582 1582
1583 return (u8 *)&ha->hablob->fw->data[offset]; 1583 return (u8 *)&ha->hablob->fw->data[offset];
1584 } 1584 }
1585 1585
1586 /* PCI related functions */ 1586 /* PCI related functions */
1587 char * 1587 char *
1588 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) 1588 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1589 { 1589 {
1590 int pcie_reg; 1590 int pcie_reg;
1591 struct qla_hw_data *ha = vha->hw; 1591 struct qla_hw_data *ha = vha->hw;
1592 char lwstr[6]; 1592 char lwstr[6];
1593 uint16_t lnk; 1593 uint16_t lnk;
1594 1594
1595 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 1595 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1596 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); 1596 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1597 ha->link_width = (lnk >> 4) & 0x3f; 1597 ha->link_width = (lnk >> 4) & 0x3f;
1598 1598
1599 strcpy(str, "PCIe ("); 1599 strcpy(str, "PCIe (");
1600 strcat(str, "2.5Gb/s "); 1600 strcat(str, "2.5Gb/s ");
1601 snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width); 1601 snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1602 strcat(str, lwstr); 1602 strcat(str, lwstr);
1603 return str; 1603 return str;
1604 } 1604 }
1605 1605
1606 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) 1606 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1607 { 1607 {
1608 unsigned long val = 0; 1608 unsigned long val = 0;
1609 u32 control; 1609 u32 control;
1610 1610
1611 switch (region) { 1611 switch (region) {
1612 case 0: 1612 case 0:
1613 val = 0; 1613 val = 0;
1614 break; 1614 break;
1615 case 1: 1615 case 1:
1616 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); 1616 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1617 val = control + QLA82XX_MSIX_TBL_SPACE; 1617 val = control + QLA82XX_MSIX_TBL_SPACE;
1618 break; 1618 break;
1619 } 1619 }
1620 return val; 1620 return val;
1621 } 1621 }
1622 1622
1623 1623
/*
 * qla82xx_iospace_config() - Reserve and map the ISP82xx PCI regions.
 * @ha: HW adapter context
 *
 * Requests the PCI regions, ioremaps BAR0 (register space), derives the
 * per-function I/O base within it, and sets up the doorbell write/read
 * pointers (either a mapped BAR4 window or CAMRAM posting, depending on
 * the ql2xdbwr module parameter).
 *
 * Returns 0 on success, -ENOMEM on any failure.
 */
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
			"Failed to reserve selected regions (%s)\n",
			pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
			"region #0 not an MMIO resource (%s), aborting\n",
			pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	len = pci_resource_len(ha->pdev, 0);
	ha->nx_pcibase =
	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
	if (!ha->nx_pcibase) {
		qla_printk(KERN_ERR, ha,
			"cannot remap pcibase MMIO (%s), aborting\n",
			pci_name(ha->pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer:
	 * per-function register window at BAR0 + 0xbc000 + (devfn << 11). */
	ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
	    0xbc000 + (ha->pdev->devfn << 11));

	if (!ql2xdbwr) {
		/* Normal doorbell: map a 4-byte window of BAR4 for this
		 * function's doorbell write pointer. */
		ha->nxdb_wr_ptr =
		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
		    (ha->pdev->devfn << 12)), 4);
		if (!ha->nxdb_wr_ptr) {
			qla_printk(KERN_ERR, ha,
				"cannot remap MMIO (%s), aborting\n",
				pci_name(ha->pdev));
			pci_release_regions(ha->pdev);
			goto iospace_error_exit;
		}

		/* Mapping of IO base pointer,
		 * door bell read and write pointer
		 */
		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
		    (ha->pdev->devfn * 8);
	} else {
		/* Doorbell writes disabled: post through CAMRAM registers
		 * instead (register chosen by PCI function number). */
		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
			QLA82XX_CAMRAM_DB1 :
			QLA82XX_CAMRAM_DB2);
	}

	/* ISP82xx uses a single request/response queue pair;
	 * one MSI-X vector per response queue plus one default vector. */
	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = ha->max_rsp_queues + 1;
	return 0;

iospace_error_exit:
	return -ENOMEM;
}
1689 1689
1690 /* GS related functions */ 1690 /* GS related functions */
1691 1691
1692 /* Initialization related functions */ 1692 /* Initialization related functions */
1693 1693
/**
 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
 * @vha: SCSI driver HA context
 *
 * Enables bus mastering and Memory-Write-Invalidate, and caches the chip
 * revision from PCI config space.
 *
 * Returns 0 on success.
 */
int
qla82xx_pci_config(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int ret;

	pci_set_master(ha->pdev);
	/* MWI is only a performance hint; failure is harmless.
	 * NOTE(review): 'ret' is assigned but never examined -- presumably
	 * only to silence a must-check warning; confirm. */
	ret = pci_set_mwi(ha->pdev);
	ha->chip_revision = ha->pdev->revision;
	return 0;
}
1711 1711
/**
 * qla82xx_reset_chip() - Host-side "reset" of the ISP82xx.
 * @vha: SCSI driver HA context
 *
 * On ISP82xx this only disables interrupt delivery to this function;
 * no chip registers are reset here.  (The previous comment was a
 * copy-paste of qla82xx_pci_config()'s header.)
 */
void
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ha->isp_ops->disable_intrs(ha);
}
1724 1724
/*
 * qla82xx_config_rings() - Publish ring parameters to the firmware.
 * @vha: SCSI driver HA context
 *
 * Fills the init control block with the lengths and DMA addresses of
 * request/response queue pair 0, then zeroes the hardware in/out pointer
 * registers so both producer and consumer start at index 0.
 */
void qla82xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	struct init_cb_81xx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_81xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	/* 64-bit DMA addresses are split into low/high 32-bit halves. */
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Reset the hardware queue pointers. */
	WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}
1748 1748
/*
 * qla82xx_reset_adapter() - Quiesce the adapter (e.g. prior to unload).
 * @vha: SCSI driver HA context
 *
 * Marks the host offline, asks the firmware to stop, and disables
 * interrupt delivery for this function.
 */
void qla82xx_reset_adapter(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	vha->flags.online = 0;
	qla2x00_try_to_stop_firmware(vha);
	ha->isp_ops->disable_intrs(ha);
}
1756 1756
/*
 * qla82xx_fw_load_from_blob() - Download bootloader + firmware images.
 * @ha: HW adapter context
 *
 * Copies the bootloader region [BOOTLD_START, IMAGE_START) and then the
 * firmware image into adapter memory 8 bytes at a time, writes the BDINFO
 * magic to CAMRAM to signal the download is complete, and pokes the
 * PEG_NET/SW_RESET registers to start firmware initialization.
 *
 * Returns 0 on success, -EIO if any 8-byte memory write fails.
 */
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	__le64 data;

	/* Bootloader size in 8-byte units. */
	size = (IMAGE_START - BOOTLD_START) / 8;

	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
	flashaddr = BOOTLD_START;

	for (i = 0; i < size; i++) {
		/* NOTE(review): cpu_to_le64() on blob data that is
		 * presumably already little-endian -- the conversion
		 * direction looks inverted (le64_to_cpu would express the
		 * intent); numerically identical swap either way. Confirm
		 * against big-endian behavior. */
		data = cpu_to_le64(ptr64[i]);
		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}

	/* Firmware image follows at FLASH_ADDR_START. */
	flashaddr = FLASH_ADDR_START;
	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);

		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}
	udelay(100);

	/* Write a magic value to CAMRAM register
	 * at a specified offset to indicate
	 * that all data is written and
	 * ready for firmware to initialize.
	 */
	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);

	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}
1802 1802
/*
 * qla82xx_set_product_offset() - Locate this chip's product table entry.
 * @ha: HW adapter context
 *
 * Walks the product table of a unified ROM image and records (in
 * ha->file_prd_off) the offset of the first entry whose chip revision
 * matches and whose flags contain the required bit (bit 2, since
 * mn_present is hardcoded to 0 for P3P).
 *
 * Returns 0 on success, -1 if the table is missing or no entry matches.
 */
static int
qla82xx_set_product_offset(struct qla_hw_data *ha)
{
	struct qla82xx_uri_table_desc *ptab_desc = NULL;
	const uint8_t *unirom = ha->hablob->fw->data;
	uint32_t i;
	__le32 entries;
	__le32 flags, file_chiprev, offset;
	uint8_t chiprev = ha->chip_revision;
	/* Hardcoding mn_present flag for P3P */
	int mn_present = 0;
	uint32_t flagbit;

	ptab_desc = qla82xx_get_table_desc(unirom,
	    QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
	if (!ptab_desc)
		return -1;

	/* NOTE(review): cpu_to_le32() is used here (and below) on values
	 * read from the little-endian image; le32_to_cpu() would express
	 * the intent and keep sparse happy -- numerically the same swap on
	 * both BE and LE.  Confirm before changing. */
	entries = cpu_to_le32(ptab_desc->num_entries);

	for (i = 0; i < entries; i++) {
		/* Each entry is entry_size bytes, starting at findex. */
		offset = cpu_to_le32(ptab_desc->findex) +
		    (i * cpu_to_le32(ptab_desc->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_CHIP_REV_OFF));

		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
			ha->file_prd_off = offset;
			return 0;
		}
	}
	return -1;
}
1840 1840
/*
 * qla82xx_validate_firmware_blob() - Sanity-check a loaded firmware blob.
 * @vha: SCSI driver HA context
 * @fw_type: QLA82XX_UNIFIED_ROMIMAGE or the legacy flash image type
 *
 * For a unified ROM image, locates the product table entry for this chip;
 * for a legacy image, verifies the BDINFO magic at the fixed offset.
 * In both cases the blob must meet the type's minimum size.
 *
 * Returns 0 if the blob is acceptable, -EINVAL otherwise.
 * Side effect: records fw_type in ha->fw_type.
 */
int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
	__le32 val;
	uint32_t min_size;
	struct qla_hw_data *ha = vha->hw;
	const struct firmware *fw = ha->hablob->fw;

	ha->fw_type = fw_type;

	if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		if (qla82xx_set_product_offset(ha))
			return -EINVAL;

		min_size = QLA82XX_URI_FW_MIN_SIZE;
	} else {
		/* Legacy image: check magic word at the fixed offset. */
		val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
		if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
			return -EINVAL;

		min_size = QLA82XX_FW_MIN_SIZE;
	}

	if (fw->size < min_size)
		return -EINVAL;
	return 0;
}
1868 1868
/*
 * qla82xx_check_cmdpeg_state() - Wait for the Cmd PEG to initialize.
 * @ha: HW adapter context
 *
 * Polls CRB_CMDPEG_STATE every 500ms for up to 60 attempts (~30s).
 * Returns QLA_SUCCESS once the PEG reports INITIALIZE_COMPLETE or
 * INITIALIZE_ACK; on timeout, logs the failure, forces the state register
 * to PHAN_INITIALIZE_FAILED and returns QLA_FUNCTION_FAILED.
 */
static int
qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
{
	u32 val = 0;
	int retries = 60;

	do {
		read_lock(&ha->hw_lock);
		val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
		read_unlock(&ha->hw_lock);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return QLA_SUCCESS;
		case PHAN_INITIALIZE_FAILED:
			/* Keep polling: the PEG may still come up. */
			break;
		default:
			break;
		}
		/* Logged every 500ms iteration while waiting. */
		qla_printk(KERN_WARNING, ha,
			"CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
			val, retries);

		msleep(500);

	} while (--retries);

	qla_printk(KERN_INFO, ha,
		"Cmd Peg initialization failed: 0x%x.\n", val);

	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
	/* hw_lock is taken in read mode even around the write, matching
	 * how CRB accesses are serialized elsewhere in this file. */
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
	read_unlock(&ha->hw_lock);
	return QLA_FUNCTION_FAILED;
}
1906 1906
/*
 * qla82xx_check_rcvpeg_state() - Wait for the Rcv PEG to initialize.
 * @ha: HW adapter context
 *
 * Mirror of qla82xx_check_cmdpeg_state() for CRB_RCVPEG_STATE: polls every
 * 500ms for up to 60 attempts (~30s).  Returns QLA_SUCCESS on COMPLETE or
 * ACK; on timeout, forces the state to PHAN_INITIALIZE_FAILED and returns
 * QLA_FUNCTION_FAILED.
 */
static int
qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
{
	u32 val = 0;
	int retries = 60;

	do {
		read_lock(&ha->hw_lock);
		val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
		read_unlock(&ha->hw_lock);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return QLA_SUCCESS;
		case PHAN_INITIALIZE_FAILED:
			/* Keep polling: the PEG may still come up. */
			break;
		default:
			break;
		}

		/* Logged every 500ms iteration while waiting. */
		qla_printk(KERN_WARNING, ha,
			"CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
			val, retries);

		msleep(500);

	} while (--retries);

	qla_printk(KERN_INFO, ha,
		"Rcv Peg initialization failed: 0x%x.\n", val);
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
	read_unlock(&ha->hw_lock);
	return QLA_FUNCTION_FAILED;
}
1943 1943
/* ISR related functions */

/* Legacy-interrupt target-mask register per PCI function, indexed by
 * function number.
 * NOTE(review): entry 6 repeats ISR_INT_TARGET_MASK_F7 and no ..._F6
 * constant is used -- looks like a copy-paste slip; confirm against the
 * ISP82xx register map. */
uint32_t qla82xx_isr_int_target_mask_enable[8] = {
	ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
	ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
	ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
	ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
};
1951 1951
/* Legacy-interrupt target-status register per PCI function, indexed by
 * function number.
 * NOTE(review): as with the mask table above, entry 6 repeats the _F7
 * register and no _F6 constant is used -- confirm intended. */
uint32_t qla82xx_isr_int_target_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
};
1958 1958
/* Per-function legacy INTx configuration (vector bit, target status and
 * mask register offsets), expanded from the QLA82XX_LEGACY_INTR_CONFIG
 * initializer macro. */
static struct qla82xx_legacy_intr_set legacy_intr[] = \
	QLA82XX_LEGACY_INTR_CONFIG;
1961 1961
/*
 * qla82xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 value (already extracted from the host status word)
 *
 * Copies mailbox_out[1..mbx_count-1] from the chip into ha->mailbox_out[]
 * and sets the mbox_int flag so the waiting mailbox command can complete.
 */
static void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	wptr = (uint16_t __iomem *)&reg->mailbox_out[1];

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3_11(printk(KERN_INFO "%s(%ld): "
			"Got mailbox completion. cmd=%x.\n",
			__func__, vha->host_no, ha->mcp->mb[0]));
	} else {
		/* Completion arrived with no mailbox command outstanding. */
		qla_printk(KERN_INFO, ha,
			"%s(%ld): MBX pointer ERROR!\n",
			__func__, vha->host_no);
	}
}
1995 1995
/*
 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
 * @irq:
 * @dev_id: SCSI driver HA context (response queue pointer)
 *
 * Called by system whenever the host adapter generates an interrupt.
 * For INTx/legacy mode it first checks and clears the shared interrupt
 * vector/status registers; the body then dispatches on host_status:
 * mailbox completion, async event, or response-queue processing.
 * (The previous comment wrongly said ISP23xx/ISP63xx.)
 *
 * Returns handled flag.
 */
irqreturn_t
qla82xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0, status1 = 0;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	if (!ha->flags.msi_enabled) {
		/* Legacy INTx: make sure the interrupt is really ours. */
		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
		if (!(status & ha->nx_legacy_intr.int_vec_bit))
			return IRQ_NONE;

		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
			return IRQ_NONE;
	}

	/* clear the interrupt */
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla82xx_rd_32(ha, ISR_INT_VECTOR);
	qla82xx_rd_32(ha, ISR_INT_VECTOR);

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Executes the body exactly once (iter counts 1 -> 0). */
	for (iter = 1; iter--; ) {

		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				/* Asynchronous event: mailbox 0-3 payload. */
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				DEBUG2(printk("scsi(%ld): "
					" Unrecognized interrupt type (%d).\n",
					vha->host_no, stat & 0xff));
				break;
			}
		}
		WRT_REG_DWORD(&reg->host_int, 0);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (!ha->flags.msi_enabled)
		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

#ifdef QL_DEBUG_LEVEL_17
	/* NOTE(review): 'stat' is only assigned when host_int was set;
	 * this debug printk can read it uninitialized otherwise. */
	if (!irq && ha->flags.eeh_busy)
		qla_printk(KERN_WARNING, ha,
			"isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
			status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	/* Wake the mailbox waiter if this interrupt completed a command. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}
2098 2098
/*
 * qla82xx_msix_default() - MSI-X handler for the default vector.
 * @irq: IRQ number
 * @dev_id: response queue pointer
 *
 * Same dispatch as qla82xx_intr_handler() (mailbox completion, async
 * event, response queue) but without the legacy INTx ownership checks,
 * since MSI-X vectors are not shared.
 */
irqreturn_t
qla82xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	unsigned long flags;
	uint32_t stat;
	uint16_t mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	reg = &ha->iobase->isp82;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);

			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				/* Asynchronous event: mailbox 0-3 payload. */
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				DEBUG2(printk("scsi(%ld): "
					" Unrecognized interrupt type (%d).\n",
					vha->host_no, stat & 0xff));
				break;
			}
		}
		WRT_REG_DWORD(&reg->host_int, 0);
	} while (0);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

#ifdef QL_DEBUG_LEVEL_17
	/* NOTE(review): 'stat' is only assigned when host_int was set;
	 * this debug printk can read it uninitialized otherwise. */
	if (!irq && ha->flags.eeh_busy)
		qla_printk(KERN_WARNING, ha,
			"isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
			status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif

	/* Wake the mailbox waiter if this interrupt completed a command. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}
2171 2171
/*
 * qla82xx_msix_rsp_q() - MSI-X handler for the response-queue vector.
 * @irq: IRQ number
 * @dev_id: response queue pointer
 *
 * Drains the response queue and clears host_int.
 * NOTE(review): uses spin_lock_irq()/spin_unlock_irq(), which
 * unconditionally re-enables local interrupts on unlock; in hard-irq
 * context the irqsave variant (as used by the other handlers here) would
 * be the safer pattern -- confirm.
 */
irqreturn_t
qla82xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp82;
	spin_lock_irq(&ha->hardware_lock);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	WRT_REG_DWORD(&reg->host_int, 0);
	spin_unlock_irq(&ha->hardware_lock);
	return IRQ_HANDLED;
}
2196 2196
/*
 * qla82xx_poll() - Polled-mode interrupt service for ISP82xx.
 *
 * Reads the host status register and dispatches exactly like the
 * hardware interrupt handler: mailbox completions, async events and
 * response-queue entries.  Called with interrupts potentially
 * disabled, hence the irqsave locking.
 *
 * @irq:    unused, kept for handler-style signature
 * @dev_id: response queue pointer (struct rsp_que *)
 */
void
qla82xx_poll(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	/* NOTE(review): 'status' is written below but never read in this
	 * function (unlike the real IRQ handler, which uses it to complete
	 * a waiting mailbox command) — dead store, candidate for removal. */
	int status = 0;
	uint32_t stat;
	uint16_t mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
			"%s(): NULL response queue pointer\n", __func__);
		return;
	}
	ha = rsp->hw;

	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);

	/* Only service the chip if it is actually asserting host_int. */
	if (RD_REG_DWORD(&reg->host_int)) {
		stat = RD_REG_DWORD(&reg->host_status);
		/* Low byte of host_status encodes the interrupt type. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion; mb0 is in the
			 * upper half of host_status. */
			qla82xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			/* Asynchronous event: collect mailboxes 0-3. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
			mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
			mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			/* Response queue has new entries. */
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
				"(%d).\n",
				vha->host_no, stat & 0xff));
			break;
		}
	}
	/* Acknowledge/clear the interrupt unconditionally. */
	WRT_REG_DWORD(&reg->host_int, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
2251 2251
2252 void 2252 void
2253 qla82xx_enable_intrs(struct qla_hw_data *ha) 2253 qla82xx_enable_intrs(struct qla_hw_data *ha)
2254 { 2254 {
2255 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2255 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2256 qla82xx_mbx_intr_enable(vha); 2256 qla82xx_mbx_intr_enable(vha);
2257 spin_lock_irq(&ha->hardware_lock); 2257 spin_lock_irq(&ha->hardware_lock);
2258 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 2258 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2259 spin_unlock_irq(&ha->hardware_lock); 2259 spin_unlock_irq(&ha->hardware_lock);
2260 ha->interrupts_on = 1; 2260 ha->interrupts_on = 1;
2261 } 2261 }
2262 2262
2263 void 2263 void
2264 qla82xx_disable_intrs(struct qla_hw_data *ha) 2264 qla82xx_disable_intrs(struct qla_hw_data *ha)
2265 { 2265 {
2266 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2266 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2267 qla82xx_mbx_intr_disable(vha); 2267 qla82xx_mbx_intr_disable(vha);
2268 spin_lock_irq(&ha->hardware_lock); 2268 spin_lock_irq(&ha->hardware_lock);
2269 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); 2269 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2270 spin_unlock_irq(&ha->hardware_lock); 2270 spin_unlock_irq(&ha->hardware_lock);
2271 ha->interrupts_on = 0; 2271 ha->interrupts_on = 0;
2272 } 2272 }
2273 2273
/*
 * qla82xx_init_flags() - One-time ISP 8021 per-adapter state setup.
 *
 * Initializes the CRB window bookkeeping and copies the per-PCI-function
 * legacy interrupt register set from the global legacy_intr[] table into
 * the adapter structure.
 */
void qla82xx_init_flags(struct qla_hw_data *ha)
{
	struct qla82xx_legacy_intr_set *nx_legacy_intr;

	/* ISP 8021 initializations */
	rwlock_init(&ha->hw_lock);
	/* Window caches start invalid (-1 / 255) so the first access
	 * forces a real window programming. */
	ha->qdr_sn_window = -1;
	ha->ddr_mn_window = -1;
	ha->curr_window = 255;
	/* PCI function number selects this port's legacy interrupt set. */
	ha->portnum = PCI_FUNC(ha->pdev->devfn);
	nx_legacy_intr = &legacy_intr[ha->portnum];
	/* Field-by-field copy of the legacy interrupt register addresses. */
	ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
	ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
	ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
}
2290 2290
2291 inline void 2291 inline void
2292 qla82xx_set_drv_active(scsi_qla_host_t *vha) 2292 qla82xx_set_drv_active(scsi_qla_host_t *vha)
2293 { 2293 {
2294 uint32_t drv_active; 2294 uint32_t drv_active;
2295 struct qla_hw_data *ha = vha->hw; 2295 struct qla_hw_data *ha = vha->hw;
2296 2296
2297 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2297 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2298 2298
2299 /* If reset value is all FF's, initialize DRV_ACTIVE */ 2299 /* If reset value is all FF's, initialize DRV_ACTIVE */
2300 if (drv_active == 0xffffffff) { 2300 if (drv_active == 0xffffffff) {
2301 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 2301 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2302 QLA82XX_DRV_NOT_ACTIVE); 2302 QLA82XX_DRV_NOT_ACTIVE);
2303 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2303 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2304 } 2304 }
2305 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); 2305 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2306 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2306 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2307 } 2307 }
2308 2308
2309 inline void 2309 inline void
2310 qla82xx_clear_drv_active(struct qla_hw_data *ha) 2310 qla82xx_clear_drv_active(struct qla_hw_data *ha)
2311 { 2311 {
2312 uint32_t drv_active; 2312 uint32_t drv_active;
2313 2313
2314 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2314 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2315 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); 2315 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2316 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2316 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2317 } 2317 }
2318 2318
2319 static inline int 2319 static inline int
2320 qla82xx_need_reset(struct qla_hw_data *ha) 2320 qla82xx_need_reset(struct qla_hw_data *ha)
2321 { 2321 {
2322 uint32_t drv_state; 2322 uint32_t drv_state;
2323 int rval; 2323 int rval;
2324 2324
2325 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2325 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2326 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2326 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2327 return rval; 2327 return rval;
2328 } 2328 }
2329 2329
2330 static inline void 2330 static inline void
2331 qla82xx_set_rst_ready(struct qla_hw_data *ha) 2331 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2332 { 2332 {
2333 uint32_t drv_state; 2333 uint32_t drv_state;
2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2335 2335
2336 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2336 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2337 2337
2338 /* If reset value is all FF's, initialize DRV_STATE */ 2338 /* If reset value is all FF's, initialize DRV_STATE */
2339 if (drv_state == 0xffffffff) { 2339 if (drv_state == 0xffffffff) {
2340 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); 2340 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
2341 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2341 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2342 } 2342 }
2343 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2343 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2344 qla_printk(KERN_INFO, ha, 2344 qla_printk(KERN_INFO, ha,
2345 "%s(%ld):drv_state = 0x%x\n", 2345 "%s(%ld):drv_state = 0x%x\n",
2346 __func__, vha->host_no, drv_state); 2346 __func__, vha->host_no, drv_state);
2347 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2347 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2348 } 2348 }
2349 2349
2350 static inline void 2350 static inline void
2351 qla82xx_clear_rst_ready(struct qla_hw_data *ha) 2351 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2352 { 2352 {
2353 uint32_t drv_state; 2353 uint32_t drv_state;
2354 2354
2355 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2355 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2356 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2356 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2357 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2357 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2358 } 2358 }
2359 2359
2360 static inline void 2360 static inline void
2361 qla82xx_set_qsnt_ready(struct qla_hw_data *ha) 2361 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2362 { 2362 {
2363 uint32_t qsnt_state; 2363 uint32_t qsnt_state;
2364 2364
2365 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2366 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); 2366 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2367 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2367 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2368 } 2368 }
2369 2369
2370 void 2370 void
2371 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) 2371 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2372 { 2372 {
2373 struct qla_hw_data *ha = vha->hw; 2373 struct qla_hw_data *ha = vha->hw;
2374 uint32_t qsnt_state; 2374 uint32_t qsnt_state;
2375 2375
2376 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2376 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2377 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); 2377 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2379 } 2379 }
2380 2380
2381 static int 2381 static int
2382 qla82xx_load_fw(scsi_qla_host_t *vha) 2382 qla82xx_load_fw(scsi_qla_host_t *vha)
2383 { 2383 {
2384 int rst; 2384 int rst;
2385 struct fw_blob *blob; 2385 struct fw_blob *blob;
2386 struct qla_hw_data *ha = vha->hw; 2386 struct qla_hw_data *ha = vha->hw;
2387 2387
2388 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2388 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2389 qla_printk(KERN_ERR, ha, 2389 qla_printk(KERN_ERR, ha,
2390 "%s: Error during CRB Initialization\n", __func__); 2390 "%s: Error during CRB Initialization\n", __func__);
2391 return QLA_FUNCTION_FAILED; 2391 return QLA_FUNCTION_FAILED;
2392 } 2392 }
2393 udelay(500); 2393 udelay(500);
2394 2394
2395 /* Bring QM and CAMRAM out of reset */ 2395 /* Bring QM and CAMRAM out of reset */
2396 rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); 2396 rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2397 rst &= ~((1 << 28) | (1 << 24)); 2397 rst &= ~((1 << 28) | (1 << 24));
2398 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); 2398 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2399 2399
2400 /* 2400 /*
2401 * FW Load priority: 2401 * FW Load priority:
2402 * 1) Operational firmware residing in flash. 2402 * 1) Operational firmware residing in flash.
2403 * 2) Firmware via request-firmware interface (.bin file). 2403 * 2) Firmware via request-firmware interface (.bin file).
2404 */ 2404 */
2405 if (ql2xfwloadbin == 2) 2405 if (ql2xfwloadbin == 2)
2406 goto try_blob_fw; 2406 goto try_blob_fw;
2407 2407
2408 qla_printk(KERN_INFO, ha, 2408 qla_printk(KERN_INFO, ha,
2409 "Attempting to load firmware from flash\n"); 2409 "Attempting to load firmware from flash\n");
2410 2410
2411 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2411 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2412 qla_printk(KERN_ERR, ha, 2412 qla_printk(KERN_ERR, ha,
2413 "Firmware loaded successfully from flash\n"); 2413 "Firmware loaded successfully from flash\n");
2414 return QLA_SUCCESS; 2414 return QLA_SUCCESS;
2415 } 2415 }
2416 try_blob_fw: 2416 try_blob_fw:
2417 qla_printk(KERN_INFO, ha, 2417 qla_printk(KERN_INFO, ha,
2418 "Attempting to load firmware from blob\n"); 2418 "Attempting to load firmware from blob\n");
2419 2419
2420 /* Load firmware blob. */ 2420 /* Load firmware blob. */
2421 blob = ha->hablob = qla2x00_request_firmware(vha); 2421 blob = ha->hablob = qla2x00_request_firmware(vha);
2422 if (!blob) { 2422 if (!blob) {
2423 qla_printk(KERN_ERR, ha, 2423 qla_printk(KERN_ERR, ha,
2424 "Firmware image not present.\n"); 2424 "Firmware image not present.\n");
2425 goto fw_load_failed; 2425 goto fw_load_failed;
2426 } 2426 }
2427 2427
2428 /* Validating firmware blob */ 2428 /* Validating firmware blob */
2429 if (qla82xx_validate_firmware_blob(vha, 2429 if (qla82xx_validate_firmware_blob(vha,
2430 QLA82XX_FLASH_ROMIMAGE)) { 2430 QLA82XX_FLASH_ROMIMAGE)) {
2431 /* Fallback to URI format */ 2431 /* Fallback to URI format */
2432 if (qla82xx_validate_firmware_blob(vha, 2432 if (qla82xx_validate_firmware_blob(vha,
2433 QLA82XX_UNIFIED_ROMIMAGE)) { 2433 QLA82XX_UNIFIED_ROMIMAGE)) {
2434 qla_printk(KERN_ERR, ha, 2434 qla_printk(KERN_ERR, ha,
2435 "No valid firmware image found!!!"); 2435 "No valid firmware image found!!!");
2436 return QLA_FUNCTION_FAILED; 2436 return QLA_FUNCTION_FAILED;
2437 } 2437 }
2438 } 2438 }
2439 2439
2440 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2440 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2441 qla_printk(KERN_ERR, ha, 2441 qla_printk(KERN_ERR, ha,
2442 "%s: Firmware loaded successfully " 2442 "%s: Firmware loaded successfully "
2443 " from binary blob\n", __func__); 2443 " from binary blob\n", __func__);
2444 return QLA_SUCCESS; 2444 return QLA_SUCCESS;
2445 } else { 2445 } else {
2446 qla_printk(KERN_ERR, ha, 2446 qla_printk(KERN_ERR, ha,
2447 "Firmware load failed from binary blob\n"); 2447 "Firmware load failed from binary blob\n");
2448 blob->fw = NULL; 2448 blob->fw = NULL;
2449 blob = NULL; 2449 blob = NULL;
2450 goto fw_load_failed; 2450 goto fw_load_failed;
2451 } 2451 }
2452 return QLA_SUCCESS; 2452 return QLA_SUCCESS;
2453 2453
2454 fw_load_failed: 2454 fw_load_failed:
2455 return QLA_FUNCTION_FAILED; 2455 return QLA_FUNCTION_FAILED;
2456 } 2456 }
2457 2457
2458 int 2458 int
2459 qla82xx_start_firmware(scsi_qla_host_t *vha) 2459 qla82xx_start_firmware(scsi_qla_host_t *vha)
2460 { 2460 {
2461 int pcie_cap; 2461 int pcie_cap;
2462 uint16_t lnk; 2462 uint16_t lnk;
2463 struct qla_hw_data *ha = vha->hw; 2463 struct qla_hw_data *ha = vha->hw;
2464 2464
2465 /* scrub dma mask expansion register */ 2465 /* scrub dma mask expansion register */
2466 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); 2466 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
2467 2467
2468 /* Put both the PEG CMD and RCV PEG to default state 2468 /* Put both the PEG CMD and RCV PEG to default state
2469 * of 0 before resetting the hardware 2469 * of 0 before resetting the hardware
2470 */ 2470 */
2471 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); 2471 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2472 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); 2472 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2473 2473
2474 /* Overwrite stale initialization register values */ 2474 /* Overwrite stale initialization register values */
2475 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); 2475 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2476 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2476 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2477 2477
2478 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2478 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2479 qla_printk(KERN_INFO, ha, 2479 qla_printk(KERN_INFO, ha,
2480 "%s: Error trying to start fw!\n", __func__); 2480 "%s: Error trying to start fw!\n", __func__);
2481 return QLA_FUNCTION_FAILED; 2481 return QLA_FUNCTION_FAILED;
2482 } 2482 }
2483 2483
2484 /* Handshake with the card before we register the devices. */ 2484 /* Handshake with the card before we register the devices. */
2485 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2485 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2486 qla_printk(KERN_INFO, ha, 2486 qla_printk(KERN_INFO, ha,
2487 "%s: Error during card handshake!\n", __func__); 2487 "%s: Error during card handshake!\n", __func__);
2488 return QLA_FUNCTION_FAILED; 2488 return QLA_FUNCTION_FAILED;
2489 } 2489 }
2490 2490
2491 /* Negotiated Link width */ 2491 /* Negotiated Link width */
2492 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 2492 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2493 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 2493 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2494 ha->link_width = (lnk >> 4) & 0x3f; 2494 ha->link_width = (lnk >> 4) & 0x3f;
2495 2495
2496 /* Synchronize with Receive peg */ 2496 /* Synchronize with Receive peg */
2497 return qla82xx_check_rcvpeg_state(ha); 2497 return qla82xx_check_rcvpeg_state(ha);
2498 } 2498 }
2499 2499
/*
 * qla2xx_build_scsi_type_6_iocbs() - Build the DSD (data segment
 * descriptor) chain for a Command Type 6 IOCB.
 *
 * Splits @tot_dsds scatter/gather entries into chained DSD lists taken
 * from the adapter-global pool (ha->gbl_dsd_list) and links them into
 * @cmd_pkt.  The lists consumed here are moved onto the command's
 * per-context list (ctx->dsd_list) so they can be returned to the pool
 * on completion.
 *
 * NOTE(review): assumes the caller has already reserved enough lists
 * on ha->gbl_dsd_list and holds ha->hardware_lock — the pool counters
 * are updated without any locking here; verify against callers.
 *
 * @sp:       command to build DSDs for
 * @cmd_pkt:  Command Type 6 IOCB being filled in
 * @tot_dsds: total number of data segment descriptors required
 *
 * Always returns 0.
 */
static inline int
qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
			__constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
			__constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->ctx;

	/* Consume one pool DSD list per QLA_DSDS_PER_IOCB descriptors. */
	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		/* +1 slot reserved for the chain pointer / terminator. */
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		/* Move a list from the global pool to this command's
		 * context list; adjust the pool accounting. */
		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			/* First list is linked from the IOCB itself. */
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = dsd_list_len;
		} else {
			/* Subsequent lists chain from the tail of the
			 * previous list. */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
		}
		cur_dsd = (uint32_t *)next_dsd;
		/* Fill this list with address/length pairs from the SG list. */
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
2592 2592
2593 /* 2593 /*
2594 * qla82xx_calc_dsd_lists() - Determine number of DSD list required 2594 * qla82xx_calc_dsd_lists() - Determine number of DSD list required
2595 * for Command Type 6. 2595 * for Command Type 6.
2596 * 2596 *
2597 * @dsds: number of data segment decriptors needed 2597 * @dsds: number of data segment decriptors needed
2598 * 2598 *
2599 * Returns the number of dsd list needed to store @dsds. 2599 * Returns the number of dsd list needed to store @dsds.
2600 */ 2600 */
2601 inline uint16_t 2601 inline uint16_t
2602 qla82xx_calc_dsd_lists(uint16_t dsds) 2602 qla82xx_calc_dsd_lists(uint16_t dsds)
2603 { 2603 {
2604 uint16_t dsd_lists = 0; 2604 uint16_t dsd_lists = 0;
2605 2605
2606 dsd_lists = (dsds/QLA_DSDS_PER_IOCB); 2606 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2607 if (dsds % QLA_DSDS_PER_IOCB) 2607 if (dsds % QLA_DSDS_PER_IOCB)
2608 dsd_lists++; 2608 dsd_lists++;
2609 return dsd_lists; 2609 return dsd_lists;
2610 } 2610 }
2611 2611
2612 /* 2612 /*
2613 * qla82xx_start_scsi() - Send a SCSI command to the ISP 2613 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2614 * @sp: command to send to the ISP 2614 * @sp: command to send to the ISP
2615 * 2615 *
2616 * Returns non-zero if a failure occurred, else zero. 2616 * Returns non-zero if a failure occurred, else zero.
2617 */ 2617 */
2618 int 2618 int
2619 qla82xx_start_scsi(srb_t *sp) 2619 qla82xx_start_scsi(srb_t *sp)
2620 { 2620 {
2621 int ret, nseg; 2621 int ret, nseg;
2622 unsigned long flags; 2622 unsigned long flags;
2623 struct scsi_cmnd *cmd; 2623 struct scsi_cmnd *cmd;
2624 uint32_t *clr_ptr; 2624 uint32_t *clr_ptr;
2625 uint32_t index; 2625 uint32_t index;
2626 uint32_t handle; 2626 uint32_t handle;
2627 uint16_t cnt; 2627 uint16_t cnt;
2628 uint16_t req_cnt; 2628 uint16_t req_cnt;
2629 uint16_t tot_dsds; 2629 uint16_t tot_dsds;
2630 struct device_reg_82xx __iomem *reg; 2630 struct device_reg_82xx __iomem *reg;
2631 uint32_t dbval; 2631 uint32_t dbval;
2632 uint32_t *fcp_dl; 2632 uint32_t *fcp_dl;
2633 uint8_t additional_cdb_len; 2633 uint8_t additional_cdb_len;
2634 struct ct6_dsd *ctx; 2634 struct ct6_dsd *ctx;
2635 struct scsi_qla_host *vha = sp->fcport->vha; 2635 struct scsi_qla_host *vha = sp->fcport->vha;
2636 struct qla_hw_data *ha = vha->hw; 2636 struct qla_hw_data *ha = vha->hw;
2637 struct req_que *req = NULL; 2637 struct req_que *req = NULL;
2638 struct rsp_que *rsp = NULL; 2638 struct rsp_que *rsp = NULL;
2639 char tag[2]; 2639 char tag[2];
2640 2640
2641 /* Setup device pointers. */ 2641 /* Setup device pointers. */
2642 ret = 0; 2642 ret = 0;
2643 reg = &ha->iobase->isp82; 2643 reg = &ha->iobase->isp82;
2644 cmd = sp->cmd; 2644 cmd = sp->cmd;
2645 req = vha->req; 2645 req = vha->req;
2646 rsp = ha->rsp_q_map[0]; 2646 rsp = ha->rsp_q_map[0];
2647 2647
2648 /* So we know we haven't pci_map'ed anything yet */ 2648 /* So we know we haven't pci_map'ed anything yet */
2649 tot_dsds = 0; 2649 tot_dsds = 0;
2650 2650
2651 dbval = 0x04 | (ha->portnum << 5); 2651 dbval = 0x04 | (ha->portnum << 5);
2652 2652
2653 /* Send marker if required */ 2653 /* Send marker if required */
2654 if (vha->marker_needed != 0) { 2654 if (vha->marker_needed != 0) {
2655 if (qla2x00_marker(vha, req, 2655 if (qla2x00_marker(vha, req,
2656 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2656 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2657 return QLA_FUNCTION_FAILED; 2657 return QLA_FUNCTION_FAILED;
2658 vha->marker_needed = 0; 2658 vha->marker_needed = 0;
2659 } 2659 }
2660 2660
2661 /* Acquire ring specific lock */ 2661 /* Acquire ring specific lock */
2662 spin_lock_irqsave(&ha->hardware_lock, flags); 2662 spin_lock_irqsave(&ha->hardware_lock, flags);
2663 2663
2664 /* Check for room in outstanding command list. */ 2664 /* Check for room in outstanding command list. */
2665 handle = req->current_outstanding_cmd; 2665 handle = req->current_outstanding_cmd;
2666 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 2666 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2667 handle++; 2667 handle++;
2668 if (handle == MAX_OUTSTANDING_COMMANDS) 2668 if (handle == MAX_OUTSTANDING_COMMANDS)
2669 handle = 1; 2669 handle = 1;
2670 if (!req->outstanding_cmds[handle]) 2670 if (!req->outstanding_cmds[handle])
2671 break; 2671 break;
2672 } 2672 }
2673 if (index == MAX_OUTSTANDING_COMMANDS) 2673 if (index == MAX_OUTSTANDING_COMMANDS)
2674 goto queuing_error; 2674 goto queuing_error;
2675 2675
2676 /* Map the sg table so we have an accurate count of sg entries needed */ 2676 /* Map the sg table so we have an accurate count of sg entries needed */
2677 if (scsi_sg_count(cmd)) { 2677 if (scsi_sg_count(cmd)) {
2678 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2678 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2679 scsi_sg_count(cmd), cmd->sc_data_direction); 2679 scsi_sg_count(cmd), cmd->sc_data_direction);
2680 if (unlikely(!nseg)) 2680 if (unlikely(!nseg))
2681 goto queuing_error; 2681 goto queuing_error;
2682 } else 2682 } else
2683 nseg = 0; 2683 nseg = 0;
2684 2684
2685 tot_dsds = nseg; 2685 tot_dsds = nseg;
2686 2686
2687 if (tot_dsds > ql2xshiftctondsd) { 2687 if (tot_dsds > ql2xshiftctondsd) {
2688 struct cmd_type_6 *cmd_pkt; 2688 struct cmd_type_6 *cmd_pkt;
2689 uint16_t more_dsd_lists = 0; 2689 uint16_t more_dsd_lists = 0;
2690 struct dsd_dma *dsd_ptr; 2690 struct dsd_dma *dsd_ptr;
2691 uint16_t i; 2691 uint16_t i;
2692 2692
2693 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2693 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2694 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2694 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2695 goto queuing_error; 2695 goto queuing_error;
2696 2696
2697 if (more_dsd_lists <= ha->gbl_dsd_avail) 2697 if (more_dsd_lists <= ha->gbl_dsd_avail)
2698 goto sufficient_dsds; 2698 goto sufficient_dsds;
2699 else 2699 else
2700 more_dsd_lists -= ha->gbl_dsd_avail; 2700 more_dsd_lists -= ha->gbl_dsd_avail;
2701 2701
2702 for (i = 0; i < more_dsd_lists; i++) { 2702 for (i = 0; i < more_dsd_lists; i++) {
2703 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2703 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2704 if (!dsd_ptr) 2704 if (!dsd_ptr)
2705 goto queuing_error; 2705 goto queuing_error;
2706 2706
2707 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2707 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2708 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2708 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2709 if (!dsd_ptr->dsd_addr) { 2709 if (!dsd_ptr->dsd_addr) {
2710 kfree(dsd_ptr); 2710 kfree(dsd_ptr);
2711 goto queuing_error; 2711 goto queuing_error;
2712 } 2712 }
2713 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2713 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2714 ha->gbl_dsd_avail++; 2714 ha->gbl_dsd_avail++;
2715 } 2715 }
2716 2716
2717 sufficient_dsds: 2717 sufficient_dsds:
2718 req_cnt = 1; 2718 req_cnt = 1;
2719 2719
2720 if (req->cnt < (req_cnt + 2)) { 2720 if (req->cnt < (req_cnt + 2)) {
2721 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2721 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2722 &reg->req_q_out[0]); 2722 &reg->req_q_out[0]);
2723 if (req->ring_index < cnt) 2723 if (req->ring_index < cnt)
2724 req->cnt = cnt - req->ring_index; 2724 req->cnt = cnt - req->ring_index;
2725 else 2725 else
2726 req->cnt = req->length - 2726 req->cnt = req->length -
2727 (req->ring_index - cnt); 2727 (req->ring_index - cnt);
2728 } 2728 }
2729 2729
2730 if (req->cnt < (req_cnt + 2)) 2730 if (req->cnt < (req_cnt + 2))
2731 goto queuing_error; 2731 goto queuing_error;
2732 2732
2733 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2733 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2734 if (!sp->ctx) { 2734 if (!sp->ctx) {
2735 DEBUG(printk(KERN_INFO 2735 DEBUG(printk(KERN_INFO
2736 "%s(%ld): failed to allocate" 2736 "%s(%ld): failed to allocate"
2737 " ctx.\n", __func__, vha->host_no)); 2737 " ctx.\n", __func__, vha->host_no));
2738 goto queuing_error; 2738 goto queuing_error;
2739 } 2739 }
2740 memset(ctx, 0, sizeof(struct ct6_dsd)); 2740 memset(ctx, 0, sizeof(struct ct6_dsd));
2741 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2741 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2742 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2742 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2743 if (!ctx->fcp_cmnd) { 2743 if (!ctx->fcp_cmnd) {
2744 DEBUG2_3(printk("%s(%ld): failed to allocate" 2744 DEBUG2_3(printk("%s(%ld): failed to allocate"
2745 " fcp_cmnd.\n", __func__, vha->host_no)); 2745 " fcp_cmnd.\n", __func__, vha->host_no));
2746 goto queuing_error_fcp_cmnd; 2746 goto queuing_error_fcp_cmnd;
2747 } 2747 }
2748 2748
2749 /* Initialize the DSD list and dma handle */ 2749 /* Initialize the DSD list and dma handle */
2750 INIT_LIST_HEAD(&ctx->dsd_list); 2750 INIT_LIST_HEAD(&ctx->dsd_list);
2751 ctx->dsd_use_cnt = 0; 2751 ctx->dsd_use_cnt = 0;
2752 2752
2753 if (cmd->cmd_len > 16) { 2753 if (cmd->cmd_len > 16) {
2754 additional_cdb_len = cmd->cmd_len - 16; 2754 additional_cdb_len = cmd->cmd_len - 16;
2755 if ((cmd->cmd_len % 4) != 0) { 2755 if ((cmd->cmd_len % 4) != 0) {
2756 /* SCSI command bigger than 16 bytes must be 2756 /* SCSI command bigger than 16 bytes must be
2757 * multiple of 4 2757 * multiple of 4
2758 */ 2758 */
2759 goto queuing_error_fcp_cmnd; 2759 goto queuing_error_fcp_cmnd;
2760 } 2760 }
2761 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2761 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2762 } else { 2762 } else {
2763 additional_cdb_len = 0; 2763 additional_cdb_len = 0;
2764 ctx->fcp_cmnd_len = 12 + 16 + 4; 2764 ctx->fcp_cmnd_len = 12 + 16 + 4;
2765 } 2765 }
2766 2766
2767 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 2767 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2768 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2768 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2769 2769
2770 /* Zero out remaining portion of packet. */ 2770 /* Zero out remaining portion of packet. */
2771 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2771 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2772 clr_ptr = (uint32_t *)cmd_pkt + 2; 2772 clr_ptr = (uint32_t *)cmd_pkt + 2;
2773 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2773 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2774 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2774 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2775 2775
2776 /* Set NPORT-ID and LUN number*/ 2776 /* Set NPORT-ID and LUN number*/
2777 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2777 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2778 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2778 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2779 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2779 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2780 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2780 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2781 cmd_pkt->vp_index = sp->fcport->vp_idx; 2781 cmd_pkt->vp_index = sp->fcport->vp_idx;
2782 2782
2783 /* Build IOCB segments */ 2783 /* Build IOCB segments */
2784 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2784 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2785 goto queuing_error_fcp_cmnd; 2785 goto queuing_error_fcp_cmnd;
2786 2786
2787 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2787 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2788 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2788 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2789 2789
2790 /* 2790 /*
2791 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2791 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2792 */ 2792 */
2793 if (scsi_populate_tag_msg(cmd, tag)) { 2793 if (scsi_populate_tag_msg(cmd, tag)) {
2794 switch (tag[0]) { 2794 switch (tag[0]) {
2795 case HEAD_OF_QUEUE_TAG: 2795 case HEAD_OF_QUEUE_TAG:
2796 ctx->fcp_cmnd->task_attribute = 2796 ctx->fcp_cmnd->task_attribute =
2797 TSK_HEAD_OF_QUEUE; 2797 TSK_HEAD_OF_QUEUE;
2798 break; 2798 break;
2799 case ORDERED_QUEUE_TAG: 2799 case ORDERED_QUEUE_TAG:
2800 ctx->fcp_cmnd->task_attribute = 2800 ctx->fcp_cmnd->task_attribute =
2801 TSK_ORDERED; 2801 TSK_ORDERED;
2802 break; 2802 break;
2803 } 2803 }
2804 } 2804 }
2805 2805
2806 /* build FCP_CMND IU */ 2806 /* build FCP_CMND IU */
2807 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2807 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2808 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2808 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2809 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2809 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2810 2810
2811 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2811 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2812 ctx->fcp_cmnd->additional_cdb_len |= 1; 2812 ctx->fcp_cmnd->additional_cdb_len |= 1;
2813 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2813 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2814 ctx->fcp_cmnd->additional_cdb_len |= 2; 2814 ctx->fcp_cmnd->additional_cdb_len |= 2;
2815 2815
2816 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2816 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2817 2817
2818 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2818 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2819 additional_cdb_len); 2819 additional_cdb_len);
2820 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 2820 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2821 2821
2822 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 2822 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2823 cmd_pkt->fcp_cmnd_dseg_address[0] = 2823 cmd_pkt->fcp_cmnd_dseg_address[0] =
2824 cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); 2824 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2825 cmd_pkt->fcp_cmnd_dseg_address[1] = 2825 cmd_pkt->fcp_cmnd_dseg_address[1] =
2826 cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); 2826 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2827 2827
2828 sp->flags |= SRB_FCP_CMND_DMA_VALID; 2828 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2829 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2829 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2830 /* Set total data segment count. */ 2830 /* Set total data segment count. */
2831 cmd_pkt->entry_count = (uint8_t)req_cnt; 2831 cmd_pkt->entry_count = (uint8_t)req_cnt;
2832 /* Specify response queue number where 2832 /* Specify response queue number where
2833 * completion should happen 2833 * completion should happen
2834 */ 2834 */
2835 cmd_pkt->entry_status = (uint8_t) rsp->id; 2835 cmd_pkt->entry_status = (uint8_t) rsp->id;
2836 } else { 2836 } else {
2837 struct cmd_type_7 *cmd_pkt; 2837 struct cmd_type_7 *cmd_pkt;
2838 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2838 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2839 if (req->cnt < (req_cnt + 2)) { 2839 if (req->cnt < (req_cnt + 2)) {
2840 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2840 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2841 &reg->req_q_out[0]); 2841 &reg->req_q_out[0]);
2842 if (req->ring_index < cnt) 2842 if (req->ring_index < cnt)
2843 req->cnt = cnt - req->ring_index; 2843 req->cnt = cnt - req->ring_index;
2844 else 2844 else
2845 req->cnt = req->length - 2845 req->cnt = req->length -
2846 (req->ring_index - cnt); 2846 (req->ring_index - cnt);
2847 } 2847 }
2848 if (req->cnt < (req_cnt + 2)) 2848 if (req->cnt < (req_cnt + 2))
2849 goto queuing_error; 2849 goto queuing_error;
2850 2850
2851 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 2851 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2852 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2852 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2853 2853
2854 /* Zero out remaining portion of packet. */ 2854 /* Zero out remaining portion of packet. */
2855 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 2855 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2856 clr_ptr = (uint32_t *)cmd_pkt + 2; 2856 clr_ptr = (uint32_t *)cmd_pkt + 2;
2857 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2857 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2858 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2858 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2859 2859
2860 /* Set NPORT-ID and LUN number*/ 2860 /* Set NPORT-ID and LUN number*/
2861 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2861 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2862 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2862 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2863 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2863 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2864 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2864 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2865 cmd_pkt->vp_index = sp->fcport->vp_idx; 2865 cmd_pkt->vp_index = sp->fcport->vp_idx;
2866 2866
2867 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2867 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2868 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2868 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2869 sizeof(cmd_pkt->lun)); 2869 sizeof(cmd_pkt->lun));
2870 2870
2871 /* 2871 /*
2872 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2872 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2873 */ 2873 */
2874 if (scsi_populate_tag_msg(cmd, tag)) { 2874 if (scsi_populate_tag_msg(cmd, tag)) {
2875 switch (tag[0]) { 2875 switch (tag[0]) {
2876 case HEAD_OF_QUEUE_TAG: 2876 case HEAD_OF_QUEUE_TAG:
2877 cmd_pkt->task = TSK_HEAD_OF_QUEUE; 2877 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2878 break; 2878 break;
2879 case ORDERED_QUEUE_TAG: 2879 case ORDERED_QUEUE_TAG:
2880 cmd_pkt->task = TSK_ORDERED; 2880 cmd_pkt->task = TSK_ORDERED;
2881 break; 2881 break;
2882 } 2882 }
2883 } 2883 }
2884 2884
2885 /* Load SCSI command packet. */ 2885 /* Load SCSI command packet. */
2886 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2886 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2887 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2887 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2888 2888
2889 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2889 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2890 2890
2891 /* Build IOCB segments */ 2891 /* Build IOCB segments */
2892 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 2892 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2893 2893
2894 /* Set total data segment count. */ 2894 /* Set total data segment count. */
2895 cmd_pkt->entry_count = (uint8_t)req_cnt; 2895 cmd_pkt->entry_count = (uint8_t)req_cnt;
2896 /* Specify response queue number where 2896 /* Specify response queue number where
2897 * completion should happen. 2897 * completion should happen.
2898 */ 2898 */
2899 cmd_pkt->entry_status = (uint8_t) rsp->id; 2899 cmd_pkt->entry_status = (uint8_t) rsp->id;
2900 2900
2901 } 2901 }
2902 /* Build command packet. */ 2902 /* Build command packet. */
2903 req->current_outstanding_cmd = handle; 2903 req->current_outstanding_cmd = handle;
2904 req->outstanding_cmds[handle] = sp; 2904 req->outstanding_cmds[handle] = sp;
2905 sp->handle = handle; 2905 sp->handle = handle;
2906 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2906 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2907 req->cnt -= req_cnt; 2907 req->cnt -= req_cnt;
2908 wmb(); 2908 wmb();
2909 2909
2910 /* Adjust ring index. */ 2910 /* Adjust ring index. */
2911 req->ring_index++; 2911 req->ring_index++;
2912 if (req->ring_index == req->length) { 2912 if (req->ring_index == req->length) {
2913 req->ring_index = 0; 2913 req->ring_index = 0;
2914 req->ring_ptr = req->ring; 2914 req->ring_ptr = req->ring;
2915 } else 2915 } else
2916 req->ring_ptr++; 2916 req->ring_ptr++;
2917 2917
2918 sp->flags |= SRB_DMA_VALID; 2918 sp->flags |= SRB_DMA_VALID;
2919 2919
2920 /* Set chip new ring index. */ 2920 /* Set chip new ring index. */
2921 /* write, read and verify logic */ 2921 /* write, read and verify logic */
2922 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 2922 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2923 if (ql2xdbwr) 2923 if (ql2xdbwr)
2924 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 2924 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2925 else { 2925 else {
2926 WRT_REG_DWORD( 2926 WRT_REG_DWORD(
2927 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2927 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2928 dbval); 2928 dbval);
2929 wmb(); 2929 wmb();
2930 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 2930 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2931 WRT_REG_DWORD( 2931 WRT_REG_DWORD(
2932 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2932 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2933 dbval); 2933 dbval);
2934 wmb(); 2934 wmb();
2935 } 2935 }
2936 } 2936 }
2937 2937
2938 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2938 /* Manage unprocessed RIO/ZIO commands in response queue. */
2939 if (vha->flags.process_response_queue && 2939 if (vha->flags.process_response_queue &&
2940 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2940 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2941 qla24xx_process_response_queue(vha, rsp); 2941 qla24xx_process_response_queue(vha, rsp);
2942 2942
2943 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2943 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2944 return QLA_SUCCESS; 2944 return QLA_SUCCESS;
2945 2945
2946 queuing_error_fcp_cmnd: 2946 queuing_error_fcp_cmnd:
2947 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 2947 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2948 queuing_error: 2948 queuing_error:
2949 if (tot_dsds) 2949 if (tot_dsds)
2950 scsi_dma_unmap(cmd); 2950 scsi_dma_unmap(cmd);
2951 2951
2952 if (sp->ctx) { 2952 if (sp->ctx) {
2953 mempool_free(sp->ctx, ha->ctx_mempool); 2953 mempool_free(sp->ctx, ha->ctx_mempool);
2954 sp->ctx = NULL; 2954 sp->ctx = NULL;
2955 } 2955 }
2956 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2956 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2957 2957
2958 return QLA_FUNCTION_FAILED; 2958 return QLA_FUNCTION_FAILED;
2959 } 2959 }
2960 2960
2961 static uint32_t * 2961 static uint32_t *
2962 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 2962 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2963 uint32_t length) 2963 uint32_t length)
2964 { 2964 {
2965 uint32_t i; 2965 uint32_t i;
2966 uint32_t val; 2966 uint32_t val;
2967 struct qla_hw_data *ha = vha->hw; 2967 struct qla_hw_data *ha = vha->hw;
2968 2968
2969 /* Dword reads to flash. */ 2969 /* Dword reads to flash. */
2970 for (i = 0; i < length/4; i++, faddr += 4) { 2970 for (i = 0; i < length/4; i++, faddr += 4) {
2971 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 2971 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2972 qla_printk(KERN_WARNING, ha, 2972 qla_printk(KERN_WARNING, ha,
2973 "Do ROM fast read failed\n"); 2973 "Do ROM fast read failed\n");
2974 goto done_read; 2974 goto done_read;
2975 } 2975 }
2976 dwptr[i] = __constant_cpu_to_le32(val); 2976 dwptr[i] = __constant_cpu_to_le32(val);
2977 } 2977 }
2978 done_read: 2978 done_read:
2979 return dwptr; 2979 return dwptr;
2980 } 2980 }
2981 2981
/*
 * qla82xx_unprotect_flash
 *	Clear the serial flash part's block-protect bits so that subsequent
 *	erase/write operations can modify the part.
 *
 *	Sequence: take the ROM hardware semaphore, read the flash status
 *	register, clear BLOCK_PROTECT_BITS, write the status register back
 *	(best-effort restoring the bits if the write fails), drop flash
 *	write-enable, then release the semaphore.
 *
 * Returns 0 on success, negative value on failure.
 */
static int
qla82xx_unprotect_flash(struct qla_hw_data *ha)
{
	int ret;
	uint32_t val;

	/* Serialize flash access against other functions/drivers. */
	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	ret = qla82xx_read_status_reg(ha, &val);
	if (ret < 0)
		goto done_unprotect;

	/* Block-protect (BP) bits sit at bit offset 2 of the status reg. */
	val &= ~(BLOCK_PROTECT_BITS << 2);
	ret = qla82xx_write_status_reg(ha, val);
	if (ret < 0) {
		/* Best effort: re-assert protection if the clear failed. */
		val |= (BLOCK_PROTECT_BITS << 2);
		qla82xx_write_status_reg(ha, val);
	}

	if (qla82xx_write_disable_flash(ha) != 0)
		qla_printk(KERN_WARNING, ha, "Write disable failed\n");

done_unprotect:
	/* Release the ROM hardware semaphore (read side-effect unlocks). */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}
3012 3012
/*
 * qla82xx_protect_flash
 *	Re-assert the serial flash part's block-protect bits after an
 *	update, locking all sectors against further writes.
 *
 *	Sequence mirrors qla82xx_unprotect_flash(): take the ROM hardware
 *	semaphore, read the status register, set BLOCK_PROTECT_BITS, write
 *	it back, drop flash write-enable, release the semaphore.
 *
 * Returns 0 on success, negative value on failure.
 */
static int
qla82xx_protect_flash(struct qla_hw_data *ha)
{
	int ret;
	uint32_t val;

	/* Serialize flash access against other functions/drivers. */
	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	ret = qla82xx_read_status_reg(ha, &val);
	if (ret < 0)
		goto done_protect;

	/* Block-protect (BP) bits sit at bit offset 2 of the status reg. */
	val |= (BLOCK_PROTECT_BITS << 2);
	/* LOCK all sectors */
	ret = qla82xx_write_status_reg(ha, val);
	if (ret < 0)
		qla_printk(KERN_WARNING, ha, "Write status register failed\n");

	if (qla82xx_write_disable_flash(ha) != 0)
		qla_printk(KERN_WARNING, ha, "Write disable failed\n");
done_protect:
	/* Release the ROM hardware semaphore (read side-effect unlocks). */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}
3041 3041
/*
 * qla82xx_erase_sector
 *	Erase the flash sector containing byte address 'addr' by issuing
 *	the serial flash sector-erase (M25P_INSTR_SE) instruction through
 *	the ROMUSB interface, then waiting for the part to finish.
 *
 *	Caller must have already cleared write protection
 *	(qla82xx_unprotect_flash()).  The register writes below are
 *	order-sensitive: address, address byte count, then opcode.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
	int ret = 0;

	/* Serialize flash access against other functions/drivers. */
	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	qla82xx_flash_set_write_enable(ha);
	/* Program target address, 3-byte address phase, then SE opcode. */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);

	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		ret = -1;
		goto done;
	}
	/* Poll the flash status register until the erase completes. */
	ret = qla82xx_flash_wait_write_finish(ha);
done:
	/* Release the ROM hardware semaphore (read side-effect unlocks). */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}
3069 3069
/*
 * qla82xx_read_optrom_data
 *	Read 'length' bytes of option ROM / flash starting at byte address
 *	'offset' into 'buf'.  Address and length are byte quantities; the
 *	underlying read operates a dword at a time.
 *
 *	SCSI request processing is blocked for the duration so flash access
 *	does not race with command I/O.
 *
 * Returns 'buf' unconditionally (read errors are only logged by the
 * lower-level helper).
 */
uint8_t *
qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
	uint32_t offset, uint32_t length)
{
	scsi_block_requests(vha->host);
	qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
	scsi_unblock_requests(vha->host);
	return buf;
}
3082 3082
/*
 * qla82xx_write_flash_data
 *	Program 'dwords' dwords from 'dwptr' into flash starting at byte
 *	address 'faddr'.  Each time the address crosses into a new flash
 *	block (ha->fdt_block_size aligned) the sector is erased first.
 *	Writes normally proceed one dword at a time; a firmware-assisted
 *	burst path exists but see the note on 'page_mode' below.
 *
 *	Caller is expected to hold off SCSI I/O (see
 *	qla82xx_write_optrom_data()).
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
	uint32_t faddr, uint32_t dwords)
{
	int ret;
	uint32_t liter;
	/* NOTE(review): 'sec_mask' is computed but never used below. */
	uint32_t sec_mask, rest_addr;
	dma_addr_t optrom_dma;
	void *optrom = NULL;
	/*
	 * NOTE(review): 'page_mode' is always 0, so the burst-write
	 * allocation below is currently dead code — presumably kept for a
	 * future enable; confirm before relying on the burst path.
	 */
	int page_mode = 0;
	struct qla_hw_data *ha = vha->hw;

	ret = -1;

	/* Prepare burst-capable write on supported ISPs. */
	if (page_mode && !(faddr & 0xfff) &&
	    dwords > OPTROM_BURST_DWORDS) {
		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
		    &optrom_dma, GFP_KERNEL);
		if (!optrom) {
			/* Allocation failure is non-fatal: fall back to
			 * dword-at-a-time writes. */
			qla_printk(KERN_DEBUG, ha,
			    "Unable to allocate memory for optrom "
			    "burst write (%x KB).\n",
			    OPTROM_BURST_SIZE / 1024);
		}
	}

	/* Mask of the offset within one erase block. */
	rest_addr = ha->fdt_block_size - 1;
	sec_mask = ~rest_addr;

	ret = qla82xx_unprotect_flash(ha);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to unprotect flash for update.\n");
		goto write_done;
	}

	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
		/* Are we at the beginning of a sector? */
		if ((faddr & rest_addr) == 0) {

			/* Erase before the first write into each block. */
			ret = qla82xx_erase_sector(ha, faddr);
			if (ret) {
				DEBUG9(qla_printk(KERN_ERR, ha,
				    "Unable to erase sector: "
				    "address=%x.\n", faddr));
				break;
			}
		}

		/* Go with burst-write. */
		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
			/* Copy data to DMA'ble buffer. */
			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);

			ret = qla2x00_load_ram(vha, optrom_dma,
			    (ha->flash_data_off | faddr),
			    OPTROM_BURST_DWORDS);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Unable to burst-write optrom segment "
				    "(%x/%x/%llx).\n", ret,
				    (ha->flash_data_off | faddr),
				    (unsigned long long)optrom_dma);
				qla_printk(KERN_WARNING, ha,
				    "Reverting to slow-write.\n");

				/* Disable bursting for the rest of the
				 * transfer and retry this dword slowly. */
				dma_free_coherent(&ha->pdev->dev,
				    OPTROM_BURST_SIZE, optrom, optrom_dma);
				optrom = NULL;
			} else {
				/* Burst consumed a whole buffer's worth;
				 * skip ahead (loop increments add 1 more). */
				liter += OPTROM_BURST_DWORDS - 1;
				faddr += OPTROM_BURST_DWORDS - 1;
				dwptr += OPTROM_BURST_DWORDS - 1;
				continue;
			}
		}

		/* Slow path: program a single dword. */
		ret = qla82xx_write_flash_dword(ha, faddr,
		    cpu_to_le32(*dwptr));
		if (ret) {
			DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
			    "flash address=%x data=%x.\n", __func__,
			    ha->host_no, faddr, *dwptr));
			break;
		}
	}

	/* Always re-lock the part, even if the write loop bailed out. */
	ret = qla82xx_protect_flash(ha);
	if (ret)
		qla_printk(KERN_WARNING, ha,
		    "Unable to protect flash after update.\n");
write_done:
	if (optrom)
		dma_free_coherent(&ha->pdev->dev,
		    OPTROM_BURST_SIZE, optrom, optrom_dma);
	return ret;
}
3181 3181
3182 int 3182 int
3183 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, 3183 qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3184 uint32_t offset, uint32_t length) 3184 uint32_t offset, uint32_t length)
3185 { 3185 {
3186 int rval; 3186 int rval;
3187 3187
3188 /* Suspend HBA. */ 3188 /* Suspend HBA. */
3189 scsi_block_requests(vha->host); 3189 scsi_block_requests(vha->host);
3190 rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, 3190 rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3191 length >> 2); 3191 length >> 2);
3192 scsi_unblock_requests(vha->host); 3192 scsi_unblock_requests(vha->host);
3193 3193
3194 /* Convert return ISP82xx to generic */ 3194 /* Convert return ISP82xx to generic */
3195 if (rval) 3195 if (rval)
3196 rval = QLA_FUNCTION_FAILED; 3196 rval = QLA_FUNCTION_FAILED;
3197 else 3197 else
3198 rval = QLA_SUCCESS; 3198 rval = QLA_SUCCESS;
3199 return rval; 3199 return rval;
3200 } 3200 }
3201 3201
3202 void 3202 void
3203 qla82xx_start_iocbs(srb_t *sp) 3203 qla82xx_start_iocbs(srb_t *sp)
3204 { 3204 {
3205 struct qla_hw_data *ha = sp->fcport->vha->hw; 3205 struct qla_hw_data *ha = sp->fcport->vha->hw;
3206 struct req_que *req = ha->req_q_map[0]; 3206 struct req_que *req = ha->req_q_map[0];
3207 struct device_reg_82xx __iomem *reg; 3207 struct device_reg_82xx __iomem *reg;
3208 uint32_t dbval; 3208 uint32_t dbval;
3209 3209
3210 /* Adjust ring index. */ 3210 /* Adjust ring index. */
3211 req->ring_index++; 3211 req->ring_index++;
3212 if (req->ring_index == req->length) { 3212 if (req->ring_index == req->length) {
3213 req->ring_index = 0; 3213 req->ring_index = 0;
3214 req->ring_ptr = req->ring; 3214 req->ring_ptr = req->ring;
3215 } else 3215 } else
3216 req->ring_ptr++; 3216 req->ring_ptr++;
3217 3217
3218 reg = &ha->iobase->isp82; 3218 reg = &ha->iobase->isp82;
3219 dbval = 0x04 | (ha->portnum << 5); 3219 dbval = 0x04 | (ha->portnum << 5);
3220 3220
3221 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3221 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3222 if (ql2xdbwr) 3222 if (ql2xdbwr)
3223 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 3223 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3224 else { 3224 else {
3225 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval); 3225 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3226 wmb(); 3226 wmb();
3227 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 3227 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3228 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, 3228 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
3229 dbval); 3229 dbval);
3230 wmb(); 3230 wmb();
3231 } 3231 }
3232 } 3232 }
3233 } 3233 }
3234 3234
/*
 * qla82xx_rom_lock_recovery
 *	Forcibly recover the ROM hardware semaphore.  Used when a peer
 *	function may have died while holding the lock: try to take it
 *	(logging if someone else appears to hold it), then unconditionally
 *	release it so flash access can proceed.
 */
void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
	if (qla82xx_rom_lock(ha))
		/* Someone else is holding the lock. */
		qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}
3248 3248
/*
 * qla82xx_device_bootstrap
 *	Initialize device, set DEV_READY, start fw
 *
 *	Samples the PEG alive counter for ~2 s to decide whether firmware
 *	is already running; if a reset is needed or the PEGs look stuck,
 *	recovers the ROM lock and (re)starts firmware, driving the IDC
 *	device state to INITIALIZING and finally READY (or FAILED).
 *
 * Note:
 *	IDC lock must be held upon entry (it is dropped and re-taken
 *	around qla82xx_start_firmware()).
 *
 * Return:
 *	Success : QLA_SUCCESS
 *	Failed  : QLA_FUNCTION_FAILED
 *	(NOTE(review): the historical "0 / 1" note maps to these QLA_*
 *	codes in the actual code paths below.)
 */
static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int i, timeout;
	uint32_t old_count, count;
	struct qla_hw_data *ha = vha->hw;
	/* Assume the PEG processors are stuck until proven alive. */
	int need_reset = 0, peg_stuck = 1;

	need_reset = qla82xx_need_reset(ha);

	old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

	/* Sample the alive counter 10 times, 200 ms apart. */
	for (i = 0; i < 10; i++) {
		/* msleep_interruptible() returns remaining ms if a signal
		 * interrupted the sleep — treat that as a fatal abort. */
		timeout = msleep_interruptible(200);
		if (timeout) {
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_FAILED);
			return QLA_FUNCTION_FAILED;
		}

		/* Any tick of the counter means firmware is alive. */
		count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
		if (count != old_count)
			peg_stuck = 0;
	}

	if (need_reset) {
		/* We are trying to perform a recovery here. */
		if (peg_stuck)
			qla82xx_rom_lock_recovery(ha);
		goto dev_initialize;
	} else {
		/* Start of day for this ha context. */
		if (peg_stuck) {
			/* Either we are the first or recovery in progress. */
			qla82xx_rom_lock_recovery(ha);
			goto dev_initialize;
		} else
			/* Firmware already running. */
			goto dev_ready;
	}

	return rval;

dev_initialize:
	/* set to DEV_INITIALIZING */
	qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);

	/* Driver that sets device state to initializating sets IDC version */
	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);

	/* Firmware start may sleep/poll; do it without the IDC lock. */
	qla82xx_idc_unlock(ha);
	rval = qla82xx_start_firmware(vha);
	qla82xx_idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
		qla82xx_clear_drv_active(ha);
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
		return rval;
	}

dev_ready:
	qla_printk(KERN_INFO, ha, "HW State: READY\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);

	return QLA_SUCCESS;
}
3329 3329
3330 /* 3330 /*
3331 * qla82xx_need_qsnt_handler 3331 * qla82xx_need_qsnt_handler
3332 * Code to start quiescence sequence 3332 * Code to start quiescence sequence
3333 * 3333 *
3334 * Note: 3334 * Note:
3335 * IDC lock must be held upon entry 3335 * IDC lock must be held upon entry
3336 * 3336 *
3337 * Return: void 3337 * Return: void
3338 */ 3338 */
3339 3339
3340 static void 3340 static void
3341 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) 3341 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3342 { 3342 {
3343 struct qla_hw_data *ha = vha->hw; 3343 struct qla_hw_data *ha = vha->hw;
3344 uint32_t dev_state, drv_state, drv_active; 3344 uint32_t dev_state, drv_state, drv_active;
3345 unsigned long reset_timeout; 3345 unsigned long reset_timeout;
3346 3346
3347 if (vha->flags.online) { 3347 if (vha->flags.online) {
3348 /*Block any further I/O and wait for pending cmnds to complete*/ 3348 /*Block any further I/O and wait for pending cmnds to complete*/
3349 qla82xx_quiescent_state_cleanup(vha); 3349 qla82xx_quiescent_state_cleanup(vha);
3350 } 3350 }
3351 3351
3352 /* Set the quiescence ready bit */ 3352 /* Set the quiescence ready bit */
3353 qla82xx_set_qsnt_ready(ha); 3353 qla82xx_set_qsnt_ready(ha);
3354 3354
3355 /*wait for 30 secs for other functions to ack */ 3355 /*wait for 30 secs for other functions to ack */
3356 reset_timeout = jiffies + (30 * HZ); 3356 reset_timeout = jiffies + (30 * HZ);
3357 3357
3358 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3358 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3359 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3359 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3360 /* Its 2 that is written when qsnt is acked, moving one bit */ 3360 /* Its 2 that is written when qsnt is acked, moving one bit */
3361 drv_active = drv_active << 0x01; 3361 drv_active = drv_active << 0x01;
3362 3362
3363 while (drv_state != drv_active) { 3363 while (drv_state != drv_active) {
3364 3364
3365 if (time_after_eq(jiffies, reset_timeout)) { 3365 if (time_after_eq(jiffies, reset_timeout)) {
3366 /* quiescence timeout, other functions didn't ack 3366 /* quiescence timeout, other functions didn't ack
3367 * changing the state to DEV_READY 3367 * changing the state to DEV_READY
3368 */ 3368 */
3369 qla_printk(KERN_INFO, ha, 3369 qla_printk(KERN_INFO, ha,
3370 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); 3370 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3371 qla_printk(KERN_INFO, ha, 3371 qla_printk(KERN_INFO, ha,
3372 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, 3372 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3373 drv_state); 3373 drv_state);
3374 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3374 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3375 QLA82XX_DEV_READY); 3375 QLA82XX_DEV_READY);
3376 qla_printk(KERN_INFO, ha, 3376 qla_printk(KERN_INFO, ha,
3377 "HW State: DEV_READY\n"); 3377 "HW State: DEV_READY\n");
3378 qla82xx_idc_unlock(ha); 3378 qla82xx_idc_unlock(ha);
3379 qla2x00_perform_loop_resync(vha); 3379 qla2x00_perform_loop_resync(vha);
3380 qla82xx_idc_lock(ha); 3380 qla82xx_idc_lock(ha);
3381 3381
3382 qla82xx_clear_qsnt_ready(vha); 3382 qla82xx_clear_qsnt_ready(vha);
3383 return; 3383 return;
3384 } 3384 }
3385 3385
3386 qla82xx_idc_unlock(ha); 3386 qla82xx_idc_unlock(ha);
3387 msleep(1000); 3387 msleep(1000);
3388 qla82xx_idc_lock(ha); 3388 qla82xx_idc_lock(ha);
3389 3389
3390 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3390 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3391 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3391 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3392 drv_active = drv_active << 0x01; 3392 drv_active = drv_active << 0x01;
3393 } 3393 }
3394 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3394 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3395 /* everyone acked so set the state to DEV_QUIESCENCE */ 3395 /* everyone acked so set the state to DEV_QUIESCENCE */
3396 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 3396 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3397 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); 3397 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3398 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 3398 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3399 } 3399 }
3400 } 3400 }
3401 3401
3402 /* 3402 /*
3403 * qla82xx_wait_for_state_change 3403 * qla82xx_wait_for_state_change
3404 * Wait for device state to change from given current state 3404 * Wait for device state to change from given current state
3405 * 3405 *
3406 * Note: 3406 * Note:
3407 * IDC lock must not be held upon entry 3407 * IDC lock must not be held upon entry
3408 * 3408 *
3409 * Return: 3409 * Return:
3410 * Changed device state. 3410 * Changed device state.
3411 */ 3411 */
3412 uint32_t 3412 uint32_t
3413 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) 3413 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3414 { 3414 {
3415 struct qla_hw_data *ha = vha->hw; 3415 struct qla_hw_data *ha = vha->hw;
3416 uint32_t dev_state; 3416 uint32_t dev_state;
3417 3417
3418 do { 3418 do {
3419 msleep(1000); 3419 msleep(1000);
3420 qla82xx_idc_lock(ha); 3420 qla82xx_idc_lock(ha);
3421 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3421 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3422 qla82xx_idc_unlock(ha); 3422 qla82xx_idc_unlock(ha);
3423 } while (dev_state == curr_state); 3423 } while (dev_state == curr_state);
3424 3424
3425 return dev_state; 3425 return dev_state;
3426 } 3426 }
3427 3427
/*
 * qla82xx_dev_failed_handler
 *    Take the adapter offline after the device has entered the
 *    FAILED IDC state: drop our drv_active bit, fail all pending
 *    commands and mark the host as dead so the timer stops driving
 *    recovery.
 */
static void
qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Disable the board */
	qla_printk(KERN_INFO, ha, "Disabling the board\n");

	/* Clear this function's bit in DRV_ACTIVE under the IDC lock */
	qla82xx_idc_lock(ha);
	qla82xx_clear_drv_active(ha);
	qla82xx_idc_unlock(ha);

	/* Set DEV_FAILED flag to disable timer */
	vha->device_flags |= DFLG_DEV_FAILED;
	/* Fail outstanding I/O back to the midlayer and drop all rports */
	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
	qla2x00_mark_all_devices_lost(vha, 0);
	vha->flags.online = 0;
	vha->flags.init_done = 0;
}
3447 3447
/*
 * qla82xx_need_reset_handler
 *    Code to start reset sequence
 *
 * Note:
 *    IDC lock must be held upon entry
 *
 * Return: void
 */
static void
qla82xx_need_reset_handler(scsi_qla_host_t *vha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (vha->flags.online) {
		/* Quiesce this function before acking the reset; the
		 * cleanup/flash/nvram calls may sleep, so drop the lock. */
		qla82xx_idc_unlock(ha);
		qla2x00_abort_isp_cleanup(vha);
		ha->isp_ops->get_flash_version(vha, req->ring);
		ha->isp_ops->nvram_config(vha);
		qla82xx_idc_lock(ha);
	}

	/* Advertise that this function is ready for the reset */
	qla82xx_set_rst_ready(ha);

	/* wait for 10 seconds for reset ack from all functions */
	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);

	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);

	/* Wait until every active function has set its rst-ready bit */
	while (drv_state != drv_active) {
		if (time_after_eq(jiffies, reset_timeout)) {
			qla_printk(KERN_INFO, ha,
			    "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
			break;
		}
		qla82xx_idc_unlock(ha);
		msleep(1000);
		qla82xx_idc_lock(ha);
		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
	}

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
	    dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* Force to DEV_COLD unless someone else is starting a reset */
	if (dev_state != QLA82XX_DEV_INITIALIZING) {
		qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
	}
}
3506 3506
3507 int 3507 int
3508 qla82xx_check_fw_alive(scsi_qla_host_t *vha) 3508 qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3509 { 3509 {
3510 uint32_t fw_heartbeat_counter; 3510 uint32_t fw_heartbeat_counter;
3511 int status = 0; 3511 int status = 0;
3512 3512
3513 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3513 fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3514 QLA82XX_PEG_ALIVE_COUNTER); 3514 QLA82XX_PEG_ALIVE_COUNTER);
3515 /* all 0xff, assume AER/EEH in progress, ignore */ 3515 /* all 0xff, assume AER/EEH in progress, ignore */
3516 if (fw_heartbeat_counter == 0xffffffff) 3516 if (fw_heartbeat_counter == 0xffffffff)
3517 return status; 3517 return status;
3518 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3518 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3519 vha->seconds_since_last_heartbeat++; 3519 vha->seconds_since_last_heartbeat++;
3520 /* FW not alive after 2 seconds */ 3520 /* FW not alive after 2 seconds */
3521 if (vha->seconds_since_last_heartbeat == 2) { 3521 if (vha->seconds_since_last_heartbeat == 2) {
3522 vha->seconds_since_last_heartbeat = 0; 3522 vha->seconds_since_last_heartbeat = 0;
3523 status = 1; 3523 status = 1;
3524 } 3524 }
3525 } else 3525 } else
3526 vha->seconds_since_last_heartbeat = 0; 3526 vha->seconds_since_last_heartbeat = 0;
3527 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3527 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3528 return status; 3528 return status;
3529 } 3529 }
3530 3530
/*
 * qla82xx_device_state_handler
 *    Main state handler
 *
 * Note:
 *    The IDC lock is acquired and released internally by this routine
 *    (it must NOT be held upon entry).
 *
 * Return:
 *    Success : 0
 *    Failed : 1
 */
int
qla82xx_device_state_handler(scsi_qla_host_t *vha)
{
	uint32_t dev_state;
	uint32_t old_dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	struct qla_hw_data *ha = vha->hw;
	/* Number of loop iterations spent in the same device state;
	 * used to stop logging the state after 5 repeats. */
	int loopcount = 0;

	qla82xx_idc_lock(ha);
	if (!vha->flags.init_done)
		qla82xx_set_drv_active(vha);

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	old_dev_state = dev_state;
	qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
		dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

	while (1) {

		if (time_after_eq(jiffies, dev_init_timeout)) {
			DEBUG(qla_printk(KERN_INFO, ha,
				"%s: device init failed!\n",
				QLA2XXX_DRIVER_NAME));
			rval = QLA_FUNCTION_FAILED;
			break;
		}
		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		/* Restart the log-limit counter whenever the state changes */
		if (old_dev_state != dev_state) {
			loopcount = 0;
			old_dev_state = dev_state;
		}
		/* Only log the (unchanged) state for the first 5 passes */
		if (loopcount < 5) {
			qla_printk(KERN_INFO, ha,
			    "2:Device state is 0x%x = %s\n", dev_state,
			    dev_state < MAX_STATES ?
			    qdev_state[dev_state] : "Unknown");
		}

		switch (dev_state) {
		case QLA82XX_DEV_READY:
			goto exit;
		case QLA82XX_DEV_COLD:
			/* First function up: bootstrap the firmware */
			rval = qla82xx_device_bootstrap(vha);
			goto exit;
		case QLA82XX_DEV_INITIALIZING:
			/* Another function is initializing; wait it out */
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
			break;
		case QLA82XX_DEV_NEED_RESET:
			if (!ql2xdontresethba)
				qla82xx_need_reset_handler(vha);
			dev_init_timeout = jiffies +
				(ha->nx_dev_init_timeout * HZ);
			break;
		case QLA82XX_DEV_NEED_QUIESCENT:
			qla82xx_need_qsnt_handler(vha);
			/* Reset timeout value after quiescence handler */
			dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
							 * HZ);
			break;
		case QLA82XX_DEV_QUIESCENT:
			/* Owner will exit and other will wait for the state
			 * to get changed
			 */
			if (ha->flags.quiesce_owner)
				goto exit;

			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);

			/* Reset timeout value after quiescence handler */
			dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
							 * HZ);
			break;
		case QLA82XX_DEV_FAILED:
			qla82xx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			goto exit;
		default:
			/* Unknown state: wait and re-read */
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
		}
		loopcount++;
	}
exit:
	qla82xx_idc_unlock(ha);
	return rval;
}
3628 3638
/*
 * qla82xx_watchdog
 *    Periodic health check for ISP82xx: reacts to NEED_RESET /
 *    NEED_QUIESCENT device states and to a stalled firmware
 *    heartbeat by scheduling the appropriate DPC work.
 */
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
	uint32_t dev_state, halt_status;
	struct qla_hw_data *ha = vha->hw;

	/* don't poll if reset is going on */
	if (!ha->flags.isp82xx_reset_hdlr_active) {
		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		if (dev_state == QLA82XX_DEV_NEED_RESET &&
		    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
			/* Another function requested a reset: schedule abort */
			qla_printk(KERN_WARNING, ha,
			    "%s(): Adapter reset needed!\n", __func__);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
			!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
			DEBUG(qla_printk(KERN_INFO, ha,
				"scsi(%ld) %s - detected quiescence needed\n",
				vha->host_no, __func__));
			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else {
			if (qla82xx_check_fw_alive(vha)) {
				/* Firmware heartbeat stalled: dump the PEG
				 * halt status and program counters for
				 * post-mortem analysis. */
				halt_status = qla82xx_rd_32(ha,
				    QLA82XX_PEG_HALT_STATUS1);
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld): %s, Dumping hw/fw registers:\n "
				    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
				    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
				    " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
				    " PEG_NET_4_PC: 0x%x\n",
				    vha->host_no, __func__, halt_status,
				    qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
				    qla82xx_rd_32(ha,
					    QLA82XX_CRB_PEG_NET_0 + 0x3c),
				    qla82xx_rd_32(ha,
					    QLA82XX_CRB_PEG_NET_1 + 0x3c),
				    qla82xx_rd_32(ha,
					    QLA82XX_CRB_PEG_NET_2 + 0x3c),
				    qla82xx_rd_32(ha,
					    QLA82XX_CRB_PEG_NET_3 + 0x3c),
				    qla82xx_rd_32(ha,
					    QLA82XX_CRB_PEG_NET_4 + 0x3c));
				/* Unrecoverable halt vs. recoverable hang */
				if (halt_status & HALT_STATUS_UNRECOVERABLE) {
					set_bit(ISP_UNRECOVERABLE,
					    &vha->dpc_flags);
				} else {
					qla_printk(KERN_INFO, ha,
					    "scsi(%ld): %s - detect abort needed\n",
					    vha->host_no, __func__);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				qla2xxx_wake_dpc(vha);
				ha->flags.isp82xx_fw_hung = 1;
				if (ha->flags.mbox_busy) {
					/* A mailbox command can never complete
					 * with the firmware hung; complete it
					 * prematurely so the caller unblocks. */
					ha->flags.mbox_int = 1;
					DEBUG2(qla_printk(KERN_ERR, ha,
					    "Due to fw hung, doing premature "
					    "completion of mbx command\n"));
					if (test_bit(MBX_INTR_WAIT,
					    &ha->mbx_cmd_flags))
						complete(&ha->mbx_intr_comp);
				}
			}
		}
	}
}
3697 3707
3698 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3708 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3699 { 3709 {
3700 int rval; 3710 int rval;
3701 rval = qla82xx_device_state_handler(vha); 3711 rval = qla82xx_device_state_handler(vha);
3702 return rval; 3712 return rval;
3703 } 3713 }
3704 3714
/*
 * qla82xx_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t dev_state;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		qla_printk(KERN_WARNING, ha,
		    "%s(%ld): Device in failed state, "
		    "Exiting.\n", __func__, vha->host_no);
		return QLA_SUCCESS;
	}
	/* Tell the watchdog not to poll while the reset is in progress */
	ha->flags.isp82xx_reset_hdlr_active = 1;

	qla82xx_idc_lock(ha);
	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	/* If we're first, move the device into NEED_RESET so other
	 * functions join the reset protocol. */
	if (dev_state == QLA82XX_DEV_READY) {
		qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			QLA82XX_DEV_NEED_RESET);
	} else
		qla_printk(KERN_INFO, ha, "HW State: %s\n",
			dev_state < MAX_STATES ?
			qdev_state[dev_state] : "Unknown");
	qla82xx_idc_unlock(ha);

	/* Drive the IDC state machine until READY (or failure) */
	rval = qla82xx_device_state_handler(vha);

	qla82xx_idc_lock(ha);
	qla82xx_clear_rst_ready(ha);
	qla82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ha->flags.isp82xx_fw_hung = 0;
		ha->flags.isp82xx_reset_hdlr_active = 0;
		qla82xx_restart_isp(vha);
	}

	if (rval) {
		vha->flags.online = 1;
		/* Retry accounting: give up and disable the board after
		 * MAX_RETRIES_OF_ISP_ABORT failed attempts. */
		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
			if (ha->isp_abort_cnt == 0) {
				qla_printk(KERN_WARNING, ha,
				    "ISP error recovery failed - "
				    "board disabled\n");
				/*
				 * The next call disables the board
				 * completely.
				 */
				ha->isp_ops->reset_adapter(vha);
				vha->flags.online = 0;
				clear_bit(ISP_ABORT_RETRY,
				    &vha->dpc_flags);
				rval = QLA_SUCCESS;
			} else { /* schedule another ISP abort */
				ha->isp_abort_cnt--;
				DEBUG(qla_printk(KERN_INFO, ha,
				    "qla%ld: ISP abort - retry remaining %d\n",
				    vha->host_no, ha->isp_abort_cnt));
				rval = QLA_FUNCTION_FAILED;
			}
		} else {
			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
			DEBUG(qla_printk(KERN_INFO, ha,
			    "(%ld): ISP error recovery - retrying (%d) "
			    "more times\n", vha->host_no, ha->isp_abort_cnt));
			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			rval = QLA_FUNCTION_FAILED;
		}
	}
	return rval;
}
3788 3798
3789 /* 3799 /*
3790 * qla82xx_fcoe_ctx_reset 3800 * qla82xx_fcoe_ctx_reset
3791 * Perform a quick reset and aborts all outstanding commands. 3801 * Perform a quick reset and aborts all outstanding commands.
3792 * This will only perform an FCoE context reset and avoids a full blown 3802 * This will only perform an FCoE context reset and avoids a full blown
3793 * chip reset. 3803 * chip reset.
3794 * 3804 *
3795 * Input: 3805 * Input:
3796 * ha = adapter block pointer. 3806 * ha = adapter block pointer.
3797 * is_reset_path = flag for identifying the reset path. 3807 * is_reset_path = flag for identifying the reset path.
3798 * 3808 *
3799 * Returns: 3809 * Returns:
3800 * 0 = success 3810 * 0 = success
3801 */ 3811 */
3802 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) 3812 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3803 { 3813 {
3804 int rval = QLA_FUNCTION_FAILED; 3814 int rval = QLA_FUNCTION_FAILED;
3805 3815
3806 if (vha->flags.online) { 3816 if (vha->flags.online) {
3807 /* Abort all outstanding commands, so as to be requeued later */ 3817 /* Abort all outstanding commands, so as to be requeued later */
3808 qla2x00_abort_isp_cleanup(vha); 3818 qla2x00_abort_isp_cleanup(vha);
3809 } 3819 }
3810 3820
3811 /* Stop currently executing firmware. 3821 /* Stop currently executing firmware.
3812 * This will destroy existing FCoE context at the F/W end. 3822 * This will destroy existing FCoE context at the F/W end.
3813 */ 3823 */
3814 qla2x00_try_to_stop_firmware(vha); 3824 qla2x00_try_to_stop_firmware(vha);
3815 3825
3816 /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ 3826 /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3817 rval = qla82xx_restart_isp(vha); 3827 rval = qla82xx_restart_isp(vha);
3818 3828
3819 return rval; 3829 return rval;
3820 } 3830 }
3821 3831
3822 /* 3832 /*
3823 * qla2x00_wait_for_fcoe_ctx_reset 3833 * qla2x00_wait_for_fcoe_ctx_reset
3824 * Wait till the FCoE context is reset. 3834 * Wait till the FCoE context is reset.
3825 * 3835 *
3826 * Note: 3836 * Note:
3827 * Does context switching here. 3837 * Does context switching here.
3828 * Release SPIN_LOCK (if any) before calling this routine. 3838 * Release SPIN_LOCK (if any) before calling this routine.
3829 * 3839 *
3830 * Return: 3840 * Return:
3831 * Success (fcoe_ctx reset is done) : 0 3841 * Success (fcoe_ctx reset is done) : 0
3832 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 3842 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1
3833 */ 3843 */
3834 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) 3844 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3835 { 3845 {
3836 int status = QLA_FUNCTION_FAILED; 3846 int status = QLA_FUNCTION_FAILED;
3837 unsigned long wait_reset; 3847 unsigned long wait_reset;
3838 3848
3839 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); 3849 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3840 while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3850 while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3841 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 3851 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3842 && time_before(jiffies, wait_reset)) { 3852 && time_before(jiffies, wait_reset)) {
3843 3853
3844 set_current_state(TASK_UNINTERRUPTIBLE); 3854 set_current_state(TASK_UNINTERRUPTIBLE);
3845 schedule_timeout(HZ); 3855 schedule_timeout(HZ);
3846 3856
3847 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && 3857 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3848 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 3858 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3849 status = QLA_SUCCESS; 3859 status = QLA_SUCCESS;
3850 break; 3860 break;
3851 } 3861 }
3852 } 3862 }
3853 DEBUG2(printk(KERN_INFO 3863 DEBUG2(printk(KERN_INFO
3854 "%s status=%d\n", __func__, status)); 3864 "%s status=%d\n", __func__, status));
3855 3865
3856 return status; 3866 return status;
3857 } 3867 }
3858 3868
3859 void 3869 void
3860 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) 3870 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3861 { 3871 {
3862 int i; 3872 int i;
3863 unsigned long flags; 3873 unsigned long flags;
3864 struct qla_hw_data *ha = vha->hw; 3874 struct qla_hw_data *ha = vha->hw;
3865 3875
3866 /* Check if 82XX firmware is alive or not 3876 /* Check if 82XX firmware is alive or not
3867 * We may have arrived here from NEED_RESET 3877 * We may have arrived here from NEED_RESET
3868 * detection only 3878 * detection only
3869 */ 3879 */
3870 if (!ha->flags.isp82xx_fw_hung) { 3880 if (!ha->flags.isp82xx_fw_hung) {
3871 for (i = 0; i < 2; i++) { 3881 for (i = 0; i < 2; i++) {
3872 msleep(1000); 3882 msleep(1000);
3873 if (qla82xx_check_fw_alive(vha)) { 3883 if (qla82xx_check_fw_alive(vha)) {
3874 ha->flags.isp82xx_fw_hung = 1; 3884 ha->flags.isp82xx_fw_hung = 1;
3875 if (ha->flags.mbox_busy) { 3885 if (ha->flags.mbox_busy) {
3876 ha->flags.mbox_int = 1; 3886 ha->flags.mbox_int = 1;
3877 complete(&ha->mbx_intr_comp); 3887 complete(&ha->mbx_intr_comp);
3878 } 3888 }
3879 break; 3889 break;
3880 } 3890 }
3881 } 3891 }
3882 } 3892 }
3883 3893
3884 /* Abort all commands gracefully if fw NOT hung */ 3894 /* Abort all commands gracefully if fw NOT hung */
3885 if (!ha->flags.isp82xx_fw_hung) { 3895 if (!ha->flags.isp82xx_fw_hung) {
3886 int cnt, que; 3896 int cnt, que;
3887 srb_t *sp; 3897 srb_t *sp;
3888 struct req_que *req; 3898 struct req_que *req;
3889 3899
3890 spin_lock_irqsave(&ha->hardware_lock, flags); 3900 spin_lock_irqsave(&ha->hardware_lock, flags);
3891 for (que = 0; que < ha->max_req_queues; que++) { 3901 for (que = 0; que < ha->max_req_queues; que++) {
3892 req = ha->req_q_map[que]; 3902 req = ha->req_q_map[que];
3893 if (!req) 3903 if (!req)
3894 continue; 3904 continue;
3895 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3905 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3896 sp = req->outstanding_cmds[cnt]; 3906 sp = req->outstanding_cmds[cnt];
3897 if (sp) { 3907 if (sp) {
3898 if (!sp->ctx || 3908 if (!sp->ctx ||
3899 (sp->flags & SRB_FCP_CMND_DMA_VALID)) { 3909 (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3900 spin_unlock_irqrestore( 3910 spin_unlock_irqrestore(
3901 &ha->hardware_lock, flags); 3911 &ha->hardware_lock, flags);
3902 if (ha->isp_ops->abort_command(sp)) { 3912 if (ha->isp_ops->abort_command(sp)) {
3903 qla_printk(KERN_INFO, ha, 3913 qla_printk(KERN_INFO, ha,
3904 "scsi(%ld): mbx abort command failed in %s\n", 3914 "scsi(%ld): mbx abort command failed in %s\n",
3905 vha->host_no, __func__); 3915 vha->host_no, __func__);
3906 } else { 3916 } else {
3907 qla_printk(KERN_INFO, ha, 3917 qla_printk(KERN_INFO, ha,
3908 "scsi(%ld): mbx abort command success in %s\n", 3918 "scsi(%ld): mbx abort command success in %s\n",
3909 vha->host_no, __func__); 3919 vha->host_no, __func__);
3910 } 3920 }
3911 spin_lock_irqsave(&ha->hardware_lock, flags); 3921 spin_lock_irqsave(&ha->hardware_lock, flags);
3912 } 3922 }
3913 } 3923 }
3914 } 3924 }
3915 } 3925 }
3916 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3926 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3917 3927
3918 /* Wait for pending cmds (physical and virtual) to complete */ 3928 /* Wait for pending cmds (physical and virtual) to complete */
3919 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3929 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3920 WAIT_HOST) == QLA_SUCCESS) { 3930 WAIT_HOST) == QLA_SUCCESS) {
3921 DEBUG2(qla_printk(KERN_INFO, ha, 3931 DEBUG2(qla_printk(KERN_INFO, ha,
3922 "Done wait for pending commands\n")); 3932 "Done wait for pending commands\n"));
3923 } 3933 }
3924 } 3934 }
3925 } 3935 }
3926 3936