Commit 1b427a33b06ae76c64c1ad9af899a45b682ba6c6

Authored by Bartlomiej Zolnierkiewicz
1 parent fc99856a45

sgiioc4: fixup message on resource allocation failure

There can be more than one sgiioc4 card in the system, so also print
the PCI device name on resource allocation failure (so we know which
card is the problematic one).

Reported-by: Jeremy Higdon <jeremy@sgi.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
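With the PCI device name included, the failure message identifies the
offending card by its bus address. For a hypothetical card at
0000:01:03.0 it would read roughly:

    SGIIOC4 0000:01:03.0: -- ERROR, Addresses 0x... to 0x... ALREADY in use

whereas the old message was prefixed with __func__ and DRV_NAME, which
are identical for every card.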

Showing 1 changed file with 2 additions and 2 deletions

drivers/ide/pci/sgiioc4.c
/*
 * Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/ioc4.h>
#include <asm/io.h>

#include <linux/ide.h>

#define DRV_NAME "SGIIOC4"

/* IOC4 Specific Definitions */
#define IOC4_CMD_OFFSET		0x100
#define IOC4_CTRL_OFFSET	0x120
#define IOC4_DMA_OFFSET		0x140
#define IOC4_INTR_OFFSET	0x0

#define IOC4_TIMING		0x00
#define IOC4_DMA_PTR_L		0x01
#define IOC4_DMA_PTR_H		0x02
#define IOC4_DMA_ADDR_L		0x03
#define IOC4_DMA_ADDR_H		0x04
#define IOC4_BC_DEV		0x05
#define IOC4_BC_MEM		0x06
#define IOC4_DMA_CTRL		0x07
#define IOC4_DMA_END_ADDR	0x08

/* Bits in the IOC4 Control/Status Register */
#define IOC4_S_DMA_START	0x01
#define IOC4_S_DMA_STOP		0x02
#define IOC4_S_DMA_DIR		0x04
#define IOC4_S_DMA_ACTIVE	0x08
#define IOC4_S_DMA_ERROR	0x10
#define IOC4_ATA_MEMERR		0x02

/* Read/Write Directions */
#define IOC4_DMA_WRITE		0x04
#define IOC4_DMA_READ		0x00

/* Interrupt Register Offsets */
#define IOC4_INTR_REG		0x03
#define IOC4_INTR_SET		0x05
#define IOC4_INTR_CLEAR		0x07

#define IOC4_IDE_CACHELINE_SIZE	128
#define IOC4_CMD_CTL_BLK_SIZE	0x20
#define IOC4_SUPPORTED_FIRMWARE_REV 46

typedef struct {
	u32 timing_reg0;
	u32 timing_reg1;
	u32 low_mem_ptr;
	u32 high_mem_ptr;
	u32 low_mem_addr;
	u32 high_mem_addr;
	u32 dev_byte_count;
	u32 mem_byte_count;
	u32 status;
} ioc4_dma_regs_t;

/* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */
/* IOC4 has only 1 IDE channel */
#define IOC4_PRD_BYTES		16
#define IOC4_PRD_ENTRIES	(PAGE_SIZE /(4*IOC4_PRD_BYTES))


static void
sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
			unsigned long ctrl_port, unsigned long irq_port)
{
	unsigned long reg = data_port;
	int i;

	/* Registers are word (32 bit) aligned */
	for (i = 0; i <= 7; i++)
		hw->io_ports_array[i] = reg + i * 4;

	if (ctrl_port)
		hw->io_ports.ctl_addr = ctrl_port;

	if (irq_port)
		hw->io_ports.irq_addr = irq_port;
}

static void
sgiioc4_maskproc(ide_drive_t * drive, int mask)
{
	writeb(ATA_DEVCTL_OBS | (mask ? 2 : 0),
	       (void __iomem *)drive->hwif->io_ports.ctl_addr);
}

static int
sgiioc4_checkirq(ide_hwif_t * hwif)
{
	unsigned long intr_addr =
		hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;

	if ((u8)readl((void __iomem *)intr_addr) & 0x03)
		return 1;

	return 0;
}

static u8 sgiioc4_read_status(ide_hwif_t *);

static int
sgiioc4_clearirq(ide_drive_t * drive)
{
	u32 intr_reg;
	ide_hwif_t *hwif = HWIF(drive);
	struct ide_io_ports *io_ports = &hwif->io_ports;
	unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);

	/* Code to check for PCI error conditions */
	intr_reg = readl((void __iomem *)other_ir);
	if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
		/*
		 * Using sgiioc4_read_status to read the Status register has a
		 * side effect of clearing the interrupt. The first read should
		 * clear it if it is set. The second read should return
		 * a "clear" status if it got cleared. If not, then spin
		 * for a bit trying to clear it.
		 */
		u8 stat = sgiioc4_read_status(hwif);
		int count = 0;

		stat = sgiioc4_read_status(hwif);
		while ((stat & 0x80) && (count++ < 100)) {
			udelay(1);
			stat = sgiioc4_read_status(hwif);
		}

		if (intr_reg & 0x02) {
			struct pci_dev *dev = to_pci_dev(hwif->dev);
			/* Error when transferring DMA data on PCI bus */
			u32 pci_err_addr_low, pci_err_addr_high,
			    pci_stat_cmd_reg;

			pci_err_addr_low =
				readl((void __iomem *)io_ports->irq_addr);
			pci_err_addr_high =
				readl((void __iomem *)(io_ports->irq_addr + 4));
			pci_read_config_dword(dev, PCI_COMMAND,
					      &pci_stat_cmd_reg);
			printk(KERN_ERR
			       "%s(%s) : PCI Bus Error when doing DMA:"
			       " status-cmd reg is 0x%x\n",
			       __func__, drive->name, pci_stat_cmd_reg);
			printk(KERN_ERR
			       "%s(%s) : PCI Error Address is 0x%x%x\n",
			       __func__, drive->name,
			       pci_err_addr_high, pci_err_addr_low);
			/* Clear the PCI Error indicator */
			pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
		}

		/* Clear the Interrupt, Error bits on the IOC4 */
		writel(0x03, (void __iomem *)other_ir);

		intr_reg = readl((void __iomem *)other_ir);
	}

	return intr_reg & 3;
}

static void sgiioc4_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
	unsigned int reg = readl((void __iomem *)ioc4_dma_addr);
	unsigned int temp_reg = reg | IOC4_S_DMA_START;

	writel(temp_reg, (void __iomem *)ioc4_dma_addr);
}

static u32
sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
{
	unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
	u32 ioc4_dma;
	int count;

	count = 0;
	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
	while ((ioc4_dma & IOC4_S_DMA_STOP) && (count++ < 200)) {
		udelay(1);
		ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
	}
	return ioc4_dma;
}

/* Stops the IOC4 DMA Engine */
static int sgiioc4_dma_end(ide_drive_t *drive)
{
	u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long dma_base = hwif->dma_base;
	int dma_stat = 0;
	unsigned long *ending_dma = ide_get_hwifdata(hwif);

	writel(IOC4_S_DMA_STOP, (void __iomem *)(dma_base + IOC4_DMA_CTRL * 4));

	ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

	if (ioc4_dma & IOC4_S_DMA_STOP) {
		printk(KERN_ERR
		       "%s(%s): IOC4 DMA STOP bit is still 1 :"
		       "ioc4_dma_reg 0x%x\n",
		       __func__, drive->name, ioc4_dma);
		dma_stat = 1;
	}

	/*
	 * The IOC4 will DMA 1's to the ending dma area to indicate that
	 * previous data DMA is complete. This is necessary because of relaxed
	 * ordering between register reads and DMA writes on the Altix.
	 */
	while ((cnt++ < 200) && (!valid)) {
		for (num = 0; num < 16; num++) {
			if (ending_dma[num]) {
				valid = 1;
				break;
			}
		}
		udelay(1);
	}
	if (!valid) {
		printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
		       drive->name);
		dma_stat = 1;
	}

	bc_dev = readl((void __iomem *)(dma_base + IOC4_BC_DEV * 4));
	bc_mem = readl((void __iomem *)(dma_base + IOC4_BC_MEM * 4));

	if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
		if (bc_dev > bc_mem + 8) {
			printk(KERN_ERR
			       "%s(%s): WARNING!! byte_count_dev %d "
			       "!= byte_count_mem %d\n",
			       __func__, drive->name, bc_dev, bc_mem);
		}
	}

	drive->waiting_for_dma = 0;
	ide_destroy_dmatable(drive);

	return dma_stat;
}

static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
}

/* returns 1 if dma irq issued, 0 otherwise */
static int sgiioc4_dma_test_irq(ide_drive_t *drive)
{
	return sgiioc4_checkirq(HWIF(drive));
}

static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
{
	if (!on)
		sgiioc4_clearirq(drive);
}

static void
sgiioc4_resetproc(ide_drive_t * drive)
{
	sgiioc4_dma_end(drive);
	sgiioc4_clearirq(drive);
}

static void
sgiioc4_dma_lost_irq(ide_drive_t * drive)
{
	sgiioc4_resetproc(drive);

	ide_dma_lost_irq(drive);
}

static u8 sgiioc4_read_status(ide_hwif_t *hwif)
{
	unsigned long port = hwif->io_ports.status_addr;
	u8 reg = (u8) readb((void __iomem *) port);

	if ((port & 0xFFF) == 0x11C) {	/* Status register of IOC4 */
		if (reg & 0x51) {	/* Not busy...check for interrupt */
			unsigned long other_ir = port - 0x110;
			unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);

			/* Clear the Interrupt, Error bits on the IOC4 */
			if (intr_reg & 0x03) {
				writel(0x03, (void __iomem *) other_ir);
				intr_reg = (u32) readl((void __iomem *) other_ir);
			}
		}
	}

	return reg;
}

/* Creates a dma map for the scatter-gather list entries */
static int __devinit
ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
	void __iomem *virt_dma_base;
	int num_ports = sizeof (ioc4_dma_regs_t);
	void *pad;

	if (dma_base == 0)
		return -1;

	printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
	       dma_base, dma_base + num_ports - 1);

	if (!request_mem_region(dma_base, num_ports, hwif->name)) {
		printk(KERN_ERR
		       "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
		       "ALREADY in use\n",
		       __func__, hwif->name, (void *) dma_base,
		       (void *) dma_base + num_ports - 1);
		return -1;
	}

	virt_dma_base = ioremap(dma_base, num_ports);
	if (virt_dma_base == NULL) {
		printk(KERN_ERR
		       "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
		       __func__, hwif->name, dma_base, dma_base + num_ports - 1);
		goto dma_remap_failure;
	}
	hwif->dma_base = (unsigned long) virt_dma_base;

	hwif->dmatable_cpu = pci_alloc_consistent(dev,
					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
					  &hwif->dmatable_dma);

	if (!hwif->dmatable_cpu)
		goto dma_pci_alloc_failure;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
				   (dma_addr_t *)&hwif->extra_base);
	if (pad) {
		ide_set_hwifdata(hwif, pad);
		return 0;
	}

	pci_free_consistent(dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
			    hwif->dmatable_cpu, hwif->dmatable_dma);
	printk(KERN_INFO
	       "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
	       __func__, hwif->name);
	printk(KERN_INFO
	       "Changing from DMA to PIO mode for Drive %s\n", hwif->name);

dma_pci_alloc_failure:
	iounmap(virt_dma_base);

dma_remap_failure:
	release_mem_region(dma_base, num_ports);

	return -1;
}

/* Initializes the IOC4 DMA Engine */
static void
sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
{
	u32 ioc4_dma;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long dma_base = hwif->dma_base;
	unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
	u32 dma_addr, ending_dma_addr;

	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);

	if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
		printk(KERN_WARNING
		       "%s(%s):Warning!! DMA from previous transfer was still active\n",
		       __func__, drive->name);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 Dma STOP bit is still 1\n",
			       __func__, drive->name);
	}

	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
	if (ioc4_dma & IOC4_S_DMA_ERROR) {
		printk(KERN_WARNING
		       "%s(%s) : Warning!! - DMA Error during Previous"
		       " transfer | status 0x%x\n",
		       __func__, drive->name, ioc4_dma);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR
			       "%s(%s) : IOC4 DMA STOP bit is still 1\n",
			       __func__, drive->name);
	}

	/* Address of the Scatter Gather List */
	dma_addr = cpu_to_le32(hwif->dmatable_dma);
	writel(dma_addr, (void __iomem *)(dma_base + IOC4_DMA_PTR_L * 4));

	/* Address of the Ending DMA */
	memset(ide_get_hwifdata(hwif), 0, IOC4_IDE_CACHELINE_SIZE);
	ending_dma_addr = cpu_to_le32(hwif->extra_base);
	writel(ending_dma_addr, (void __iomem *)(dma_base + IOC4_DMA_END_ADDR * 4));

	writel(dma_direction, (void __iomem *)ioc4_dma_addr);
	drive->waiting_for_dma = 1;
}

/* IOC4 Scatter Gather list Format */
/* 128 Bit entries to support 64 bit addresses in the future */
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero           |         Lower 32 bits- address   | */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero           |EOL| 15 unused     | 16 Bit Length| */
/* --------------------------------------------------------------------- */
/* Creates the scatter gather list, DMA Table */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0;	/* revert to PIO for this request */
}

static int sgiioc4_dma_setup(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int count = 0;
	int ddir;

	if (rq_data_dir(rq))
		ddir = PCI_DMA_TODEVICE;
	else
		ddir = PCI_DMA_FROMDEVICE;

	if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
		/* try PIO instead of DMA */
		ide_map_sg(drive, rq);
		return 1;
	}

	if (rq_data_dir(rq))
		/* Writes TO the IOC4 FROM Main Memory */
		ddir = IOC4_DMA_READ;
	else
		/* Writes FROM the IOC4 TO Main Memory */
		ddir = IOC4_DMA_WRITE;

	sgiioc4_configure_for_dma(ddir, drive);

	return 0;
}

static const struct ide_tp_ops sgiioc4_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= sgiioc4_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};

static const struct ide_port_ops sgiioc4_port_ops = {
	.set_dma_mode		= sgiioc4_set_dma_mode,
	/* reset DMA engine, clear IRQs */
	.resetproc		= sgiioc4_resetproc,
	/* mask on/off NIEN register */
	.maskproc		= sgiioc4_maskproc,
};

static const struct ide_dma_ops sgiioc4_dma_ops = {
	.dma_host_set		= sgiioc4_dma_host_set,
	.dma_setup		= sgiioc4_dma_setup,
	.dma_start		= sgiioc4_dma_start,
	.dma_end		= sgiioc4_dma_end,
	.dma_test_irq		= sgiioc4_dma_test_irq,
	.dma_lost_irq		= sgiioc4_dma_lost_irq,
	.dma_timeout		= ide_dma_timeout,
};

static const struct ide_port_info sgiioc4_port_info __devinitdata = {
	.name			= DRV_NAME,
	.chipset		= ide_pci,
	.init_dma		= ide_dma_sgiioc4,
	.tp_ops			= &sgiioc4_tp_ops,
	.port_ops		= &sgiioc4_port_ops,
	.dma_ops		= &sgiioc4_dma_ops,
	.host_flags		= IDE_HFLAG_MMIO,
	.mwdma_mask		= ATA_MWDMA2_ONLY,
};

static int __devinit
sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
{
	unsigned long cmd_base, irqport;
	unsigned long bar0, cmd_phys_base, ctl;
	void __iomem *virt_base;
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	struct ide_port_info d = sgiioc4_port_info;
	int rc;

	/* Get the CmdBlk and CtrlBlk Base Registers */
	bar0 = pci_resource_start(dev, 0);
	virt_base = ioremap(bar0, pci_resource_len(dev, 0));
	if (virt_base == NULL) {
		printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
		       DRV_NAME, bar0);
		return -ENOMEM;
	}
	cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
	ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
	irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;

	cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
	if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
				DRV_NAME)) {
		printk(KERN_ERR
-		       "%s : %s -- ERROR, Addresses "
+		       "%s %s: -- ERROR, Addresses "
		       "0x%p to 0x%p ALREADY in use\n",
-		       __func__, DRV_NAME, (void *) cmd_phys_base,
+		       DRV_NAME, pci_name(dev), (void *)cmd_phys_base,
		       (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
		return -ENOMEM;
	}

	/* Initialize the IO registers */
	memset(&hw, 0, sizeof(hw));
	sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
	hw.irq = dev->irq;
	hw.chipset = ide_pci;
	hw.dev = &dev->dev;

	/* Initializing chipset IRQ Registers */
	writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));

	host = ide_host_alloc(&d, hws);
	if (host == NULL) {
		rc = -ENOMEM;
		goto err;
	}

	rc = ide_host_register(host, &d, hws);
	if (rc)
		goto err_free;

	return 0;
err_free:
	ide_host_free(host);
err:
	release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
	iounmap(virt_base);
	return rc;
}

static unsigned int __devinit
pci_init_sgiioc4(struct pci_dev *dev)
{
	int ret;

	printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n",
	       DRV_NAME, pci_name(dev), dev->revision);

	if (dev->revision < IOC4_SUPPORTED_FIRMWARE_REV) {
		printk(KERN_ERR "Skipping %s IDE controller in slot %s: "
		       "firmware is obsolete - please upgrade to "
		       "revision46 or higher\n",
		       DRV_NAME, pci_name(dev));
		ret = -EAGAIN;
		goto out;
	}
	ret = sgiioc4_ide_setup_pci_device(dev);
out:
	return ret;
}

int
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
	/* PCI-RT does not bring out IDE connection.
	 * Do not attach to this particular IOC4.
	 */
	if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
		return 0;

	return pci_init_sgiioc4(idd->idd_pdev);
}

static struct ioc4_submodule ioc4_ide_submodule = {
	.is_name = "IOC4_ide",
	.is_owner = THIS_MODULE,
	.is_probe = ioc4_ide_attach_one,
	/* .is_remove = ioc4_ide_remove_one, */
};

static int __init ioc4_ide_init(void)
{
	return ioc4_register_submodule(&ioc4_ide_submodule);
}

late_initcall(ioc4_ide_init); /* Call only after IDE init is done */

MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");