Commit 32eaeae177bf77fbc224c35262add45bd5e6abb3

Authored by Clemens Ladisch
Committed by Stefan Richter
1 parent a74477db91

firewire: ohci: work around selfID junk due to wrong gap count

If a device's firmware initiates a bus reset by setting the IBR bit in
PHY register 1 without resetting the gap count field to 63 (and without
having sent a PHY configuration packet beforehand), the gap count of
this node will remain at the old value after the bus reset and thus be
inconsistent with the gap count on all other nodes.
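
For illustration from the host side, a correct initiator either sends a
PHY configuration packet first or resets the gap count in the same
write that sets IBR.  A minimal sketch, using the update_phy_reg()
helper from the driver below and assuming the 1394a layout of PHY
register 1 (IBR in bit 6, gap count in bits 5:0; PHY_IBR is a
hypothetical name for that bit):

	#define PHY_IBR 0x40	/* initiate bus reset, PHY register 1 bit 6 */

	/* set IBR and force the gap count back to 63 in one register write */
	update_phy_reg(ohci, 1, 0, PHY_IBR | 63);

Buggy firmware instead sets IBR alone, leaving the stale gap count in
bits 5:0.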

The bus manager is supposed to detect the inconsistent gap count values
in the self ID packets and correct them by issuing another bus reset.

However, if the buggy device happens to be the cycle master, and if it
sends a cycle start packet immediately after the bus reset (which is
likely after a long bus reset), then the time between the end of the
selfID phase and the start of the cycle start packet will be based on
the too-small gap count value, so this gap will be too short to be
detected as a subaction gap by the other nodes.  This means that the
cycle start packet will be assumed to be self ID data, and will be
stored after the actual self ID quadlets in the self ID buffer.

This garbage in the self ID buffer made firewire-core ignore all of the
self ID data, and thus prevented the Linux bus manager from correcting
the problem.  Furthermore, because the bus reset handling was aborted
completely, asynchronous transfers would no longer be handled correctly,
and fw_run_transaction() would hang until the next bus reset.

To fix this, make the detection of inconsistent self IDs more
discriminating:  If the invalid data in the self ID buffer looks like
a cycle start packet, we can assume that the previous data in the buffer
is correctly received self ID information, and process it normally.

(We inspect only the first quadlet of the cycle start packet, because
this value is different enough from any valid self ID quadlet, and many
controllers do not store the cycle start packet in five quadlets because
they expect self ID data to have an even number of quadlets.)
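
Schematically, the new check in bus_reset_work()'s self-ID validation
loop (the changed hunk lies beyond the excerpt quoted below) amounts to
the following sketch, where id and id2 denote a self-ID quadlet and its
inverted-check companion and j counts the self IDs validated so far;
0xffff008f is the first quadlet of a cycle start packet (destination ID
0xffff, tcode 8, priority field 0xf):

	if (id != ~id2) {
		if (id == 0xffff008f) {
			/* junk looks like a cycle start packet: keep the
			 * valid self IDs read so far and let the bus
			 * manager correct the gap count */
			fw_notify("ignoring spurious self IDs\n");
			self_id_count = j;
			break;
		}
		fw_error("inconsistent self IDs\n");
		return;
	}

Processing the self IDs collected so far lets firewire-core's bus
management proceed, which can then correct the gap count with another
bus reset.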

This bug has been observed when a bus-powered DesktopKonnekt6 is
switched off with its power button.

Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

Showing 1 changed file with 16 additions and 2 deletions

drivers/firewire/ohci.c
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)		((v) << 0)
#define IT_HEADER_TCODE(v)	((v) << 4)
#define IT_HEADER_CHANNEL(v)	((v) << 8)
#define IT_HEADER_TAG(v)	((v) << 14)
#define IT_HEADER_SPEED(v)	((v) << 16)
#define IT_HEADER_DATA_LENGTH(v)	((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;

	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id_cpu;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd

#define QUIRK_CYCLE_TIMER	1
#define QUIRK_RESET_PACKET	2
#define QUIRK_BE_HEADERS	4
#define QUIRK_NO_1394A		8
#define QUIRK_NO_MSI		16
#define QUIRK_TI_SLLZ059	32

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianess = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};

static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		fw_notify("A%c %s, PHY %08x %08x\n",
			  dir, evts[evt], header[1], header[2]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}

#else

#define param_debug 0
static inline void log_irqs(u32 evt) {}
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to read phy reg\n");

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to write phy reg\n");

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	if (ctx->buffer)
		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ctx->ohci);

		fw_error("AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);
954 954
955 return; 955 return;
956 956
957 error: 957 error:
958 ctx->pointer = NULL; 958 ctx->pointer = NULL;
959 } 959 }
960 960
961 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, 961 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
962 unsigned int descriptors_offset, u32 regs) 962 unsigned int descriptors_offset, u32 regs)
963 { 963 {
964 unsigned int i; 964 unsigned int i;
965 dma_addr_t dma_addr; 965 dma_addr_t dma_addr;
966 struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES]; 966 struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
967 struct descriptor *d; 967 struct descriptor *d;
968 968
969 ctx->regs = regs; 969 ctx->regs = regs;
970 ctx->ohci = ohci; 970 ctx->ohci = ohci;
971 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); 971 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
972 972
973 for (i = 0; i < AR_BUFFERS; i++) { 973 for (i = 0; i < AR_BUFFERS; i++) {
974 ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32); 974 ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
975 if (!ctx->pages[i]) 975 if (!ctx->pages[i])
976 goto out_of_memory; 976 goto out_of_memory;
977 dma_addr = dma_map_page(ohci->card.device, ctx->pages[i], 977 dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
978 0, PAGE_SIZE, DMA_FROM_DEVICE); 978 0, PAGE_SIZE, DMA_FROM_DEVICE);
979 if (dma_mapping_error(ohci->card.device, dma_addr)) { 979 if (dma_mapping_error(ohci->card.device, dma_addr)) {
980 __free_page(ctx->pages[i]); 980 __free_page(ctx->pages[i]);
981 ctx->pages[i] = NULL; 981 ctx->pages[i] = NULL;
982 goto out_of_memory; 982 goto out_of_memory;
983 } 983 }
984 set_page_private(ctx->pages[i], dma_addr); 984 set_page_private(ctx->pages[i], dma_addr);
985 } 985 }
986 986
987 for (i = 0; i < AR_BUFFERS; i++) 987 for (i = 0; i < AR_BUFFERS; i++)
988 pages[i] = ctx->pages[i]; 988 pages[i] = ctx->pages[i];
989 for (i = 0; i < AR_WRAPAROUND_PAGES; i++) 989 for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
990 pages[AR_BUFFERS + i] = ctx->pages[i]; 990 pages[AR_BUFFERS + i] = ctx->pages[i];
991 ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES, 991 ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
992 -1, PAGE_KERNEL); 992 -1, PAGE_KERNEL);
993 if (!ctx->buffer) 993 if (!ctx->buffer)
994 goto out_of_memory; 994 goto out_of_memory;
995 995
996 ctx->descriptors = ohci->misc_buffer + descriptors_offset; 996 ctx->descriptors = ohci->misc_buffer + descriptors_offset;
997 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; 997 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
998 998
999 for (i = 0; i < AR_BUFFERS; i++) { 999 for (i = 0; i < AR_BUFFERS; i++) {
1000 d = &ctx->descriptors[i]; 1000 d = &ctx->descriptors[i];
1001 d->req_count = cpu_to_le16(PAGE_SIZE); 1001 d->req_count = cpu_to_le16(PAGE_SIZE);
1002 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 1002 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
1003 DESCRIPTOR_STATUS | 1003 DESCRIPTOR_STATUS |
1004 DESCRIPTOR_BRANCH_ALWAYS); 1004 DESCRIPTOR_BRANCH_ALWAYS);
1005 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); 1005 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
1006 d->branch_address = cpu_to_le32(ctx->descriptors_bus + 1006 d->branch_address = cpu_to_le32(ctx->descriptors_bus +
1007 ar_next_buffer_index(i) * sizeof(struct descriptor)); 1007 ar_next_buffer_index(i) * sizeof(struct descriptor));
1008 } 1008 }
1009 1009
1010 return 0; 1010 return 0;
1011 1011
1012 out_of_memory: 1012 out_of_memory:
1013 ar_context_release(ctx); 1013 ar_context_release(ctx);
1014 1014
1015 return -ENOMEM; 1015 return -ENOMEM;
1016 } 1016 }
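
The wraparound trick in ar_context_init — mapping the first pages a second
time directly after the buffer end with vm_map_ram, so a packet crossing the
ring boundary reads as one contiguous span — has a well-known userspace
analogue. A hypothetical sketch using memfd_create and two fixed mappings
(error handling omitted for brevity; assumes Linux with memfd_create
available):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 2 * 4096;	/* ring size, page aligned */
	int fd = memfd_create("ring", 0);

	ftruncate(fd, size);
	/* reserve twice the size, then map the same pages back to back */
	char *buf = mmap(NULL, 2 * size, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mmap(buf, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
	     fd, 0);
	mmap(buf + size, size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

	/* a write that crosses the ring end ... */
	strcpy(buf + size - 4, "wrapped");
	/* ... is readable contiguously through the second mapping, and
	   its tail is visible at the ring start, as in the tasklet */
	printf("%s / %s\n", buf + size - 4, buf); /* wrapped / ped */
	close(fd);
	return 0;
}
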
1017 1017
1018 static void ar_context_run(struct ar_context *ctx) 1018 static void ar_context_run(struct ar_context *ctx)
1019 { 1019 {
1020 unsigned int i; 1020 unsigned int i;
1021 1021
1022 for (i = 0; i < AR_BUFFERS; i++) 1022 for (i = 0; i < AR_BUFFERS; i++)
1023 ar_context_link_page(ctx, i); 1023 ar_context_link_page(ctx, i);
1024 1024
1025 ctx->pointer = ctx->buffer; 1025 ctx->pointer = ctx->buffer;
1026 1026
1027 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); 1027 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
1028 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); 1028 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
1029 } 1029 }
1030 1030
1031 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) 1031 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
1032 { 1032 {
1033 __le16 branch; 1033 __le16 branch;
1034 1034
1035 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); 1035 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
1036 1036
1037 /* figure out which descriptor the branch address goes in */ 1037 /* figure out which descriptor the branch address goes in */
1038 if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) 1038 if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
1039 return d; 1039 return d;
1040 else 1040 else
1041 return d + z - 1; 1041 return d + z - 1;
1042 } 1042 }
1043 1043
1044 static void context_tasklet(unsigned long data) 1044 static void context_tasklet(unsigned long data)
1045 { 1045 {
1046 struct context *ctx = (struct context *) data; 1046 struct context *ctx = (struct context *) data;
1047 struct descriptor *d, *last; 1047 struct descriptor *d, *last;
1048 u32 address; 1048 u32 address;
1049 int z; 1049 int z;
1050 struct descriptor_buffer *desc; 1050 struct descriptor_buffer *desc;
1051 1051
1052 desc = list_entry(ctx->buffer_list.next, 1052 desc = list_entry(ctx->buffer_list.next,
1053 struct descriptor_buffer, list); 1053 struct descriptor_buffer, list);
1054 last = ctx->last; 1054 last = ctx->last;
1055 while (last->branch_address != 0) { 1055 while (last->branch_address != 0) {
1056 struct descriptor_buffer *old_desc = desc; 1056 struct descriptor_buffer *old_desc = desc;
1057 address = le32_to_cpu(last->branch_address); 1057 address = le32_to_cpu(last->branch_address);
1058 z = address & 0xf; 1058 z = address & 0xf;
1059 address &= ~0xf; 1059 address &= ~0xf;
1060 1060
1061 /* If the branch address points to a buffer outside of the 1061 /* If the branch address points to a buffer outside of the
1062 * current buffer, advance to the next buffer. */ 1062 * current buffer, advance to the next buffer. */
1063 if (address < desc->buffer_bus || 1063 if (address < desc->buffer_bus ||
1064 address >= desc->buffer_bus + desc->used) 1064 address >= desc->buffer_bus + desc->used)
1065 desc = list_entry(desc->list.next, 1065 desc = list_entry(desc->list.next,
1066 struct descriptor_buffer, list); 1066 struct descriptor_buffer, list);
1067 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); 1067 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1068 last = find_branch_descriptor(d, z); 1068 last = find_branch_descriptor(d, z);
1069 1069
1070 if (!ctx->callback(ctx, d, last)) 1070 if (!ctx->callback(ctx, d, last))
1071 break; 1071 break;
1072 1072
1073 if (old_desc != desc) { 1073 if (old_desc != desc) {
1074 /* If we've advanced to the next buffer, move the 1074 /* If we've advanced to the next buffer, move the
1075 * previous buffer to the free list. */ 1075 * previous buffer to the free list. */
1076 unsigned long flags; 1076 unsigned long flags;
1077 old_desc->used = 0; 1077 old_desc->used = 0;
1078 spin_lock_irqsave(&ctx->ohci->lock, flags); 1078 spin_lock_irqsave(&ctx->ohci->lock, flags);
1079 list_move_tail(&old_desc->list, &ctx->buffer_list); 1079 list_move_tail(&old_desc->list, &ctx->buffer_list);
1080 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1080 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1081 } 1081 }
1082 ctx->last = last; 1082 ctx->last = last;
1083 } 1083 }
1084 } 1084 }
1085 1085
1086 /* 1086 /*
1087 * Allocate a new buffer and add it to the list of free buffers for this 1087 * Allocate a new buffer and add it to the list of free buffers for this
1088 * context. Must be called with ohci->lock held. 1088 * context. Must be called with ohci->lock held.
1089 */ 1089 */
1090 static int context_add_buffer(struct context *ctx) 1090 static int context_add_buffer(struct context *ctx)
1091 { 1091 {
1092 struct descriptor_buffer *desc; 1092 struct descriptor_buffer *desc;
1093 dma_addr_t uninitialized_var(bus_addr); 1093 dma_addr_t uninitialized_var(bus_addr);
1094 int offset; 1094 int offset;
1095 1095
1096 /* 1096 /*
1097 * 16MB of descriptors should be far more than enough for any DMA 1097 * 16MB of descriptors should be far more than enough for any DMA
1098 * program. This will catch run-away userspace or DoS attacks. 1098 * program. This will catch run-away userspace or DoS attacks.
1099 */ 1099 */
1100 if (ctx->total_allocation >= 16*1024*1024) 1100 if (ctx->total_allocation >= 16*1024*1024)
1101 return -ENOMEM; 1101 return -ENOMEM;
1102 1102
1103 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, 1103 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
1104 &bus_addr, GFP_ATOMIC); 1104 &bus_addr, GFP_ATOMIC);
1105 if (!desc) 1105 if (!desc)
1106 return -ENOMEM; 1106 return -ENOMEM;
1107 1107
1108 offset = (void *)&desc->buffer - (void *)desc; 1108 offset = (void *)&desc->buffer - (void *)desc;
1109 desc->buffer_size = PAGE_SIZE - offset; 1109 desc->buffer_size = PAGE_SIZE - offset;
1110 desc->buffer_bus = bus_addr + offset; 1110 desc->buffer_bus = bus_addr + offset;
1111 desc->used = 0; 1111 desc->used = 0;
1112 1112
1113 list_add_tail(&desc->list, &ctx->buffer_list); 1113 list_add_tail(&desc->list, &ctx->buffer_list);
1114 ctx->total_allocation += PAGE_SIZE; 1114 ctx->total_allocation += PAGE_SIZE;
1115 1115
1116 return 0; 1116 return 0;
1117 } 1117 }
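
The offset computation in context_add_buffer is plain offsetof() arithmetic:
the device must be handed the bus address of the buffer member, not of the
header in front of it. A small illustration with a made-up stand-in struct
and a pretend DMA address:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the driver's descriptor_buffer layout */
struct buf {
	void *list_next, *list_prev;	/* list head */
	uint64_t buffer_bus;
	size_t buffer_size, used;
	uint32_t buffer[];		/* descriptors live here */
};

int main(void)
{
	uint64_t bus_addr = 0x10000;	/* pretend DMA address of the page */
	size_t off = offsetof(struct buf, buffer);

	printf("header %zu bytes, buffer_bus 0x%llx\n",
	       off, (unsigned long long)(bus_addr + off));
	return 0;
}
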
1118 1118
1119 static int context_init(struct context *ctx, struct fw_ohci *ohci, 1119 static int context_init(struct context *ctx, struct fw_ohci *ohci,
1120 u32 regs, descriptor_callback_t callback) 1120 u32 regs, descriptor_callback_t callback)
1121 { 1121 {
1122 ctx->ohci = ohci; 1122 ctx->ohci = ohci;
1123 ctx->regs = regs; 1123 ctx->regs = regs;
1124 ctx->total_allocation = 0; 1124 ctx->total_allocation = 0;
1125 1125
1126 INIT_LIST_HEAD(&ctx->buffer_list); 1126 INIT_LIST_HEAD(&ctx->buffer_list);
1127 if (context_add_buffer(ctx) < 0) 1127 if (context_add_buffer(ctx) < 0)
1128 return -ENOMEM; 1128 return -ENOMEM;
1129 1129
1130 ctx->buffer_tail = list_entry(ctx->buffer_list.next, 1130 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
1131 struct descriptor_buffer, list); 1131 struct descriptor_buffer, list);
1132 1132
1133 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); 1133 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
1134 ctx->callback = callback; 1134 ctx->callback = callback;
1135 1135
1136 /* 1136 /*
1137 * We put a dummy descriptor in the buffer that has a NULL 1137 * We put a dummy descriptor in the buffer that has a NULL
1138 * branch address and looks like it's been sent. That way we 1138 * branch address and looks like it's been sent. That way we
1139 * have a descriptor to append DMA programs to. 1139 * have a descriptor to append DMA programs to.
1140 */ 1140 */
1141 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); 1141 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1142 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); 1142 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1143 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); 1143 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1144 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); 1144 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1145 ctx->last = ctx->buffer_tail->buffer; 1145 ctx->last = ctx->buffer_tail->buffer;
1146 ctx->prev = ctx->buffer_tail->buffer; 1146 ctx->prev = ctx->buffer_tail->buffer;
1147 1147
1148 return 0; 1148 return 0;
1149 } 1149 }
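
The dummy descriptor set up at the end of context_init is the classic
sentinel-node pattern: seed the queue with one node that already looks
completed, so append never has to special-case an empty list. A minimal
sketch of the same idea (all names illustrative, not the driver's):

#include <stdio.h>

struct node {
	struct node *next;
	int done;
	int payload;
};

struct queue {
	struct node *last;	/* most recently appended node */
};

static void queue_init(struct queue *q, struct node *sentinel)
{
	sentinel->next = NULL;
	sentinel->done = 1;	/* pretend it has been processed */
	q->last = sentinel;
}

static void queue_append(struct queue *q, struct node *n)
{
	n->next = NULL;
	n->done = 0;
	q->last->next = n;	/* always a valid predecessor to link to */
	q->last = n;
}

int main(void)
{
	struct queue q;
	struct node s, a = { .payload = 1 }, b = { .payload = 2 };

	queue_init(&q, &s);
	queue_append(&q, &a);
	queue_append(&q, &b);
	for (struct node *n = s.next; n; n = n->next)
		printf("%d\n", n->payload);
	return 0;
}
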
1150 1150
1151 static void context_release(struct context *ctx) 1151 static void context_release(struct context *ctx)
1152 { 1152 {
1153 struct fw_card *card = &ctx->ohci->card; 1153 struct fw_card *card = &ctx->ohci->card;
1154 struct descriptor_buffer *desc, *tmp; 1154 struct descriptor_buffer *desc, *tmp;
1155 1155
1156 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) 1156 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
1157 dma_free_coherent(card->device, PAGE_SIZE, desc, 1157 dma_free_coherent(card->device, PAGE_SIZE, desc,
1158 desc->buffer_bus - 1158 desc->buffer_bus -
1159 ((void *)&desc->buffer - (void *)desc)); 1159 ((void *)&desc->buffer - (void *)desc));
1160 } 1160 }
1161 1161
1162 /* Must be called with ohci->lock held */ 1162 /* Must be called with ohci->lock held */
1163 static struct descriptor *context_get_descriptors(struct context *ctx, 1163 static struct descriptor *context_get_descriptors(struct context *ctx,
1164 int z, dma_addr_t *d_bus) 1164 int z, dma_addr_t *d_bus)
1165 { 1165 {
1166 struct descriptor *d = NULL; 1166 struct descriptor *d = NULL;
1167 struct descriptor_buffer *desc = ctx->buffer_tail; 1167 struct descriptor_buffer *desc = ctx->buffer_tail;
1168 1168
1169 if (z * sizeof(*d) > desc->buffer_size) 1169 if (z * sizeof(*d) > desc->buffer_size)
1170 return NULL; 1170 return NULL;
1171 1171
1172 if (z * sizeof(*d) > desc->buffer_size - desc->used) { 1172 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
1173 /* No room for the descriptor in this buffer, so advance to the 1173 /* No room for the descriptor in this buffer, so advance to the
1174 * next one. */ 1174 * next one. */
1175 1175
1176 if (desc->list.next == &ctx->buffer_list) { 1176 if (desc->list.next == &ctx->buffer_list) {
1177 /* If there is no free buffer next in the list, 1177 /* If there is no free buffer next in the list,
1178 * allocate one. */ 1178 * allocate one. */
1179 if (context_add_buffer(ctx) < 0) 1179 if (context_add_buffer(ctx) < 0)
1180 return NULL; 1180 return NULL;
1181 } 1181 }
1182 desc = list_entry(desc->list.next, 1182 desc = list_entry(desc->list.next,
1183 struct descriptor_buffer, list); 1183 struct descriptor_buffer, list);
1184 ctx->buffer_tail = desc; 1184 ctx->buffer_tail = desc;
1185 } 1185 }
1186 1186
1187 d = desc->buffer + desc->used / sizeof(*d); 1187 d = desc->buffer + desc->used / sizeof(*d);
1188 memset(d, 0, z * sizeof(*d)); 1188 memset(d, 0, z * sizeof(*d));
1189 *d_bus = desc->buffer_bus + desc->used; 1189 *d_bus = desc->buffer_bus + desc->used;
1190 1190
1191 return d; 1191 return d;
1192 } 1192 }
1193 1193
1194 static void context_run(struct context *ctx, u32 extra) 1194 static void context_run(struct context *ctx, u32 extra)
1195 { 1195 {
1196 struct fw_ohci *ohci = ctx->ohci; 1196 struct fw_ohci *ohci = ctx->ohci;
1197 1197
1198 reg_write(ohci, COMMAND_PTR(ctx->regs), 1198 reg_write(ohci, COMMAND_PTR(ctx->regs),
1199 le32_to_cpu(ctx->last->branch_address)); 1199 le32_to_cpu(ctx->last->branch_address));
1200 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); 1200 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
1201 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); 1201 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
1202 ctx->running = true; 1202 ctx->running = true;
1203 flush_writes(ohci); 1203 flush_writes(ohci);
1204 } 1204 }
1205 1205
1206 static void context_append(struct context *ctx, 1206 static void context_append(struct context *ctx,
1207 struct descriptor *d, int z, int extra) 1207 struct descriptor *d, int z, int extra)
1208 { 1208 {
1209 dma_addr_t d_bus; 1209 dma_addr_t d_bus;
1210 struct descriptor_buffer *desc = ctx->buffer_tail; 1210 struct descriptor_buffer *desc = ctx->buffer_tail;
1211 1211
1212 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); 1212 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
1213 1213
1214 desc->used += (z + extra) * sizeof(*d); 1214 desc->used += (z + extra) * sizeof(*d);
1215 1215
1216 wmb(); /* finish init of new descriptors before branch_address update */ 1216 wmb(); /* finish init of new descriptors before branch_address update */
1217 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 1217 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
1218 ctx->prev = find_branch_descriptor(d, z); 1218 ctx->prev = find_branch_descriptor(d, z);
1219 } 1219 }
1220 1220
1221 static void context_stop(struct context *ctx) 1221 static void context_stop(struct context *ctx)
1222 { 1222 {
1223 u32 reg; 1223 u32 reg;
1224 int i; 1224 int i;
1225 1225
1226 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 1226 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1227 ctx->running = false; 1227 ctx->running = false;
1228 1228
1229 for (i = 0; i < 1000; i++) { 1229 for (i = 0; i < 1000; i++) {
1230 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); 1230 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1231 if ((reg & CONTEXT_ACTIVE) == 0) 1231 if ((reg & CONTEXT_ACTIVE) == 0)
1232 return; 1232 return;
1233 1233
1234 if (i) 1234 if (i)
1235 udelay(10); 1235 udelay(10);
1236 } 1236 }
1237 fw_error("Error: DMA context still active (0x%08x)\n", reg); 1237 fw_error("Error: DMA context still active (0x%08x)\n", reg);
1238 } 1238 }
1239 1239
1240 struct driver_data { 1240 struct driver_data {
1241 u8 inline_data[8]; 1241 u8 inline_data[8];
1242 struct fw_packet *packet; 1242 struct fw_packet *packet;
1243 }; 1243 };
1244 1244
1245 /* 1245 /*
1246 * This function appends a packet to the DMA queue for transmission. 1246 * This function appends a packet to the DMA queue for transmission.
1247 * Must always be called with the ohci->lock held to ensure proper 1247 * Must always be called with the ohci->lock held to ensure proper
1248 * generation handling and locking around packet queue manipulation. 1248 * generation handling and locking around packet queue manipulation.
1249 */ 1249 */
1250 static int at_context_queue_packet(struct context *ctx, 1250 static int at_context_queue_packet(struct context *ctx,
1251 struct fw_packet *packet) 1251 struct fw_packet *packet)
1252 { 1252 {
1253 struct fw_ohci *ohci = ctx->ohci; 1253 struct fw_ohci *ohci = ctx->ohci;
1254 dma_addr_t d_bus, uninitialized_var(payload_bus); 1254 dma_addr_t d_bus, uninitialized_var(payload_bus);
1255 struct driver_data *driver_data; 1255 struct driver_data *driver_data;
1256 struct descriptor *d, *last; 1256 struct descriptor *d, *last;
1257 __le32 *header; 1257 __le32 *header;
1258 int z, tcode; 1258 int z, tcode;
1259 1259
1260 d = context_get_descriptors(ctx, 4, &d_bus); 1260 d = context_get_descriptors(ctx, 4, &d_bus);
1261 if (d == NULL) { 1261 if (d == NULL) {
1262 packet->ack = RCODE_SEND_ERROR; 1262 packet->ack = RCODE_SEND_ERROR;
1263 return -1; 1263 return -1;
1264 } 1264 }
1265 1265
1266 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 1266 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1267 d[0].res_count = cpu_to_le16(packet->timestamp); 1267 d[0].res_count = cpu_to_le16(packet->timestamp);
1268 1268
1269 /* 1269 /*
1270 * The DMA format for asynchronous link packets is different 1270 * The DMA format for asynchronous link packets is different
1271 * from the IEEE1394 layout, so shift the fields around 1271 * from the IEEE1394 layout, so shift the fields around
1272 * accordingly. 1272 * accordingly.
1273 */ 1273 */
1274 1274
1275 tcode = (packet->header[0] >> 4) & 0x0f; 1275 tcode = (packet->header[0] >> 4) & 0x0f;
1276 header = (__le32 *) &d[1]; 1276 header = (__le32 *) &d[1];
1277 switch (tcode) { 1277 switch (tcode) {
1278 case TCODE_WRITE_QUADLET_REQUEST: 1278 case TCODE_WRITE_QUADLET_REQUEST:
1279 case TCODE_WRITE_BLOCK_REQUEST: 1279 case TCODE_WRITE_BLOCK_REQUEST:
1280 case TCODE_WRITE_RESPONSE: 1280 case TCODE_WRITE_RESPONSE:
1281 case TCODE_READ_QUADLET_REQUEST: 1281 case TCODE_READ_QUADLET_REQUEST:
1282 case TCODE_READ_BLOCK_REQUEST: 1282 case TCODE_READ_BLOCK_REQUEST:
1283 case TCODE_READ_QUADLET_RESPONSE: 1283 case TCODE_READ_QUADLET_RESPONSE:
1284 case TCODE_READ_BLOCK_RESPONSE: 1284 case TCODE_READ_BLOCK_RESPONSE:
1285 case TCODE_LOCK_REQUEST: 1285 case TCODE_LOCK_REQUEST:
1286 case TCODE_LOCK_RESPONSE: 1286 case TCODE_LOCK_RESPONSE:
1287 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1287 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1288 (packet->speed << 16)); 1288 (packet->speed << 16));
1289 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 1289 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
1290 (packet->header[0] & 0xffff0000)); 1290 (packet->header[0] & 0xffff0000));
1291 header[2] = cpu_to_le32(packet->header[2]); 1291 header[2] = cpu_to_le32(packet->header[2]);
1292 1292
1293 if (TCODE_IS_BLOCK_PACKET(tcode)) 1293 if (TCODE_IS_BLOCK_PACKET(tcode))
1294 header[3] = cpu_to_le32(packet->header[3]); 1294 header[3] = cpu_to_le32(packet->header[3]);
1295 else 1295 else
1296 header[3] = (__force __le32) packet->header[3]; 1296 header[3] = (__force __le32) packet->header[3];
1297 1297
1298 d[0].req_count = cpu_to_le16(packet->header_length); 1298 d[0].req_count = cpu_to_le16(packet->header_length);
1299 break; 1299 break;
1300 1300
1301 case TCODE_LINK_INTERNAL: 1301 case TCODE_LINK_INTERNAL:
1302 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 1302 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
1303 (packet->speed << 16)); 1303 (packet->speed << 16));
1304 header[1] = cpu_to_le32(packet->header[1]); 1304 header[1] = cpu_to_le32(packet->header[1]);
1305 header[2] = cpu_to_le32(packet->header[2]); 1305 header[2] = cpu_to_le32(packet->header[2]);
1306 d[0].req_count = cpu_to_le16(12); 1306 d[0].req_count = cpu_to_le16(12);
1307 1307
1308 if (is_ping_packet(&packet->header[1])) 1308 if (is_ping_packet(&packet->header[1]))
1309 d[0].control |= cpu_to_le16(DESCRIPTOR_PING); 1309 d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
1310 break; 1310 break;
1311 1311
1312 case TCODE_STREAM_DATA: 1312 case TCODE_STREAM_DATA:
1313 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1313 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1314 (packet->speed << 16)); 1314 (packet->speed << 16));
1315 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); 1315 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
1316 d[0].req_count = cpu_to_le16(8); 1316 d[0].req_count = cpu_to_le16(8);
1317 break; 1317 break;
1318 1318
1319 default: 1319 default:
1320 /* BUG(); */ 1320 /* BUG(); */
1321 packet->ack = RCODE_SEND_ERROR; 1321 packet->ack = RCODE_SEND_ERROR;
1322 return -1; 1322 return -1;
1323 } 1323 }
1324 1324
1325 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); 1325 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
1326 driver_data = (struct driver_data *) &d[3]; 1326 driver_data = (struct driver_data *) &d[3];
1327 driver_data->packet = packet; 1327 driver_data->packet = packet;
1328 packet->driver_data = driver_data; 1328 packet->driver_data = driver_data;
1329 1329
1330 if (packet->payload_length > 0) { 1330 if (packet->payload_length > 0) {
1331 if (packet->payload_length > sizeof(driver_data->inline_data)) { 1331 if (packet->payload_length > sizeof(driver_data->inline_data)) {
1332 payload_bus = dma_map_single(ohci->card.device, 1332 payload_bus = dma_map_single(ohci->card.device,
1333 packet->payload, 1333 packet->payload,
1334 packet->payload_length, 1334 packet->payload_length,
1335 DMA_TO_DEVICE); 1335 DMA_TO_DEVICE);
1336 if (dma_mapping_error(ohci->card.device, payload_bus)) { 1336 if (dma_mapping_error(ohci->card.device, payload_bus)) {
1337 packet->ack = RCODE_SEND_ERROR; 1337 packet->ack = RCODE_SEND_ERROR;
1338 return -1; 1338 return -1;
1339 } 1339 }
1340 packet->payload_bus = payload_bus; 1340 packet->payload_bus = payload_bus;
1341 packet->payload_mapped = true; 1341 packet->payload_mapped = true;
1342 } else { 1342 } else {
1343 memcpy(driver_data->inline_data, packet->payload, 1343 memcpy(driver_data->inline_data, packet->payload,
1344 packet->payload_length); 1344 packet->payload_length);
1345 payload_bus = d_bus + 3 * sizeof(*d); 1345 payload_bus = d_bus + 3 * sizeof(*d);
1346 } 1346 }
1347 1347
1348 d[2].req_count = cpu_to_le16(packet->payload_length); 1348 d[2].req_count = cpu_to_le16(packet->payload_length);
1349 d[2].data_address = cpu_to_le32(payload_bus); 1349 d[2].data_address = cpu_to_le32(payload_bus);
1350 last = &d[2]; 1350 last = &d[2];
1351 z = 3; 1351 z = 3;
1352 } else { 1352 } else {
1353 last = &d[0]; 1353 last = &d[0];
1354 z = 2; 1354 z = 2;
1355 } 1355 }
1356 1356
1357 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1357 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1358 DESCRIPTOR_IRQ_ALWAYS | 1358 DESCRIPTOR_IRQ_ALWAYS |
1359 DESCRIPTOR_BRANCH_ALWAYS); 1359 DESCRIPTOR_BRANCH_ALWAYS);
1360 1360
1361 /* FIXME: Document how the locking works. */ 1361 /* FIXME: Document how the locking works. */
1362 if (ohci->generation != packet->generation) { 1362 if (ohci->generation != packet->generation) {
1363 if (packet->payload_mapped) 1363 if (packet->payload_mapped)
1364 dma_unmap_single(ohci->card.device, payload_bus, 1364 dma_unmap_single(ohci->card.device, payload_bus,
1365 packet->payload_length, DMA_TO_DEVICE); 1365 packet->payload_length, DMA_TO_DEVICE);
1366 packet->ack = RCODE_GENERATION; 1366 packet->ack = RCODE_GENERATION;
1367 return -1; 1367 return -1;
1368 } 1368 }
1369 1369
1370 context_append(ctx, d, z, 4 - z); 1370 context_append(ctx, d, z, 4 - z);
1371 1371
1372 if (ctx->running) 1372 if (ctx->running)
1373 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 1373 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
1374 else 1374 else
1375 context_run(ctx, 0); 1375 context_run(ctx, 0);
1376 1376
1377 return 0; 1377 return 0;
1378 } 1378 }
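
For the ordinary request/response tcodes, the switch above moves the speed
into header[0] and swaps the destination ID from the IEEE 1394 layout into
header[1]. A standalone sketch of just that bit shuffling (sample header
values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* rearrange an IEEE 1394 request header into the OHCI AT DMA layout,
   mirroring the quadlet/block cases of the switch above */
static void ieee1394_to_ohci_at(const uint32_t h[3], unsigned speed,
				uint32_t out[3])
{
	out[0] = (h[0] & 0xffff) | (speed << 16); /* tl/rt/tcode/pri + spd */
	out[1] = (h[1] & 0xffff) | (h[0] & 0xffff0000); /* offs_hi + dest */
	out[2] = h[2];				/* offset_low unchanged */
}

int main(void)
{
	uint32_t h[3] = { 0xffc06140, 0xffc1ffff, 0xf0000400 };
	uint32_t out[3];

	ieee1394_to_ohci_at(h, 2, out);
	printf("%08x %08x %08x\n", out[0], out[1], out[2]);
	return 0;
}
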
1379 1379
1380 static void at_context_flush(struct context *ctx) 1380 static void at_context_flush(struct context *ctx)
1381 { 1381 {
1382 tasklet_disable(&ctx->tasklet); 1382 tasklet_disable(&ctx->tasklet);
1383 1383
1384 ctx->flushing = true; 1384 ctx->flushing = true;
1385 context_tasklet((unsigned long)ctx); 1385 context_tasklet((unsigned long)ctx);
1386 ctx->flushing = false; 1386 ctx->flushing = false;
1387 1387
1388 tasklet_enable(&ctx->tasklet); 1388 tasklet_enable(&ctx->tasklet);
1389 } 1389 }
1390 1390
1391 static int handle_at_packet(struct context *context, 1391 static int handle_at_packet(struct context *context,
1392 struct descriptor *d, 1392 struct descriptor *d,
1393 struct descriptor *last) 1393 struct descriptor *last)
1394 { 1394 {
1395 struct driver_data *driver_data; 1395 struct driver_data *driver_data;
1396 struct fw_packet *packet; 1396 struct fw_packet *packet;
1397 struct fw_ohci *ohci = context->ohci; 1397 struct fw_ohci *ohci = context->ohci;
1398 int evt; 1398 int evt;
1399 1399
1400 if (last->transfer_status == 0 && !context->flushing) 1400 if (last->transfer_status == 0 && !context->flushing)
1401 /* This descriptor isn't done yet, stop iteration. */ 1401 /* This descriptor isn't done yet, stop iteration. */
1402 return 0; 1402 return 0;
1403 1403
1404 driver_data = (struct driver_data *) &d[3]; 1404 driver_data = (struct driver_data *) &d[3];
1405 packet = driver_data->packet; 1405 packet = driver_data->packet;
1406 if (packet == NULL) 1406 if (packet == NULL)
1407 /* This packet was cancelled, just continue. */ 1407 /* This packet was cancelled, just continue. */
1408 return 1; 1408 return 1;
1409 1409
1410 if (packet->payload_mapped) 1410 if (packet->payload_mapped)
1411 dma_unmap_single(ohci->card.device, packet->payload_bus, 1411 dma_unmap_single(ohci->card.device, packet->payload_bus,
1412 packet->payload_length, DMA_TO_DEVICE); 1412 packet->payload_length, DMA_TO_DEVICE);
1413 1413
1414 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1414 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1415 packet->timestamp = le16_to_cpu(last->res_count); 1415 packet->timestamp = le16_to_cpu(last->res_count);
1416 1416
1417 log_ar_at_event('T', packet->speed, packet->header, evt); 1417 log_ar_at_event('T', packet->speed, packet->header, evt);
1418 1418
1419 switch (evt) { 1419 switch (evt) {
1420 case OHCI1394_evt_timeout: 1420 case OHCI1394_evt_timeout:
1421 /* Async response transmit timed out. */ 1421 /* Async response transmit timed out. */
1422 packet->ack = RCODE_CANCELLED; 1422 packet->ack = RCODE_CANCELLED;
1423 break; 1423 break;
1424 1424
1425 case OHCI1394_evt_flushed: 1425 case OHCI1394_evt_flushed:
1426 /* 1426 /*
1427 * A packet that was flushed should give the same error as 1427 * A packet that was flushed should give the same error as
1428 * when we try to use a stale generation count. 1428 * when we try to use a stale generation count.
1429 */ 1429 */
1430 packet->ack = RCODE_GENERATION; 1430 packet->ack = RCODE_GENERATION;
1431 break; 1431 break;
1432 1432
1433 case OHCI1394_evt_missing_ack: 1433 case OHCI1394_evt_missing_ack:
1434 if (context->flushing) 1434 if (context->flushing)
1435 packet->ack = RCODE_GENERATION; 1435 packet->ack = RCODE_GENERATION;
1436 else { 1436 else {
1437 /* 1437 /*
1438 * Using a valid (current) generation count, but the 1438 * Using a valid (current) generation count, but the
1439 * node is not on the bus or not sending acks. 1439 * node is not on the bus or not sending acks.
1440 */ 1440 */
1441 packet->ack = RCODE_NO_ACK; 1441 packet->ack = RCODE_NO_ACK;
1442 } 1442 }
1443 break; 1443 break;
1444 1444
1445 case ACK_COMPLETE + 0x10: 1445 case ACK_COMPLETE + 0x10:
1446 case ACK_PENDING + 0x10: 1446 case ACK_PENDING + 0x10:
1447 case ACK_BUSY_X + 0x10: 1447 case ACK_BUSY_X + 0x10:
1448 case ACK_BUSY_A + 0x10: 1448 case ACK_BUSY_A + 0x10:
1449 case ACK_BUSY_B + 0x10: 1449 case ACK_BUSY_B + 0x10:
1450 case ACK_DATA_ERROR + 0x10: 1450 case ACK_DATA_ERROR + 0x10:
1451 case ACK_TYPE_ERROR + 0x10: 1451 case ACK_TYPE_ERROR + 0x10:
1452 packet->ack = evt - 0x10; 1452 packet->ack = evt - 0x10;
1453 break; 1453 break;
1454 1454
1455 case OHCI1394_evt_no_status: 1455 case OHCI1394_evt_no_status:
1456 if (context->flushing) { 1456 if (context->flushing) {
1457 packet->ack = RCODE_GENERATION; 1457 packet->ack = RCODE_GENERATION;
1458 break; 1458 break;
1459 } 1459 }
1460 /* fall through */ 1460 /* fall through */
1461 1461
1462 default: 1462 default:
1463 packet->ack = RCODE_SEND_ERROR; 1463 packet->ack = RCODE_SEND_ERROR;
1464 break; 1464 break;
1465 } 1465 }
1466 1466
1467 packet->callback(packet, &ohci->card, packet->ack); 1467 packet->callback(packet, &ohci->card, packet->ack);
1468 1468
1469 return 1; 1469 return 1;
1470 } 1470 }
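
The +0x10 arithmetic in the switch above reflects how the controller reports
IEEE 1394 ack codes as biased event codes. A sketch of that mapping, using
the standard 1394 ack values (assumed here, not taken from the driver's
headers):

#include <stdio.h>

/* IEEE 1394 ack codes, standard values */
enum {
	ACK_COMPLETE = 0x1, ACK_PENDING = 0x2, ACK_BUSY_X = 0x4,
	ACK_BUSY_A = 0x5, ACK_BUSY_B = 0x6, ACK_DATA_ERROR = 0xd,
	ACK_TYPE_ERROR = 0xe,
};

static int evt_to_ack(int evt)
{
	switch (evt) {
	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		return evt - 0x10;	/* strip the OHCI event-code bias */
	default:
		return -1;		/* timeout, flushed, etc. */
	}
}

int main(void)
{
	printf("%d %d\n", evt_to_ack(ACK_PENDING + 0x10), evt_to_ack(0x20));
	return 0;
}
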
1471 1471
1472 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) 1472 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1473 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) 1473 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1474 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) 1474 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1475 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1475 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1476 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1476 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1477 1477
1478 static void handle_local_rom(struct fw_ohci *ohci, 1478 static void handle_local_rom(struct fw_ohci *ohci,
1479 struct fw_packet *packet, u32 csr) 1479 struct fw_packet *packet, u32 csr)
1480 { 1480 {
1481 struct fw_packet response; 1481 struct fw_packet response;
1482 int tcode, length, i; 1482 int tcode, length, i;
1483 1483
1484 tcode = HEADER_GET_TCODE(packet->header[0]); 1484 tcode = HEADER_GET_TCODE(packet->header[0]);
1485 if (TCODE_IS_BLOCK_PACKET(tcode)) 1485 if (TCODE_IS_BLOCK_PACKET(tcode))
1486 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1486 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1487 else 1487 else
1488 length = 4; 1488 length = 4;
1489 1489
1490 i = csr - CSR_CONFIG_ROM; 1490 i = csr - CSR_CONFIG_ROM;
1491 if (i + length > CONFIG_ROM_SIZE) { 1491 if (i + length > CONFIG_ROM_SIZE) {
1492 fw_fill_response(&response, packet->header, 1492 fw_fill_response(&response, packet->header,
1493 RCODE_ADDRESS_ERROR, NULL, 0); 1493 RCODE_ADDRESS_ERROR, NULL, 0);
1494 } else if (!TCODE_IS_READ_REQUEST(tcode)) { 1494 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1495 fw_fill_response(&response, packet->header, 1495 fw_fill_response(&response, packet->header,
1496 RCODE_TYPE_ERROR, NULL, 0); 1496 RCODE_TYPE_ERROR, NULL, 0);
1497 } else { 1497 } else {
1498 fw_fill_response(&response, packet->header, RCODE_COMPLETE, 1498 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1499 (void *) ohci->config_rom + i, length); 1499 (void *) ohci->config_rom + i, length);
1500 } 1500 }
1501 1501
1502 fw_core_handle_response(&ohci->card, &response); 1502 fw_core_handle_response(&ohci->card, &response);
1503 } 1503 }
1504 1504
1505 static void handle_local_lock(struct fw_ohci *ohci, 1505 static void handle_local_lock(struct fw_ohci *ohci,
1506 struct fw_packet *packet, u32 csr) 1506 struct fw_packet *packet, u32 csr)
1507 { 1507 {
1508 struct fw_packet response; 1508 struct fw_packet response;
1509 int tcode, length, ext_tcode, sel, try; 1509 int tcode, length, ext_tcode, sel, try;
1510 __be32 *payload, lock_old; 1510 __be32 *payload, lock_old;
1511 u32 lock_arg, lock_data; 1511 u32 lock_arg, lock_data;
1512 1512
1513 tcode = HEADER_GET_TCODE(packet->header[0]); 1513 tcode = HEADER_GET_TCODE(packet->header[0]);
1514 length = HEADER_GET_DATA_LENGTH(packet->header[3]); 1514 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1515 payload = packet->payload; 1515 payload = packet->payload;
1516 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); 1516 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1517 1517
1518 if (tcode == TCODE_LOCK_REQUEST && 1518 if (tcode == TCODE_LOCK_REQUEST &&
1519 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { 1519 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1520 lock_arg = be32_to_cpu(payload[0]); 1520 lock_arg = be32_to_cpu(payload[0]);
1521 lock_data = be32_to_cpu(payload[1]); 1521 lock_data = be32_to_cpu(payload[1]);
1522 } else if (tcode == TCODE_READ_QUADLET_REQUEST) { 1522 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1523 lock_arg = 0; 1523 lock_arg = 0;
1524 lock_data = 0; 1524 lock_data = 0;
1525 } else { 1525 } else {
1526 fw_fill_response(&response, packet->header, 1526 fw_fill_response(&response, packet->header,
1527 RCODE_TYPE_ERROR, NULL, 0); 1527 RCODE_TYPE_ERROR, NULL, 0);
1528 goto out; 1528 goto out;
1529 } 1529 }
1530 1530
1531 sel = (csr - CSR_BUS_MANAGER_ID) / 4; 1531 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1532 reg_write(ohci, OHCI1394_CSRData, lock_data); 1532 reg_write(ohci, OHCI1394_CSRData, lock_data);
1533 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); 1533 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1534 reg_write(ohci, OHCI1394_CSRControl, sel); 1534 reg_write(ohci, OHCI1394_CSRControl, sel);
1535 1535
1536 for (try = 0; try < 20; try++) 1536 for (try = 0; try < 20; try++)
1537 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { 1537 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1538 lock_old = cpu_to_be32(reg_read(ohci, 1538 lock_old = cpu_to_be32(reg_read(ohci,
1539 OHCI1394_CSRData)); 1539 OHCI1394_CSRData));
1540 fw_fill_response(&response, packet->header, 1540 fw_fill_response(&response, packet->header,
1541 RCODE_COMPLETE, 1541 RCODE_COMPLETE,
1542 &lock_old, sizeof(lock_old)); 1542 &lock_old, sizeof(lock_old));
1543 goto out; 1543 goto out;
1544 } 1544 }
1545 1545
1546 fw_error("swap not done (CSR lock timeout)\n"); 1546 fw_error("swap not done (CSR lock timeout)\n");
1547 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); 1547 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1548 1548
1549 out: 1549 out:
1550 fw_core_handle_response(&ohci->card, &response); 1550 fw_core_handle_response(&ohci->card, &response);
1551 } 1551 }
1552 1552
1553 static void handle_local_request(struct context *ctx, struct fw_packet *packet) 1553 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1554 { 1554 {
1555 u64 offset, csr; 1555 u64 offset, csr;
1556 1556
1557 if (ctx == &ctx->ohci->at_request_ctx) { 1557 if (ctx == &ctx->ohci->at_request_ctx) {
1558 packet->ack = ACK_PENDING; 1558 packet->ack = ACK_PENDING;
1559 packet->callback(packet, &ctx->ohci->card, packet->ack); 1559 packet->callback(packet, &ctx->ohci->card, packet->ack);
1560 } 1560 }
1561 1561
1562 offset = 1562 offset =
1563 ((unsigned long long) 1563 ((unsigned long long)
1564 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | 1564 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1565 packet->header[2]; 1565 packet->header[2];
1566 csr = offset - CSR_REGISTER_BASE; 1566 csr = offset - CSR_REGISTER_BASE;
1567 1567
1568 /* Handle config rom reads. */ 1568 /* Handle config rom reads. */
1569 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) 1569 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1570 handle_local_rom(ctx->ohci, packet, csr); 1570 handle_local_rom(ctx->ohci, packet, csr);
1571 else switch (csr) { 1571 else switch (csr) {
1572 case CSR_BUS_MANAGER_ID: 1572 case CSR_BUS_MANAGER_ID:
1573 case CSR_BANDWIDTH_AVAILABLE: 1573 case CSR_BANDWIDTH_AVAILABLE:
1574 case CSR_CHANNELS_AVAILABLE_HI: 1574 case CSR_CHANNELS_AVAILABLE_HI:
1575 case CSR_CHANNELS_AVAILABLE_LO: 1575 case CSR_CHANNELS_AVAILABLE_LO:
1576 handle_local_lock(ctx->ohci, packet, csr); 1576 handle_local_lock(ctx->ohci, packet, csr);
1577 break; 1577 break;
1578 default: 1578 default:
1579 if (ctx == &ctx->ohci->at_request_ctx) 1579 if (ctx == &ctx->ohci->at_request_ctx)
1580 fw_core_handle_request(&ctx->ohci->card, packet); 1580 fw_core_handle_request(&ctx->ohci->card, packet);
1581 else 1581 else
1582 fw_core_handle_response(&ctx->ohci->card, packet); 1582 fw_core_handle_response(&ctx->ohci->card, packet);
1583 break; 1583 break;
1584 } 1584 }
1585 1585
1586 if (ctx == &ctx->ohci->at_response_ctx) { 1586 if (ctx == &ctx->ohci->at_response_ctx) {
1587 packet->ack = ACK_COMPLETE; 1587 packet->ack = ACK_COMPLETE;
1588 packet->callback(packet, &ctx->ohci->card, packet->ack); 1588 packet->callback(packet, &ctx->ohci->card, packet->ack);
1589 } 1589 }
1590 } 1590 }
1591 1591
1592 static void at_context_transmit(struct context *ctx, struct fw_packet *packet) 1592 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1593 { 1593 {
1594 unsigned long flags; 1594 unsigned long flags;
1595 int ret; 1595 int ret;
1596 1596
1597 spin_lock_irqsave(&ctx->ohci->lock, flags); 1597 spin_lock_irqsave(&ctx->ohci->lock, flags);
1598 1598
1599 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && 1599 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1600 ctx->ohci->generation == packet->generation) { 1600 ctx->ohci->generation == packet->generation) {
1601 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1601 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1602 handle_local_request(ctx, packet); 1602 handle_local_request(ctx, packet);
1603 return; 1603 return;
1604 } 1604 }
1605 1605
1606 ret = at_context_queue_packet(ctx, packet); 1606 ret = at_context_queue_packet(ctx, packet);
1607 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1607 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1608 1608
1609 if (ret < 0) 1609 if (ret < 0)
1610 packet->callback(packet, &ctx->ohci->card, packet->ack); 1610 packet->callback(packet, &ctx->ohci->card, packet->ack);
1611 1611
1612 } 1612 }
1613 1613
1614 static void detect_dead_context(struct fw_ohci *ohci, 1614 static void detect_dead_context(struct fw_ohci *ohci,
1615 const char *name, unsigned int regs) 1615 const char *name, unsigned int regs)
1616 { 1616 {
1617 u32 ctl; 1617 u32 ctl;
1618 1618
1619 ctl = reg_read(ohci, CONTROL_SET(regs)); 1619 ctl = reg_read(ohci, CONTROL_SET(regs));
1620 if (ctl & CONTEXT_DEAD) { 1620 if (ctl & CONTEXT_DEAD) {
1621 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG 1621 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
1622 fw_error("DMA context %s has stopped, error code: %s\n", 1622 fw_error("DMA context %s has stopped, error code: %s\n",
1623 name, evts[ctl & 0x1f]); 1623 name, evts[ctl & 0x1f]);
1624 #else 1624 #else
1625 fw_error("DMA context %s has stopped, error code: %#x\n", 1625 fw_error("DMA context %s has stopped, error code: %#x\n",
1626 name, ctl & 0x1f); 1626 name, ctl & 0x1f);
1627 #endif 1627 #endif
1628 } 1628 }
1629 } 1629 }
1630 1630
1631 static void handle_dead_contexts(struct fw_ohci *ohci) 1631 static void handle_dead_contexts(struct fw_ohci *ohci)
1632 { 1632 {
1633 unsigned int i; 1633 unsigned int i;
1634 char name[8]; 1634 char name[8];
1635 1635
1636 detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase); 1636 detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1637 detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase); 1637 detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1638 detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase); 1638 detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1639 detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase); 1639 detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1640 for (i = 0; i < 32; ++i) { 1640 for (i = 0; i < 32; ++i) {
1641 if (!(ohci->it_context_support & (1 << i))) 1641 if (!(ohci->it_context_support & (1 << i)))
1642 continue; 1642 continue;
1643 sprintf(name, "IT%u", i); 1643 sprintf(name, "IT%u", i);
1644 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i)); 1644 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1645 } 1645 }
1646 for (i = 0; i < 32; ++i) { 1646 for (i = 0; i < 32; ++i) {
1647 if (!(ohci->ir_context_support & (1 << i))) 1647 if (!(ohci->ir_context_support & (1 << i)))
1648 continue; 1648 continue;
1649 sprintf(name, "IR%u", i); 1649 sprintf(name, "IR%u", i);
1650 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i)); 1650 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1651 } 1651 }
1652 /* TODO: maybe try to flush and restart the dead contexts */ 1652 /* TODO: maybe try to flush and restart the dead contexts */
1653 } 1653 }
1654 1654
1655 static u32 cycle_timer_ticks(u32 cycle_timer) 1655 static u32 cycle_timer_ticks(u32 cycle_timer)
1656 { 1656 {
1657 u32 ticks; 1657 u32 ticks;
1658 1658
1659 ticks = cycle_timer & 0xfff; 1659 ticks = cycle_timer & 0xfff;
1660 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); 1660 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1661 ticks += (3072 * 8000) * (cycle_timer >> 25); 1661 ticks += (3072 * 8000) * (cycle_timer >> 25);
1662 1662
1663 return ticks; 1663 return ticks;
1664 } 1664 }
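
For reference, the register packs seconds, cycle count, and cycle offset
into one word; a worked example of the tick conversion above:

#include <stdint.h>
#include <stdio.h>

/* same conversion as cycle_timer_ticks(): seconds[31:25],
   cycleCount[24:12] (8000 cycles/s), cycleOffset[11:0]
   (3072 ticks of the 24.576 MHz clock per cycle) */
static uint32_t ticks(uint32_t ct)
{
	return (ct & 0xfff)
		+ 3072 * ((ct >> 12) & 0x1fff)
		+ 3072 * 8000 * (ct >> 25);
}

int main(void)
{
	/* 1 s, 2 cycles, 5 ticks -> 24576000 + 6144 + 5 */
	uint32_t ct = (1u << 25) | (2u << 12) | 5;

	printf("%u\n", ticks(ct));	/* 24582149 */
	return 0;
}
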
1665 1665
1666 /* 1666 /*
1667 * Some controllers exhibit one or more of the following bugs when updating the 1667 * Some controllers exhibit one or more of the following bugs when updating the
1668 * iso cycle timer register: 1668 * iso cycle timer register:
1669 * - When the lowest six bits are wrapping around to zero, a read that happens 1669 * - When the lowest six bits are wrapping around to zero, a read that happens
1670 * at the same time will return garbage in the lowest ten bits. 1670 * at the same time will return garbage in the lowest ten bits.
1671 * - When the cycleOffset field wraps around to zero, the cycleCount field is 1671 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1672 * not incremented for about 60 ns. 1672 * not incremented for about 60 ns.
1673 * - Occasionally, the entire register reads zero. 1673 * - Occasionally, the entire register reads zero.
1674 * 1674 *
1675 * To catch these, we read the register three times and ensure that the 1675 * To catch these, we read the register three times and ensure that the
1676 * differences between consecutive reads are approximately the same, i.e. 1676 * differences between consecutive reads are approximately the same, i.e.
1677 * neither is more than twice the other. Furthermore, any negative difference indicates an 1677 * neither is more than twice the other. Furthermore, any negative difference indicates an
1678 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to 1678 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1679 * execute, so we have enough precision to compute the ratio of the differences.) 1679 * execute, so we have enough precision to compute the ratio of the differences.)
1680 */ 1680 */
1681 static u32 get_cycle_time(struct fw_ohci *ohci) 1681 static u32 get_cycle_time(struct fw_ohci *ohci)
1682 { 1682 {
1683 u32 c0, c1, c2; 1683 u32 c0, c1, c2;
1684 u32 t0, t1, t2; 1684 u32 t0, t1, t2;
1685 s32 diff01, diff12; 1685 s32 diff01, diff12;
1686 int i; 1686 int i;
1687 1687
1688 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1688 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1689 1689
1690 if (ohci->quirks & QUIRK_CYCLE_TIMER) { 1690 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1691 i = 0; 1691 i = 0;
1692 c1 = c2; 1692 c1 = c2;
1693 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1693 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1694 do { 1694 do {
1695 c0 = c1; 1695 c0 = c1;
1696 c1 = c2; 1696 c1 = c2;
1697 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1697 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1698 t0 = cycle_timer_ticks(c0); 1698 t0 = cycle_timer_ticks(c0);
1699 t1 = cycle_timer_ticks(c1); 1699 t1 = cycle_timer_ticks(c1);
1700 t2 = cycle_timer_ticks(c2); 1700 t2 = cycle_timer_ticks(c2);
1701 diff01 = t1 - t0; 1701 diff01 = t1 - t0;
1702 diff12 = t2 - t1; 1702 diff12 = t2 - t1;
1703 } while ((diff01 <= 0 || diff12 <= 0 || 1703 } while ((diff01 <= 0 || diff12 <= 0 ||
1704 diff01 / diff12 >= 2 || diff12 / diff01 >= 2) 1704 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1705 && i++ < 20); 1705 && i++ < 20);
1706 } 1706 }
1707 1707
1708 return c2; 1708 return c2;
1709 } 1709 }
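
The exit condition of the retry loop can be read as a small predicate; a
sketch of that plausibility test on three tick values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* both deltas must be positive and within a factor of two of each other */
static bool reads_consistent(uint32_t t0, uint32_t t1, uint32_t t2)
{
	int32_t d01 = t1 - t0, d12 = t2 - t1;

	return d01 > 0 && d12 > 0 && d01 / d12 < 2 && d12 / d01 < 2;
}

int main(void)
{
	printf("%d\n", reads_consistent(100, 130, 158)); /* 1: plausible */
	printf("%d\n", reads_consistent(100, 90, 158));	 /* 0: went backwards */
	return 0;
}
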
1710 1710
1711 /* 1711 /*
1712 * This function has to be called at least every 64 seconds. The bus_time 1712 * This function has to be called at least every 64 seconds. The bus_time
1713 * field stores not only the upper 25 bits of the BUS_TIME register but also 1713 * field stores not only the upper 25 bits of the BUS_TIME register but also
1714 * the most significant bit of the cycle timer in bit 6 so that we can detect 1714 * the most significant bit of the cycle timer in bit 6 so that we can detect
1715 * changes in this bit. 1715 * changes in this bit.
1716 */ 1716 */
1717 static u32 update_bus_time(struct fw_ohci *ohci) 1717 static u32 update_bus_time(struct fw_ohci *ohci)
1718 { 1718 {
1719 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; 1719 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1720 1720
1721 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) 1721 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1722 ohci->bus_time += 0x40; 1722 ohci->bus_time += 0x40;
1723 1723
1724 return ohci->bus_time | cycle_time_seconds; 1724 return ohci->bus_time | cycle_time_seconds;
1725 } 1725 }
1726 1726
1727 static int get_status_for_port(struct fw_ohci *ohci, int port_index) 1727 static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1728 { 1728 {
1729 int reg; 1729 int reg;
1730 1730
1731 mutex_lock(&ohci->phy_reg_mutex); 1731 mutex_lock(&ohci->phy_reg_mutex);
1732 reg = write_phy_reg(ohci, 7, port_index); 1732 reg = write_phy_reg(ohci, 7, port_index);
1733 if (reg >= 0) 1733 if (reg >= 0)
1734 reg = read_phy_reg(ohci, 8); 1734 reg = read_phy_reg(ohci, 8);
1735 mutex_unlock(&ohci->phy_reg_mutex); 1735 mutex_unlock(&ohci->phy_reg_mutex);
1736 if (reg < 0) 1736 if (reg < 0)
1737 return reg; 1737 return reg;
1738 1738
1739 switch (reg & 0x0f) { 1739 switch (reg & 0x0f) {
1740 case 0x06: 1740 case 0x06:
1741 return 2; /* is child node (connected to parent node) */ 1741 return 2; /* is child node (connected to parent node) */
1742 case 0x0e: 1742 case 0x0e:
1743 return 3; /* is parent node (connected to child node) */ 1743 return 3; /* is parent node (connected to child node) */
1744 } 1744 }
1745 return 1; /* not connected */ 1745 return 1; /* not connected */
1746 } 1746 }
1747 1747
1748 static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id, 1748 static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1749 int self_id_count) 1749 int self_id_count)
1750 { 1750 {
1751 int i; 1751 int i;
1752 u32 entry; 1752 u32 entry;
1753 1753
1754 for (i = 0; i < self_id_count; i++) { 1754 for (i = 0; i < self_id_count; i++) {
1755 entry = ohci->self_id_buffer[i]; 1755 entry = ohci->self_id_buffer[i];
1756 if ((self_id & 0xff000000) == (entry & 0xff000000)) 1756 if ((self_id & 0xff000000) == (entry & 0xff000000))
1757 return -1; 1757 return -1;
1758 if ((self_id & 0xff000000) < (entry & 0xff000000)) 1758 if ((self_id & 0xff000000) < (entry & 0xff000000))
1759 return i; 1759 return i;
1760 } 1760 }
1761 return i; 1761 return i;
1762 } 1762 }
1763 1763
1764 /* 1764 /*
1765 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally 1765 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1766 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059. 1766 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1767 * Construct the selfID from phy register contents. 1767 * Construct the selfID from phy register contents.
1768 * FIXME: How to determine the selfID.i flag? 1768 * FIXME: How to determine the selfID.i flag?
1769 */ 1769 */
1770 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) 1770 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1771 { 1771 {
1772 int reg, i, pos, status; 1772 int reg, i, pos, status;
1773 /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */ 1773 /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1774 u32 self_id = 0x8040c800; 1774 u32 self_id = 0x8040c800;
1775 1775
1776 reg = reg_read(ohci, OHCI1394_NodeID); 1776 reg = reg_read(ohci, OHCI1394_NodeID);
1777 if (!(reg & OHCI1394_NodeID_idValid)) { 1777 if (!(reg & OHCI1394_NodeID_idValid)) {
1778 fw_notify("node ID not valid, new bus reset in progress\n"); 1778 fw_notify("node ID not valid, new bus reset in progress\n");
1779 return -EBUSY; 1779 return -EBUSY;
1780 } 1780 }
1781 self_id |= ((reg & 0x3f) << 24); /* phy ID */ 1781 self_id |= ((reg & 0x3f) << 24); /* phy ID */
1782 1782
1783 reg = ohci_read_phy_reg(&ohci->card, 4); 1783 reg = ohci_read_phy_reg(&ohci->card, 4);
1784 if (reg < 0) 1784 if (reg < 0)
1785 return reg; 1785 return reg;
1786 self_id |= ((reg & 0x07) << 8); /* power class */ 1786 self_id |= ((reg & 0x07) << 8); /* power class */
1787 1787
1788 reg = ohci_read_phy_reg(&ohci->card, 1); 1788 reg = ohci_read_phy_reg(&ohci->card, 1);
1789 if (reg < 0) 1789 if (reg < 0)
1790 return reg; 1790 return reg;
1791 self_id |= ((reg & 0x3f) << 16); /* gap count */ 1791 self_id |= ((reg & 0x3f) << 16); /* gap count */
1792 1792
1793 for (i = 0; i < 3; i++) { 1793 for (i = 0; i < 3; i++) {
1794 status = get_status_for_port(ohci, i); 1794 status = get_status_for_port(ohci, i);
1795 if (status < 0) 1795 if (status < 0)
1796 return status; 1796 return status;
1797 self_id |= ((status & 0x3) << (6 - (i * 2))); 1797 self_id |= ((status & 0x3) << (6 - (i * 2)));
1798 } 1798 }
1799 1799
1800 pos = get_self_id_pos(ohci, self_id, self_id_count); 1800 pos = get_self_id_pos(ohci, self_id, self_id_count);
1801 if (pos >= 0) { 1801 if (pos >= 0) {
1802 memmove(&(ohci->self_id_buffer[pos+1]), 1802 memmove(&(ohci->self_id_buffer[pos+1]),
1803 &(ohci->self_id_buffer[pos]), 1803 &(ohci->self_id_buffer[pos]),
1804 (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); 1804 (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1805 ohci->self_id_buffer[pos] = self_id; 1805 ohci->self_id_buffer[pos] = self_id;
1806 self_id_count++; 1806 self_id_count++;
1807 } 1807 }
1808 return self_id_count; 1808 return self_id_count;
1809 } 1809 }
1810 1810
1811 static void bus_reset_work(struct work_struct *work) 1811 static void bus_reset_work(struct work_struct *work)
1812 { 1812 {
1813 struct fw_ohci *ohci = 1813 struct fw_ohci *ohci =
1814 container_of(work, struct fw_ohci, bus_reset_work); 1814 container_of(work, struct fw_ohci, bus_reset_work);
1815 int self_id_count, i, j, reg; 1815 int self_id_count, i, j, reg;
1816 int generation, new_generation; 1816 int generation, new_generation;
1817 unsigned long flags; 1817 unsigned long flags;
1818 void *free_rom = NULL; 1818 void *free_rom = NULL;
1819 dma_addr_t free_rom_bus = 0; 1819 dma_addr_t free_rom_bus = 0;
1820 bool is_new_root; 1820 bool is_new_root;
1821 1821
1822 reg = reg_read(ohci, OHCI1394_NodeID); 1822 reg = reg_read(ohci, OHCI1394_NodeID);
1823 if (!(reg & OHCI1394_NodeID_idValid)) { 1823 if (!(reg & OHCI1394_NodeID_idValid)) {
1824 fw_notify("node ID not valid, new bus reset in progress\n"); 1824 fw_notify("node ID not valid, new bus reset in progress\n");
1825 return; 1825 return;
1826 } 1826 }
1827 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { 1827 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1828 fw_notify("malconfigured bus\n"); 1828 fw_notify("malconfigured bus\n");
1829 return; 1829 return;
1830 } 1830 }
1831 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | 1831 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1832 OHCI1394_NodeID_nodeNumber); 1832 OHCI1394_NodeID_nodeNumber);
1833 1833
1834 is_new_root = (reg & OHCI1394_NodeID_root) != 0; 1834 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1835 if (!(ohci->is_root && is_new_root)) 1835 if (!(ohci->is_root && is_new_root))
1836 reg_write(ohci, OHCI1394_LinkControlSet, 1836 reg_write(ohci, OHCI1394_LinkControlSet,
1837 OHCI1394_LinkControl_cycleMaster); 1837 OHCI1394_LinkControl_cycleMaster);
1838 ohci->is_root = is_new_root; 1838 ohci->is_root = is_new_root;
1839 1839
1840 reg = reg_read(ohci, OHCI1394_SelfIDCount); 1840 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1841 if (reg & OHCI1394_SelfIDCount_selfIDError) { 1841 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1842 fw_notify("inconsistent self IDs\n"); 1842 fw_notify("inconsistent self IDs\n");
1843 return; 1843 return;
1844 } 1844 }
1845 /* 1845 /*
1846 * The count in the SelfIDCount register is the number of 1846 * The count in the SelfIDCount register is the number of
1847 * bytes in the self ID receive buffer. Since we also receive 1847 * bytes in the self ID receive buffer. Since we also receive
1848 * the inverted quadlets and a header quadlet, we shift one 1848 * the inverted quadlets and a header quadlet, we shift one
1849 * bit extra to get the actual number of self IDs. 1849 * bit extra to get the actual number of self IDs.
1850 */ 1850 */
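	/*
	 * Worked example (added for illustration): with three self IDs the
	 * buffer holds 1 header quadlet + 3 * 2 quadlets = 7 quadlets
	 * = 28 bytes, and 28 >> 3 = 3; the truncation absorbs the header.
	 */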
1851 self_id_count = (reg >> 3) & 0xff; 1851 self_id_count = (reg >> 3) & 0xff;
1852 1852
1853 if (self_id_count > 252) { 1853 if (self_id_count > 252) {
1854 fw_notify("inconsistent self IDs\n"); 1854 fw_notify("inconsistent self IDs\n");
1855 return; 1855 return;
1856 } 1856 }
1857 1857
1858 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; 1858 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1859 rmb(); 1859 rmb();
1860 1860
1861 for (i = 1, j = 0; j < self_id_count; i += 2, j++) { 1861 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1862 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { 1862 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1863 fw_notify("inconsistent self IDs\n"); 1863 /*
1864 return; 1864 * If the invalid data looks like a cycle start packet,
1865 * it's likely to be the result of the cycle master
1866 * having a wrong gap count. In this case, the self IDs
1867 * so far are valid and should be processed so that the
1868 * bus manager can then correct the gap count.
1869 */
1870 if (cond_le32_to_cpu(ohci->self_id_cpu[i])
1871 == 0xffff008f) {
1872 fw_notify("ignoring spurious self IDs\n");
1873 self_id_count = j;
1874 break;
1875 } else {
1876 fw_notify("inconsistent self IDs\n");
1877 return;
1878 }
1865 } 1879 }
1866 ohci->self_id_buffer[j] = 1880 ohci->self_id_buffer[j] =
1867 cond_le32_to_cpu(ohci->self_id_cpu[i]); 1881 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1868 } 1882 }
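	/*
	 * Decoding sketch (added for clarity, assuming the standard IEEE 1394
	 * asynchronous header layout): 0xffff008f is destination_ID 0xffff
	 * (broadcast), tl 0, rt 0, tcode 0x8 (cycle start), pri 0xf.  No
	 * valid self-ID quadlet can look like this, since self IDs begin
	 * with the bits 10 while 0xffff008f begins with 11.
	 */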
1869 1883
1870 if (ohci->quirks & QUIRK_TI_SLLZ059) { 1884 if (ohci->quirks & QUIRK_TI_SLLZ059) {
1871 self_id_count = find_and_insert_self_id(ohci, self_id_count); 1885 self_id_count = find_and_insert_self_id(ohci, self_id_count);
1872 if (self_id_count < 0) { 1886 if (self_id_count < 0) {
1873 fw_notify("could not construct local self ID\n"); 1887 fw_notify("could not construct local self ID\n");
1874 return; 1888 return;
1875 } 1889 }
1876 } 1890 }
1877 1891
1878 if (self_id_count == 0) { 1892 if (self_id_count == 0) {
1879 fw_notify("inconsistent self IDs\n"); 1893 fw_notify("inconsistent self IDs\n");
1880 return; 1894 return;
1881 } 1895 }
1882 rmb(); 1896 rmb();
1883 1897
1884 /* 1898 /*
1885 * Check the consistency of the self IDs we just read. The 1899 * Check the consistency of the self IDs we just read. The
1886 * problem we face is that a new bus reset can start while we 1900 * problem we face is that a new bus reset can start while we
1887 * read out the self IDs from the DMA buffer. If this happens, 1901 * read out the self IDs from the DMA buffer. If this happens,
1888 * the DMA buffer will be overwritten with new self IDs and we 1902 * the DMA buffer will be overwritten with new self IDs and we
1889 * will read out inconsistent data. The OHCI specification 1903 * will read out inconsistent data. The OHCI specification
1890 * (section 11.2) recommends a technique similar to 1904 * (section 11.2) recommends a technique similar to
1891 * linux/seqlock.h, where we remember the generation of the 1905 * linux/seqlock.h, where we remember the generation of the
1892 * self IDs in the buffer before reading them out and compare 1906 * self IDs in the buffer before reading them out and compare
1893 * it to the current generation after reading them out. If 1907 * it to the current generation after reading them out. If
1894 * the two generations match we know we have a consistent set 1908 * the two generations match we know we have a consistent set
1895 * of self IDs. 1909 * of self IDs.
1896 */ 1910 */
1897 1911
1898 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1912 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1899 if (new_generation != generation) { 1913 if (new_generation != generation) {
1900 fw_notify("recursive bus reset detected, " 1914 fw_notify("recursive bus reset detected, "
1901 "discarding self ids\n"); 1915 "discarding self ids\n");
1902 return; 1916 return;
1903 } 1917 }
1904 1918
1905 /* FIXME: Document how the locking works. */ 1919 /* FIXME: Document how the locking works. */
1906 spin_lock_irqsave(&ohci->lock, flags); 1920 spin_lock_irqsave(&ohci->lock, flags);
1907 1921
1908 ohci->generation = -1; /* prevent AT packet queueing */ 1922 ohci->generation = -1; /* prevent AT packet queueing */
1909 context_stop(&ohci->at_request_ctx); 1923 context_stop(&ohci->at_request_ctx);
1910 context_stop(&ohci->at_response_ctx); 1924 context_stop(&ohci->at_response_ctx);
1911 1925
1912 spin_unlock_irqrestore(&ohci->lock, flags); 1926 spin_unlock_irqrestore(&ohci->lock, flags);
1913 1927
1914 /* 1928 /*
1915 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent 1929 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
1916 * packets in the AT queues and software needs to drain them. 1930 * packets in the AT queues and software needs to drain them.
1917 * Some OHCI 1.1 controllers (JMicron) apparently require this too. 1931 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
1918 */ 1932 */
1919 at_context_flush(&ohci->at_request_ctx); 1933 at_context_flush(&ohci->at_request_ctx);
1920 at_context_flush(&ohci->at_response_ctx); 1934 at_context_flush(&ohci->at_response_ctx);
1921 1935
1922 spin_lock_irqsave(&ohci->lock, flags); 1936 spin_lock_irqsave(&ohci->lock, flags);
1923 1937
1924 ohci->generation = generation; 1938 ohci->generation = generation;
1925 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1939 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1926 1940
1927 if (ohci->quirks & QUIRK_RESET_PACKET) 1941 if (ohci->quirks & QUIRK_RESET_PACKET)
1928 ohci->request_generation = generation; 1942 ohci->request_generation = generation;
1929 1943
1930 /* 1944 /*
1931 * This next bit is unrelated to the AT context stuff but we 1945 * This next bit is unrelated to the AT context stuff but we
1932 * have to do it under the spinlock also. If a new config rom 1946 * have to do it under the spinlock also. If a new config rom
1933 * was set up before this reset, the old one is now no longer 1947 * was set up before this reset, the old one is now no longer
1934 * in use and we can free it. Update the config rom pointers 1948 * in use and we can free it. Update the config rom pointers
1935 * to point to the current config rom and clear the 1949 * to point to the current config rom and clear the
1936 * next_config_rom pointer so a new update can take place. 1950 * next_config_rom pointer so a new update can take place.
1937 */ 1951 */
1938 1952
1939 if (ohci->next_config_rom != NULL) { 1953 if (ohci->next_config_rom != NULL) {
1940 if (ohci->next_config_rom != ohci->config_rom) { 1954 if (ohci->next_config_rom != ohci->config_rom) {
1941 free_rom = ohci->config_rom; 1955 free_rom = ohci->config_rom;
1942 free_rom_bus = ohci->config_rom_bus; 1956 free_rom_bus = ohci->config_rom_bus;
1943 } 1957 }
1944 ohci->config_rom = ohci->next_config_rom; 1958 ohci->config_rom = ohci->next_config_rom;
1945 ohci->config_rom_bus = ohci->next_config_rom_bus; 1959 ohci->config_rom_bus = ohci->next_config_rom_bus;
1946 ohci->next_config_rom = NULL; 1960 ohci->next_config_rom = NULL;
1947 1961
1948 /* 1962 /*
1949 * Restore config_rom image and manually update 1963 * Restore config_rom image and manually update
1950 * config_rom registers. Writing the header quadlet 1964 * config_rom registers. Writing the header quadlet
1951 * will indicate that the config rom is ready, so we 1965 * will indicate that the config rom is ready, so we
1952 * do that last. 1966 * do that last.
1953 */ 1967 */
1954 reg_write(ohci, OHCI1394_BusOptions, 1968 reg_write(ohci, OHCI1394_BusOptions,
1955 be32_to_cpu(ohci->config_rom[2])); 1969 be32_to_cpu(ohci->config_rom[2]));
1956 ohci->config_rom[0] = ohci->next_header; 1970 ohci->config_rom[0] = ohci->next_header;
1957 reg_write(ohci, OHCI1394_ConfigROMhdr, 1971 reg_write(ohci, OHCI1394_ConfigROMhdr,
1958 be32_to_cpu(ohci->next_header)); 1972 be32_to_cpu(ohci->next_header));
1959 } 1973 }
1960 1974
1961 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1975 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1962 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); 1976 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1963 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 1977 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1964 #endif 1978 #endif
1965 1979
1966 spin_unlock_irqrestore(&ohci->lock, flags); 1980 spin_unlock_irqrestore(&ohci->lock, flags);
1967 1981
1968 if (free_rom) 1982 if (free_rom)
1969 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1983 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1970 free_rom, free_rom_bus); 1984 free_rom, free_rom_bus);
1971 1985
1972 log_selfids(ohci->node_id, generation, 1986 log_selfids(ohci->node_id, generation,
1973 self_id_count, ohci->self_id_buffer); 1987 self_id_count, ohci->self_id_buffer);
1974 1988
1975 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 1989 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1976 self_id_count, ohci->self_id_buffer, 1990 self_id_count, ohci->self_id_buffer,
1977 ohci->csr_state_setclear_abdicate); 1991 ohci->csr_state_setclear_abdicate);
1978 ohci->csr_state_setclear_abdicate = false; 1992 ohci->csr_state_setclear_abdicate = false;
1979 } 1993 }
1980 1994
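The generation check in bus_reset_work follows the seqlock-like pattern that OHCI section 11.2 recommends. A minimal standalone sketch of the reader side, with hypothetical names (buf, read_generation), assuming the buffer copy happens between the two generation samples:

	#include <stdint.h>

	/* Returns 0 if the copy is consistent, -1 if a new bus reset
	 * overwrote the buffer while we were reading it. */
	static int read_self_ids_consistent(const volatile uint32_t *buf,
					    unsigned int count,
					    unsigned int (*read_generation)(void),
					    uint32_t *out)
	{
		unsigned int generation = read_generation();
		unsigned int i;

		/* a read barrier (rmb()) goes here in the driver */
		for (i = 0; i < count; i++)
			out[i] = buf[i];
		/* second barrier, then re-sample the generation */
		return read_generation() == generation ? 0 : -1;
	}
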
1981 static irqreturn_t irq_handler(int irq, void *data) 1995 static irqreturn_t irq_handler(int irq, void *data)
1982 { 1996 {
1983 struct fw_ohci *ohci = data; 1997 struct fw_ohci *ohci = data;
1984 u32 event, iso_event; 1998 u32 event, iso_event;
1985 int i; 1999 int i;
1986 2000
1987 event = reg_read(ohci, OHCI1394_IntEventClear); 2001 event = reg_read(ohci, OHCI1394_IntEventClear);
1988 2002
1989 if (!event || !~event) 2003 if (!event || !~event)
1990 return IRQ_NONE; 2004 return IRQ_NONE;
1991 2005
1992 /* 2006 /*
1993 * busReset and postedWriteErr must not be cleared yet 2007 * busReset and postedWriteErr must not be cleared yet
1994 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) 2008 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
1995 */ 2009 */
1996 reg_write(ohci, OHCI1394_IntEventClear, 2010 reg_write(ohci, OHCI1394_IntEventClear,
1997 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); 2011 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
1998 log_irqs(event); 2012 log_irqs(event);
1999 2013
2000 if (event & OHCI1394_selfIDComplete) 2014 if (event & OHCI1394_selfIDComplete)
2001 queue_work(fw_workqueue, &ohci->bus_reset_work); 2015 queue_work(fw_workqueue, &ohci->bus_reset_work);
2002 2016
2003 if (event & OHCI1394_RQPkt) 2017 if (event & OHCI1394_RQPkt)
2004 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 2018 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2005 2019
2006 if (event & OHCI1394_RSPkt) 2020 if (event & OHCI1394_RSPkt)
2007 tasklet_schedule(&ohci->ar_response_ctx.tasklet); 2021 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2008 2022
2009 if (event & OHCI1394_reqTxComplete) 2023 if (event & OHCI1394_reqTxComplete)
2010 tasklet_schedule(&ohci->at_request_ctx.tasklet); 2024 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2011 2025
2012 if (event & OHCI1394_respTxComplete) 2026 if (event & OHCI1394_respTxComplete)
2013 tasklet_schedule(&ohci->at_response_ctx.tasklet); 2027 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2014 2028
2015 if (event & OHCI1394_isochRx) { 2029 if (event & OHCI1394_isochRx) {
2016 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); 2030 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2017 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); 2031 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2018 2032
2019 while (iso_event) { 2033 while (iso_event) {
2020 i = ffs(iso_event) - 1; 2034 i = ffs(iso_event) - 1;
2021 tasklet_schedule( 2035 tasklet_schedule(
2022 &ohci->ir_context_list[i].context.tasklet); 2036 &ohci->ir_context_list[i].context.tasklet);
2023 iso_event &= ~(1 << i); 2037 iso_event &= ~(1 << i);
2024 } 2038 }
2025 } 2039 }
2026 2040
2027 if (event & OHCI1394_isochTx) { 2041 if (event & OHCI1394_isochTx) {
2028 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); 2042 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2029 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); 2043 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2030 2044
2031 while (iso_event) { 2045 while (iso_event) {
2032 i = ffs(iso_event) - 1; 2046 i = ffs(iso_event) - 1;
2033 tasklet_schedule( 2047 tasklet_schedule(
2034 &ohci->it_context_list[i].context.tasklet); 2048 &ohci->it_context_list[i].context.tasklet);
2035 iso_event &= ~(1 << i); 2049 iso_event &= ~(1 << i);
2036 } 2050 }
2037 } 2051 }
2038 2052
2039 if (unlikely(event & OHCI1394_regAccessFail)) 2053 if (unlikely(event & OHCI1394_regAccessFail))
2040 fw_error("Register access failure - " 2054 fw_error("Register access failure - "
2041 "please notify linux1394-devel@lists.sf.net\n"); 2055 "please notify linux1394-devel@lists.sf.net\n");
2042 2056
2043 if (unlikely(event & OHCI1394_postedWriteErr)) { 2057 if (unlikely(event & OHCI1394_postedWriteErr)) {
2044 reg_read(ohci, OHCI1394_PostedWriteAddressHi); 2058 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2045 reg_read(ohci, OHCI1394_PostedWriteAddressLo); 2059 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2046 reg_write(ohci, OHCI1394_IntEventClear, 2060 reg_write(ohci, OHCI1394_IntEventClear,
2047 OHCI1394_postedWriteErr); 2061 OHCI1394_postedWriteErr);
2048 if (printk_ratelimit()) 2062 if (printk_ratelimit())
2049 fw_error("PCI posted write error\n"); 2063 fw_error("PCI posted write error\n");
2050 } 2064 }
2051 2065
2052 if (unlikely(event & OHCI1394_cycleTooLong)) { 2066 if (unlikely(event & OHCI1394_cycleTooLong)) {
2053 if (printk_ratelimit()) 2067 if (printk_ratelimit())
2054 fw_notify("isochronous cycle too long\n"); 2068 fw_notify("isochronous cycle too long\n");
2055 reg_write(ohci, OHCI1394_LinkControlSet, 2069 reg_write(ohci, OHCI1394_LinkControlSet,
2056 OHCI1394_LinkControl_cycleMaster); 2070 OHCI1394_LinkControl_cycleMaster);
2057 } 2071 }
2058 2072
2059 if (unlikely(event & OHCI1394_cycleInconsistent)) { 2073 if (unlikely(event & OHCI1394_cycleInconsistent)) {
2060 /* 2074 /*
2061 * We need to clear this event bit in order to make 2075 * We need to clear this event bit in order to make
2062 * cycleMatch isochronous I/O work. In theory we should 2076 * cycleMatch isochronous I/O work. In theory we should
2063 * stop active cycleMatch iso contexts now and restart 2077 * stop active cycleMatch iso contexts now and restart
2064 * them at least two cycles later. (FIXME?) 2078 * them at least two cycles later. (FIXME?)
2065 */ 2079 */
2066 if (printk_ratelimit()) 2080 if (printk_ratelimit())
2067 fw_notify("isochronous cycle inconsistent\n"); 2081 fw_notify("isochronous cycle inconsistent\n");
2068 } 2082 }
2069 2083
2070 if (unlikely(event & OHCI1394_unrecoverableError)) 2084 if (unlikely(event & OHCI1394_unrecoverableError))
2071 handle_dead_contexts(ohci); 2085 handle_dead_contexts(ohci);
2072 2086
2073 if (event & OHCI1394_cycle64Seconds) { 2087 if (event & OHCI1394_cycle64Seconds) {
2074 spin_lock(&ohci->lock); 2088 spin_lock(&ohci->lock);
2075 update_bus_time(ohci); 2089 update_bus_time(ohci);
2076 spin_unlock(&ohci->lock); 2090 spin_unlock(&ohci->lock);
2077 } else 2091 } else
2078 flush_writes(ohci); 2092 flush_writes(ohci);
2079 2093
2080 return IRQ_HANDLED; 2094 return IRQ_HANDLED;
2081 } 2095 }
2082 2096
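The isochRx/isochTx dispatch in irq_handler iterates over set bits with ffs(). A tiny standalone illustration of the same idiom (the pending contexts are made up):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int pending = 0x29;	/* pretend contexts 0, 3 and 5 raised events */

		while (pending) {
			int i = ffs(pending) - 1;	/* index of the lowest set bit */
			printf("schedule tasklet for context %d\n", i);
			pending &= ~(1u << i);		/* clear it and continue */
		}
		return 0;
	}
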
2083 static int software_reset(struct fw_ohci *ohci) 2097 static int software_reset(struct fw_ohci *ohci)
2084 { 2098 {
2085 u32 val; 2099 u32 val;
2086 int i; 2100 int i;
2087 2101
2088 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); 2102 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2089 for (i = 0; i < 500; i++) { 2103 for (i = 0; i < 500; i++) {
2090 val = reg_read(ohci, OHCI1394_HCControlSet); 2104 val = reg_read(ohci, OHCI1394_HCControlSet);
2091 if (!~val) 2105 if (!~val)
2092 return -ENODEV; /* Card was ejected. */ 2106 return -ENODEV; /* Card was ejected. */
2093 2107
2094 if (!(val & OHCI1394_HCControl_softReset)) 2108 if (!(val & OHCI1394_HCControl_softReset))
2095 return 0; 2109 return 0;
2096 2110
2097 msleep(1); 2111 msleep(1);
2098 } 2112 }
2099 2113
2100 return -EBUSY; 2114 return -EBUSY;
2101 } 2115 }
2102 2116
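Both irq_handler (!~event) and software_reset (!~val) rely on the fact that MMIO reads from a surprise-removed PCI device return all ones. A one-line demonstration of the test:

	#include <assert.h>

	int main(void)
	{
		unsigned int val = 0xffffffffu;	/* what a read from an ejected card returns */

		assert(!~val);			/* ~0xffffffff == 0, so the card is gone */
		return 0;
	}
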
2103 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) 2117 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2104 { 2118 {
2105 size_t size = length * 4; 2119 size_t size = length * 4;
2106 2120
2107 memcpy(dest, src, size); 2121 memcpy(dest, src, size);
2108 if (size < CONFIG_ROM_SIZE) 2122 if (size < CONFIG_ROM_SIZE)
2109 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 2123 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2110 } 2124 }
2111 2125
2112 static int configure_1394a_enhancements(struct fw_ohci *ohci) 2126 static int configure_1394a_enhancements(struct fw_ohci *ohci)
2113 { 2127 {
2114 bool enable_1394a; 2128 bool enable_1394a;
2115 int ret, clear, set, offset; 2129 int ret, clear, set, offset;
2116 2130
2117 /* Check if the driver should configure link and PHY. */ 2131 /* Check if the driver should configure link and PHY. */
2118 if (!(reg_read(ohci, OHCI1394_HCControlSet) & 2132 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2119 OHCI1394_HCControl_programPhyEnable)) 2133 OHCI1394_HCControl_programPhyEnable))
2120 return 0; 2134 return 0;
2121 2135
2122 /* Paranoia: check whether the PHY supports 1394a, too. */ 2136 /* Paranoia: check whether the PHY supports 1394a, too. */
2123 enable_1394a = false; 2137 enable_1394a = false;
2124 ret = read_phy_reg(ohci, 2); 2138 ret = read_phy_reg(ohci, 2);
2125 if (ret < 0) 2139 if (ret < 0)
2126 return ret; 2140 return ret;
2127 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { 2141 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2128 ret = read_paged_phy_reg(ohci, 1, 8); 2142 ret = read_paged_phy_reg(ohci, 1, 8);
2129 if (ret < 0) 2143 if (ret < 0)
2130 return ret; 2144 return ret;
2131 if (ret >= 1) 2145 if (ret >= 1)
2132 enable_1394a = true; 2146 enable_1394a = true;
2133 } 2147 }
2134 2148
2135 if (ohci->quirks & QUIRK_NO_1394A) 2149 if (ohci->quirks & QUIRK_NO_1394A)
2136 enable_1394a = false; 2150 enable_1394a = false;
2137 2151
2138 /* Configure PHY and link consistently. */ 2152 /* Configure PHY and link consistently. */
2139 if (enable_1394a) { 2153 if (enable_1394a) {
2140 clear = 0; 2154 clear = 0;
2141 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2155 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2142 } else { 2156 } else {
2143 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2157 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2144 set = 0; 2158 set = 0;
2145 } 2159 }
2146 ret = update_phy_reg(ohci, 5, clear, set); 2160 ret = update_phy_reg(ohci, 5, clear, set);
2147 if (ret < 0) 2161 if (ret < 0)
2148 return ret; 2162 return ret;
2149 2163
2150 if (enable_1394a) 2164 if (enable_1394a)
2151 offset = OHCI1394_HCControlSet; 2165 offset = OHCI1394_HCControlSet;
2152 else 2166 else
2153 offset = OHCI1394_HCControlClear; 2167 offset = OHCI1394_HCControlClear;
2154 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); 2168 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2155 2169
2156 /* Clean up: configuration has been taken care of. */ 2170 /* Clean up: configuration has been taken care of. */
2157 reg_write(ohci, OHCI1394_HCControlClear, 2171 reg_write(ohci, OHCI1394_HCControlClear,
2158 OHCI1394_HCControl_programPhyEnable); 2172 OHCI1394_HCControl_programPhyEnable);
2159 2173
2160 return 0; 2174 return 0;
2161 } 2175 }
2162 2176
2163 static int probe_tsb41ba3d(struct fw_ohci *ohci) 2177 static int probe_tsb41ba3d(struct fw_ohci *ohci)
2164 { 2178 {
2165 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */ 2179 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2166 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, }; 2180 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2167 int reg, i; 2181 int reg, i;
2168 2182
2169 reg = read_phy_reg(ohci, 2); 2183 reg = read_phy_reg(ohci, 2);
2170 if (reg < 0) 2184 if (reg < 0)
2171 return reg; 2185 return reg;
2172 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS) 2186 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2173 return 0; 2187 return 0;
2174 2188
2175 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { 2189 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2176 reg = read_paged_phy_reg(ohci, 1, i + 10); 2190 reg = read_paged_phy_reg(ohci, 1, i + 10);
2177 if (reg < 0) 2191 if (reg < 0)
2178 return reg; 2192 return reg;
2179 if (reg != id[i]) 2193 if (reg != id[i])
2180 return 0; 2194 return 0;
2181 } 2195 }
2182 return 1; 2196 return 1;
2183 } 2197 }
2184 2198
2185 static int ohci_enable(struct fw_card *card, 2199 static int ohci_enable(struct fw_card *card,
2186 const __be32 *config_rom, size_t length) 2200 const __be32 *config_rom, size_t length)
2187 { 2201 {
2188 struct fw_ohci *ohci = fw_ohci(card); 2202 struct fw_ohci *ohci = fw_ohci(card);
2189 struct pci_dev *dev = to_pci_dev(card->device); 2203 struct pci_dev *dev = to_pci_dev(card->device);
2190 u32 lps, seconds, version, irqs; 2204 u32 lps, seconds, version, irqs;
2191 int i, ret; 2205 int i, ret;
2192 2206
2193 if (software_reset(ohci)) { 2207 if (software_reset(ohci)) {
2194 fw_error("Failed to reset ohci card.\n"); 2208 fw_error("Failed to reset ohci card.\n");
2195 return -EBUSY; 2209 return -EBUSY;
2196 } 2210 }
2197 2211
2198 /* 2212 /*
2199 * Now enable LPS, which we need in order to start accessing 2213 * Now enable LPS, which we need in order to start accessing
2200 * most of the registers. In fact, on some cards (ALI M5251), 2214 * most of the registers. In fact, on some cards (ALI M5251),
2201 * accessing registers in the SClk domain without LPS enabled 2215 * accessing registers in the SClk domain without LPS enabled
2202 * will lock up the machine. Wait 50msec to make sure we have 2216 * will lock up the machine. Wait 50msec to make sure we have
2203 * full link enabled. However, with some cards (well, at least 2217 * full link enabled. However, with some cards (well, at least
2204 * a JMicron PCIe card), we have to try again sometimes. 2218 * a JMicron PCIe card), we have to try again sometimes.
2205 */ 2219 */
2206 reg_write(ohci, OHCI1394_HCControlSet, 2220 reg_write(ohci, OHCI1394_HCControlSet,
2207 OHCI1394_HCControl_LPS | 2221 OHCI1394_HCControl_LPS |
2208 OHCI1394_HCControl_postedWriteEnable); 2222 OHCI1394_HCControl_postedWriteEnable);
2209 flush_writes(ohci); 2223 flush_writes(ohci);
2210 2224
2211 for (lps = 0, i = 0; !lps && i < 3; i++) { 2225 for (lps = 0, i = 0; !lps && i < 3; i++) {
2212 msleep(50); 2226 msleep(50);
2213 lps = reg_read(ohci, OHCI1394_HCControlSet) & 2227 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2214 OHCI1394_HCControl_LPS; 2228 OHCI1394_HCControl_LPS;
2215 } 2229 }
2216 2230
2217 if (!lps) { 2231 if (!lps) {
2218 fw_error("Failed to set Link Power Status\n"); 2232 fw_error("Failed to set Link Power Status\n");
2219 return -EIO; 2233 return -EIO;
2220 } 2234 }
2221 2235
2222 if (ohci->quirks & QUIRK_TI_SLLZ059) { 2236 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2223 ret = probe_tsb41ba3d(ohci); 2237 ret = probe_tsb41ba3d(ohci);
2224 if (ret < 0) 2238 if (ret < 0)
2225 return ret; 2239 return ret;
2226 if (ret) 2240 if (ret)
2227 fw_notify("local TSB41BA3D phy\n"); 2241 fw_notify("local TSB41BA3D phy\n");
2228 else 2242 else
2229 ohci->quirks &= ~QUIRK_TI_SLLZ059; 2243 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2230 } 2244 }
2231 2245
2232 reg_write(ohci, OHCI1394_HCControlClear, 2246 reg_write(ohci, OHCI1394_HCControlClear,
2233 OHCI1394_HCControl_noByteSwapData); 2247 OHCI1394_HCControl_noByteSwapData);
2234 2248
2235 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 2249 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2236 reg_write(ohci, OHCI1394_LinkControlSet, 2250 reg_write(ohci, OHCI1394_LinkControlSet,
2237 OHCI1394_LinkControl_cycleTimerEnable | 2251 OHCI1394_LinkControl_cycleTimerEnable |
2238 OHCI1394_LinkControl_cycleMaster); 2252 OHCI1394_LinkControl_cycleMaster);
2239 2253
2240 reg_write(ohci, OHCI1394_ATRetries, 2254 reg_write(ohci, OHCI1394_ATRetries,
2241 OHCI1394_MAX_AT_REQ_RETRIES | 2255 OHCI1394_MAX_AT_REQ_RETRIES |
2242 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 2256 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2243 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 2257 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2244 (200 << 16)); 2258 (200 << 16));
2245 2259
2246 seconds = lower_32_bits(get_seconds()); 2260 seconds = lower_32_bits(get_seconds());
2247 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); 2261 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
2248 ohci->bus_time = seconds & ~0x3f; 2262 ohci->bus_time = seconds & ~0x3f;
2249 2263
2250 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2264 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2251 if (version >= OHCI_VERSION_1_1) { 2265 if (version >= OHCI_VERSION_1_1) {
2252 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 2266 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2253 0xfffffffe); 2267 0xfffffffe);
2254 card->broadcast_channel_auto_allocated = true; 2268 card->broadcast_channel_auto_allocated = true;
2255 } 2269 }
2256 2270
2257 /* Get implemented bits of the priority arbitration request counter. */ 2271 /* Get implemented bits of the priority arbitration request counter. */
2258 reg_write(ohci, OHCI1394_FairnessControl, 0x3f); 2272 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2259 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; 2273 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2260 reg_write(ohci, OHCI1394_FairnessControl, 0); 2274 reg_write(ohci, OHCI1394_FairnessControl, 0);
2261 card->priority_budget_implemented = ohci->pri_req_max != 0; 2275 card->priority_budget_implemented = ohci->pri_req_max != 0;
2262 2276
2263 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); 2277 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
2264 reg_write(ohci, OHCI1394_IntEventClear, ~0); 2278 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2265 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 2279 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2266 2280
2267 ret = configure_1394a_enhancements(ohci); 2281 ret = configure_1394a_enhancements(ohci);
2268 if (ret < 0) 2282 if (ret < 0)
2269 return ret; 2283 return ret;
2270 2284
2271 /* Activate link_on bit and contender bit in our self ID packets. */ 2285 /* Activate link_on bit and contender bit in our self ID packets. */
2272 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); 2286 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2273 if (ret < 0) 2287 if (ret < 0)
2274 return ret; 2288 return ret;
2275 2289
2276 /* 2290 /*
2277 * When the link is not yet enabled, the atomic config rom 2291 * When the link is not yet enabled, the atomic config rom
2278 * update mechanism described below in ohci_set_config_rom() 2292 * update mechanism described below in ohci_set_config_rom()
2279 * is not active. We have to update ConfigRomHeader and 2293 * is not active. We have to update ConfigRomHeader and
2280 * BusOptions manually, and the write to ConfigROMmap takes 2294 * BusOptions manually, and the write to ConfigROMmap takes
2281 * effect immediately. We tie this to the enabling of the 2295 * effect immediately. We tie this to the enabling of the
2282 * link, so we have a valid config rom before enabling - the 2296 * link, so we have a valid config rom before enabling - the
2283 * OHCI requires that ConfigROMhdr and BusOptions have valid 2297 * OHCI requires that ConfigROMhdr and BusOptions have valid
2284 * values before enabling. 2298 * values before enabling.
2285 * 2299 *
2286 * However, when the ConfigROMmap is written, some controllers 2300 * However, when the ConfigROMmap is written, some controllers
2287 * always read back quadlets 0 and 2 from the config rom to 2301 * always read back quadlets 0 and 2 from the config rom to
2288 * the ConfigRomHeader and BusOptions registers on bus reset. 2302 * the ConfigRomHeader and BusOptions registers on bus reset.
2289 * They shouldn't do that in this initial case where the link 2303 * They shouldn't do that in this initial case where the link
2290 * isn't enabled. This means we have to use the same 2304 * isn't enabled. This means we have to use the same
2291 * workaround here, setting the bus header to 0 and then write 2305 * workaround here, setting the bus header to 0 and then write
2292 * the right values in the bus reset tasklet. 2306 * the right values in the bus reset tasklet.
2293 */ 2307 */
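	/*
	 * Ordering sketch of that workaround (illustrative pseudocode, not
	 * driver API):
	 *   rom[0] = 0;                        header 0 = "ROM not ready"
	 *   write(ConfigROMhdr, 0);
	 *   write(BusOptions, rom[2]);
	 *   write(ConfigROMmap, rom_bus);
	 *   ... bus reset ...
	 *   rom[0] = saved_header;             image becomes valid
	 *   write(ConfigROMhdr, saved_header); header written last
	 */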
2294 2308
2295 if (config_rom) { 2309 if (config_rom) {
2296 ohci->next_config_rom = 2310 ohci->next_config_rom =
2297 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2311 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2298 &ohci->next_config_rom_bus, 2312 &ohci->next_config_rom_bus,
2299 GFP_KERNEL); 2313 GFP_KERNEL);
2300 if (ohci->next_config_rom == NULL) 2314 if (ohci->next_config_rom == NULL)
2301 return -ENOMEM; 2315 return -ENOMEM;
2302 2316
2303 copy_config_rom(ohci->next_config_rom, config_rom, length); 2317 copy_config_rom(ohci->next_config_rom, config_rom, length);
2304 } else { 2318 } else {
2305 /* 2319 /*
2306 * In the suspend case, config_rom is NULL, which 2320 * In the suspend case, config_rom is NULL, which
2307 * means that we just reuse the old config rom. 2321 * means that we just reuse the old config rom.
2308 */ 2322 */
2309 ohci->next_config_rom = ohci->config_rom; 2323 ohci->next_config_rom = ohci->config_rom;
2310 ohci->next_config_rom_bus = ohci->config_rom_bus; 2324 ohci->next_config_rom_bus = ohci->config_rom_bus;
2311 } 2325 }
2312 2326
2313 ohci->next_header = ohci->next_config_rom[0]; 2327 ohci->next_header = ohci->next_config_rom[0];
2314 ohci->next_config_rom[0] = 0; 2328 ohci->next_config_rom[0] = 0;
2315 reg_write(ohci, OHCI1394_ConfigROMhdr, 0); 2329 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2316 reg_write(ohci, OHCI1394_BusOptions, 2330 reg_write(ohci, OHCI1394_BusOptions,
2317 be32_to_cpu(ohci->next_config_rom[2])); 2331 be32_to_cpu(ohci->next_config_rom[2]));
2318 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2332 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2319 2333
2320 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 2334 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2321 2335
2322 if (!(ohci->quirks & QUIRK_NO_MSI)) 2336 if (!(ohci->quirks & QUIRK_NO_MSI))
2323 pci_enable_msi(dev); 2337 pci_enable_msi(dev);
2324 if (request_irq(dev->irq, irq_handler, 2338 if (request_irq(dev->irq, irq_handler,
2325 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 2339 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
2326 ohci_driver_name, ohci)) { 2340 ohci_driver_name, ohci)) {
2327 fw_error("Failed to allocate interrupt %d.\n", dev->irq); 2341 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2328 pci_disable_msi(dev); 2342 pci_disable_msi(dev);
2329 2343
2330 if (config_rom) { 2344 if (config_rom) {
2331 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2345 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2332 ohci->next_config_rom, 2346 ohci->next_config_rom,
2333 ohci->next_config_rom_bus); 2347 ohci->next_config_rom_bus);
2334 ohci->next_config_rom = NULL; 2348 ohci->next_config_rom = NULL;
2335 } 2349 }
2336 return -EIO; 2350 return -EIO;
2337 } 2351 }
2338 2352
2339 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 2353 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2340 OHCI1394_RQPkt | OHCI1394_RSPkt | 2354 OHCI1394_RQPkt | OHCI1394_RSPkt |
2341 OHCI1394_isochTx | OHCI1394_isochRx | 2355 OHCI1394_isochTx | OHCI1394_isochRx |
2342 OHCI1394_postedWriteErr | 2356 OHCI1394_postedWriteErr |
2343 OHCI1394_selfIDComplete | 2357 OHCI1394_selfIDComplete |
2344 OHCI1394_regAccessFail | 2358 OHCI1394_regAccessFail |
2345 OHCI1394_cycle64Seconds | 2359 OHCI1394_cycle64Seconds |
2346 OHCI1394_cycleInconsistent | 2360 OHCI1394_cycleInconsistent |
2347 OHCI1394_unrecoverableError | 2361 OHCI1394_unrecoverableError |
2348 OHCI1394_cycleTooLong | 2362 OHCI1394_cycleTooLong |
2349 OHCI1394_masterIntEnable; 2363 OHCI1394_masterIntEnable;
2350 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 2364 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2351 irqs |= OHCI1394_busReset; 2365 irqs |= OHCI1394_busReset;
2352 reg_write(ohci, OHCI1394_IntMaskSet, irqs); 2366 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2353 2367
2354 reg_write(ohci, OHCI1394_HCControlSet, 2368 reg_write(ohci, OHCI1394_HCControlSet,
2355 OHCI1394_HCControl_linkEnable | 2369 OHCI1394_HCControl_linkEnable |
2356 OHCI1394_HCControl_BIBimageValid); 2370 OHCI1394_HCControl_BIBimageValid);
2357 2371
2358 reg_write(ohci, OHCI1394_LinkControlSet, 2372 reg_write(ohci, OHCI1394_LinkControlSet,
2359 OHCI1394_LinkControl_rcvSelfID | 2373 OHCI1394_LinkControl_rcvSelfID |
2360 OHCI1394_LinkControl_rcvPhyPkt); 2374 OHCI1394_LinkControl_rcvPhyPkt);
2361 2375
2362 ar_context_run(&ohci->ar_request_ctx); 2376 ar_context_run(&ohci->ar_request_ctx);
2363 ar_context_run(&ohci->ar_response_ctx); 2377 ar_context_run(&ohci->ar_response_ctx);
2364 2378
2365 flush_writes(ohci); 2379 flush_writes(ohci);
2366 2380
2367 /* We are ready to go, reset bus to finish initialization. */ 2381 /* We are ready to go, reset bus to finish initialization. */
2368 fw_schedule_bus_reset(&ohci->card, false, true); 2382 fw_schedule_bus_reset(&ohci->card, false, true);
2369 2383
2370 return 0; 2384 return 0;
2371 } 2385 }
2372 2386
2373 static int ohci_set_config_rom(struct fw_card *card, 2387 static int ohci_set_config_rom(struct fw_card *card,
2374 const __be32 *config_rom, size_t length) 2388 const __be32 *config_rom, size_t length)
2375 { 2389 {
2376 struct fw_ohci *ohci; 2390 struct fw_ohci *ohci;
2377 unsigned long flags; 2391 unsigned long flags;
2378 __be32 *next_config_rom; 2392 __be32 *next_config_rom;
2379 dma_addr_t uninitialized_var(next_config_rom_bus); 2393 dma_addr_t uninitialized_var(next_config_rom_bus);
2380 2394
2381 ohci = fw_ohci(card); 2395 ohci = fw_ohci(card);
2382 2396
2383 /* 2397 /*
2384 * When the OHCI controller is enabled, the config rom update 2398 * When the OHCI controller is enabled, the config rom update
2385 * mechanism is a bit tricky, but easy enough to use. See 2399 * mechanism is a bit tricky, but easy enough to use. See
2386 * section 5.5.6 in the OHCI specification. 2400 * section 5.5.6 in the OHCI specification.
2387 * 2401 *
2388 * The OHCI controller caches the new config rom address in a 2402 * The OHCI controller caches the new config rom address in a
2389 * shadow register (ConfigROMmapNext) and needs a bus reset 2403 * shadow register (ConfigROMmapNext) and needs a bus reset
2390 * for the changes to take place. When the bus reset is 2404 * for the changes to take place. When the bus reset is
2391 * detected, the controller loads the new values for the 2405 * detected, the controller loads the new values for the
2392 * ConfigRomHeader and BusOptions registers from the specified 2406 * ConfigRomHeader and BusOptions registers from the specified
2393 * config rom and loads ConfigROMmap from the ConfigROMmapNext 2407 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2394 * shadow register. All automatically and atomically. 2408 * shadow register. All automatically and atomically.
2395 * 2409 *
2396 * Now, there's a twist to this story. The automatic load of 2410 * Now, there's a twist to this story. The automatic load of
2397 * ConfigRomHeader and BusOptions doesn't honor the 2411 * ConfigRomHeader and BusOptions doesn't honor the
2398 * noByteSwapData bit, so with a be32 config rom, the 2412 * noByteSwapData bit, so with a be32 config rom, the
2399 * controller will load be32 values into these registers 2413 * controller will load be32 values into these registers
2400 * during the atomic update, even on little endian 2414 * during the atomic update, even on little endian
2401 * architectures. The workaround we use is to put a 0 in the 2415 * architectures. The workaround we use is to put a 0 in the
2402 * header quadlet; 0 is endian agnostic and means that the 2416 * header quadlet; 0 is endian agnostic and means that the
2403 * config rom isn't ready yet. In the bus reset tasklet we 2417 * config rom isn't ready yet. In the bus reset tasklet we
2404 * then set up the real values for the two registers. 2418 * then set up the real values for the two registers.
2405 * 2419 *
2406 * We use ohci->lock to avoid racing with the code that sets 2420 * We use ohci->lock to avoid racing with the code that sets
2407 * ohci->next_config_rom to NULL (see bus_reset_work). 2421 * ohci->next_config_rom to NULL (see bus_reset_work).
2408 */ 2422 */
2409 2423
2410 next_config_rom = 2424 next_config_rom =
2411 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2425 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2412 &next_config_rom_bus, GFP_KERNEL); 2426 &next_config_rom_bus, GFP_KERNEL);
2413 if (next_config_rom == NULL) 2427 if (next_config_rom == NULL)
2414 return -ENOMEM; 2428 return -ENOMEM;
2415 2429
2416 spin_lock_irqsave(&ohci->lock, flags); 2430 spin_lock_irqsave(&ohci->lock, flags);
2417 2431
2418 /* 2432 /*
2419 * If there is not an already pending config_rom update, 2433 * If there is not an already pending config_rom update,
2420 * push our new allocation into the ohci->next_config_rom 2434 * push our new allocation into the ohci->next_config_rom
2421 * and then mark the local variable as null so that we 2435 * and then mark the local variable as null so that we
2422 * won't deallocate the new buffer. 2436 * won't deallocate the new buffer.
2423 * 2437 *
2424 * OTOH, if there is a pending config_rom update, just 2438 * OTOH, if there is a pending config_rom update, just
2425 * use that buffer with the new config_rom data, and 2439 * use that buffer with the new config_rom data, and
2426 * let this routine free the unused DMA allocation. 2440 * let this routine free the unused DMA allocation.
2427 */ 2441 */
2428 2442
2429 if (ohci->next_config_rom == NULL) { 2443 if (ohci->next_config_rom == NULL) {
2430 ohci->next_config_rom = next_config_rom; 2444 ohci->next_config_rom = next_config_rom;
2431 ohci->next_config_rom_bus = next_config_rom_bus; 2445 ohci->next_config_rom_bus = next_config_rom_bus;
2432 next_config_rom = NULL; 2446 next_config_rom = NULL;
2433 } 2447 }
2434 2448
2435 copy_config_rom(ohci->next_config_rom, config_rom, length); 2449 copy_config_rom(ohci->next_config_rom, config_rom, length);
2436 2450
2437 ohci->next_header = config_rom[0]; 2451 ohci->next_header = config_rom[0];
2438 ohci->next_config_rom[0] = 0; 2452 ohci->next_config_rom[0] = 0;
2439 2453
2440 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2454 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2441 2455
2442 spin_unlock_irqrestore(&ohci->lock, flags); 2456 spin_unlock_irqrestore(&ohci->lock, flags);
2443 2457
2444 /* If we didn't use the DMA allocation, delete it. */ 2458 /* If we didn't use the DMA allocation, delete it. */
2445 if (next_config_rom != NULL) 2459 if (next_config_rom != NULL)
2446 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2460 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2447 next_config_rom, next_config_rom_bus); 2461 next_config_rom, next_config_rom_bus);
2448 2462
2449 /* 2463 /*
2450 * Now initiate a bus reset to have the changes take 2464 * Now initiate a bus reset to have the changes take
2451 * effect. We clean up the old config rom memory and DMA 2465 * effect. We clean up the old config rom memory and DMA
2452 * mappings in the bus reset tasklet, since the OHCI 2466 * mappings in the bus reset tasklet, since the OHCI
2453 * controller could need to access it before the bus reset 2467 * controller could need to access it before the bus reset
2454 * takes effect. 2468 * takes effect.
2455 */ 2469 */
2456 2470
2457 fw_schedule_bus_reset(&ohci->card, true, true); 2471 fw_schedule_bus_reset(&ohci->card, true, true);
2458 2472
2459 return 0; 2473 return 0;
2460 } 2474 }
2461 2475
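ohci_set_config_rom and bus_reset_work together form a swap-under-lock, free-outside-lock handoff: dma_free_coherent() is not safe to call with interrupts disabled on all architectures, so the superseded buffer is only remembered inside the critical section and released afterwards. A generic standalone sketch of that pattern, with hypothetical names:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static void *current_buf, *next_buf;

	/* Publish the pending buffer; free the superseded one only after
	 * dropping the lock, mirroring free_rom/free_rom_bus above. */
	static void publish_pending(void)
	{
		void *free_me = NULL;

		pthread_mutex_lock(&lock);
		if (next_buf != NULL) {
			if (next_buf != current_buf)
				free_me = current_buf;	/* old image now unused */
			current_buf = next_buf;
			next_buf = NULL;
		}
		pthread_mutex_unlock(&lock);

		free(free_me);	/* free(NULL) is a harmless no-op */
	}
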
2462 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 2476 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2463 { 2477 {
2464 struct fw_ohci *ohci = fw_ohci(card); 2478 struct fw_ohci *ohci = fw_ohci(card);
2465 2479
2466 at_context_transmit(&ohci->at_request_ctx, packet); 2480 at_context_transmit(&ohci->at_request_ctx, packet);
2467 } 2481 }
2468 2482
2469 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) 2483 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2470 { 2484 {
2471 struct fw_ohci *ohci = fw_ohci(card); 2485 struct fw_ohci *ohci = fw_ohci(card);
2472 2486
2473 at_context_transmit(&ohci->at_response_ctx, packet); 2487 at_context_transmit(&ohci->at_response_ctx, packet);
2474 } 2488 }
2475 2489
2476 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) 2490 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2477 { 2491 {
2478 struct fw_ohci *ohci = fw_ohci(card); 2492 struct fw_ohci *ohci = fw_ohci(card);
2479 struct context *ctx = &ohci->at_request_ctx; 2493 struct context *ctx = &ohci->at_request_ctx;
2480 struct driver_data *driver_data = packet->driver_data; 2494 struct driver_data *driver_data = packet->driver_data;
2481 int ret = -ENOENT; 2495 int ret = -ENOENT;
2482 2496
2483 tasklet_disable(&ctx->tasklet); 2497 tasklet_disable(&ctx->tasklet);
2484 2498
2485 if (packet->ack != 0) 2499 if (packet->ack != 0)
2486 goto out; 2500 goto out;
2487 2501
2488 if (packet->payload_mapped) 2502 if (packet->payload_mapped)
2489 dma_unmap_single(ohci->card.device, packet->payload_bus, 2503 dma_unmap_single(ohci->card.device, packet->payload_bus,
2490 packet->payload_length, DMA_TO_DEVICE); 2504 packet->payload_length, DMA_TO_DEVICE);
2491 2505
2492 log_ar_at_event('T', packet->speed, packet->header, 0x20); 2506 log_ar_at_event('T', packet->speed, packet->header, 0x20);
2493 driver_data->packet = NULL; 2507 driver_data->packet = NULL;
2494 packet->ack = RCODE_CANCELLED; 2508 packet->ack = RCODE_CANCELLED;
2495 packet->callback(packet, &ohci->card, packet->ack); 2509 packet->callback(packet, &ohci->card, packet->ack);
2496 ret = 0; 2510 ret = 0;
2497 out: 2511 out:
2498 tasklet_enable(&ctx->tasklet); 2512 tasklet_enable(&ctx->tasklet);
2499 2513
2500 return ret; 2514 return ret;
2501 } 2515 }
2502 2516
2503 static int ohci_enable_phys_dma(struct fw_card *card, 2517 static int ohci_enable_phys_dma(struct fw_card *card,
2504 int node_id, int generation) 2518 int node_id, int generation)
2505 { 2519 {
2506 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 2520 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
2507 return 0; 2521 return 0;
2508 #else 2522 #else
2509 struct fw_ohci *ohci = fw_ohci(card); 2523 struct fw_ohci *ohci = fw_ohci(card);
2510 unsigned long flags; 2524 unsigned long flags;
2511 int n, ret = 0; 2525 int n, ret = 0;
2512 2526
2513 /* 2527 /*
2514 * FIXME: Make sure this bitmask is cleared when we clear the busReset 2528 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2515 * interrupt bit. Clear physReqResourceAllBuses on bus reset. 2529 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2516 */ 2530 */
2517 2531
2518 spin_lock_irqsave(&ohci->lock, flags); 2532 spin_lock_irqsave(&ohci->lock, flags);
2519 2533
2520 if (ohci->generation != generation) { 2534 if (ohci->generation != generation) {
2521 ret = -ESTALE; 2535 ret = -ESTALE;
2522 goto out; 2536 goto out;
2523 } 2537 }
2524 2538
2525 /* 2539 /*
2526 * Note, if the node ID contains a non-local bus ID, physical DMA is 2540 * Note, if the node ID contains a non-local bus ID, physical DMA is
2527 * enabled for _all_ nodes on remote buses. 2541 * enabled for _all_ nodes on remote buses.
2528 */ 2542 */
2529 2543
2530 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 2544 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2531 if (n < 32) 2545 if (n < 32)
2532 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); 2546 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2533 else 2547 else
2534 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); 2548 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2535 2549
2536 flush_writes(ohci); 2550 flush_writes(ohci);
2537 out: 2551 out:
2538 spin_unlock_irqrestore(&ohci->lock, flags); 2552 spin_unlock_irqrestore(&ohci->lock, flags);
2539 2553
2540 return ret; 2554 return ret;
2541 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 2555 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
2542 } 2556 }
2543 2557
2544 static u32 ohci_read_csr(struct fw_card *card, int csr_offset) 2558 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2545 { 2559 {
2546 struct fw_ohci *ohci = fw_ohci(card); 2560 struct fw_ohci *ohci = fw_ohci(card);
2547 unsigned long flags; 2561 unsigned long flags;
2548 u32 value; 2562 u32 value;
2549 2563
2550 switch (csr_offset) { 2564 switch (csr_offset) {
2551 case CSR_STATE_CLEAR: 2565 case CSR_STATE_CLEAR:
2552 case CSR_STATE_SET: 2566 case CSR_STATE_SET:
2553 if (ohci->is_root && 2567 if (ohci->is_root &&
2554 (reg_read(ohci, OHCI1394_LinkControlSet) & 2568 (reg_read(ohci, OHCI1394_LinkControlSet) &
2555 OHCI1394_LinkControl_cycleMaster)) 2569 OHCI1394_LinkControl_cycleMaster))
2556 value = CSR_STATE_BIT_CMSTR; 2570 value = CSR_STATE_BIT_CMSTR;
2557 else 2571 else
2558 value = 0; 2572 value = 0;
2559 if (ohci->csr_state_setclear_abdicate) 2573 if (ohci->csr_state_setclear_abdicate)
2560 value |= CSR_STATE_BIT_ABDICATE; 2574 value |= CSR_STATE_BIT_ABDICATE;
2561 2575
2562 return value; 2576 return value;
2563 2577
2564 case CSR_NODE_IDS: 2578 case CSR_NODE_IDS:
2565 return reg_read(ohci, OHCI1394_NodeID) << 16; 2579 return reg_read(ohci, OHCI1394_NodeID) << 16;
2566 2580
2567 case CSR_CYCLE_TIME: 2581 case CSR_CYCLE_TIME:
2568 return get_cycle_time(ohci); 2582 return get_cycle_time(ohci);
2569 2583
2570 case CSR_BUS_TIME: 2584 case CSR_BUS_TIME:
2571 /* 2585 /*
2572 * We might be called just after the cycle timer has wrapped 2586 * We might be called just after the cycle timer has wrapped
2573 * around but just before the cycle64Seconds handler, so we 2587 * around but just before the cycle64Seconds handler, so we
2574 * better check here, too, if the bus time needs to be updated. 2588 * better check here, too, if the bus time needs to be updated.
2575 */ 2589 */
2576 spin_lock_irqsave(&ohci->lock, flags); 2590 spin_lock_irqsave(&ohci->lock, flags);
2577 value = update_bus_time(ohci); 2591 value = update_bus_time(ohci);
2578 spin_unlock_irqrestore(&ohci->lock, flags); 2592 spin_unlock_irqrestore(&ohci->lock, flags);
2579 return value; 2593 return value;
2580 2594
2581 case CSR_BUSY_TIMEOUT: 2595 case CSR_BUSY_TIMEOUT:
2582 value = reg_read(ohci, OHCI1394_ATRetries); 2596 value = reg_read(ohci, OHCI1394_ATRetries);
2583 return (value >> 4) & 0x0ffff00f; 2597 return (value >> 4) & 0x0ffff00f;
2584 2598
2585 case CSR_PRIORITY_BUDGET: 2599 case CSR_PRIORITY_BUDGET:
2586 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | 2600 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2587 (ohci->pri_req_max << 8); 2601 (ohci->pri_req_max << 8);
2588 2602
2589 default: 2603 default:
2590 WARN_ON(1); 2604 WARN_ON(1);
2591 return 0; 2605 return 0;
2592 } 2606 }
2593 } 2607 }
2594 2608
2595 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) 2609 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2596 { 2610 {
2597 struct fw_ohci *ohci = fw_ohci(card); 2611 struct fw_ohci *ohci = fw_ohci(card);
2598 unsigned long flags; 2612 unsigned long flags;
2599 2613
2600 switch (csr_offset) { 2614 switch (csr_offset) {
2601 case CSR_STATE_CLEAR: 2615 case CSR_STATE_CLEAR:
2602 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2616 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2603 reg_write(ohci, OHCI1394_LinkControlClear, 2617 reg_write(ohci, OHCI1394_LinkControlClear,
2604 OHCI1394_LinkControl_cycleMaster); 2618 OHCI1394_LinkControl_cycleMaster);
2605 flush_writes(ohci); 2619 flush_writes(ohci);
2606 } 2620 }
2607 if (value & CSR_STATE_BIT_ABDICATE) 2621 if (value & CSR_STATE_BIT_ABDICATE)
2608 ohci->csr_state_setclear_abdicate = false; 2622 ohci->csr_state_setclear_abdicate = false;
2609 break; 2623 break;
2610 2624
2611 case CSR_STATE_SET: 2625 case CSR_STATE_SET:
2612 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2626 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2613 reg_write(ohci, OHCI1394_LinkControlSet, 2627 reg_write(ohci, OHCI1394_LinkControlSet,
2614 OHCI1394_LinkControl_cycleMaster); 2628 OHCI1394_LinkControl_cycleMaster);
2615 flush_writes(ohci); 2629 flush_writes(ohci);
2616 } 2630 }
2617 if (value & CSR_STATE_BIT_ABDICATE) 2631 if (value & CSR_STATE_BIT_ABDICATE)
2618 ohci->csr_state_setclear_abdicate = true; 2632 ohci->csr_state_setclear_abdicate = true;
2619 break; 2633 break;
2620 2634
2621 case CSR_NODE_IDS: 2635 case CSR_NODE_IDS:
2622 reg_write(ohci, OHCI1394_NodeID, value >> 16); 2636 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2623 flush_writes(ohci); 2637 flush_writes(ohci);
2624 break; 2638 break;
2625 2639
2626 case CSR_CYCLE_TIME: 2640 case CSR_CYCLE_TIME:
2627 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); 2641 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2628 reg_write(ohci, OHCI1394_IntEventSet, 2642 reg_write(ohci, OHCI1394_IntEventSet,
2629 OHCI1394_cycleInconsistent); 2643 OHCI1394_cycleInconsistent);
2630 flush_writes(ohci); 2644 flush_writes(ohci);
2631 break; 2645 break;
2632 2646
2633 case CSR_BUS_TIME: 2647 case CSR_BUS_TIME:
2634 spin_lock_irqsave(&ohci->lock, flags); 2648 spin_lock_irqsave(&ohci->lock, flags);
2635 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); 2649 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
2636 spin_unlock_irqrestore(&ohci->lock, flags); 2650 spin_unlock_irqrestore(&ohci->lock, flags);
2637 break; 2651 break;
2638 2652
2639 case CSR_BUSY_TIMEOUT: 2653 case CSR_BUSY_TIMEOUT:
2640 value = (value & 0xf) | ((value & 0xf) << 4) | 2654 value = (value & 0xf) | ((value & 0xf) << 4) |
2641 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); 2655 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2642 reg_write(ohci, OHCI1394_ATRetries, value); 2656 reg_write(ohci, OHCI1394_ATRetries, value);
2643 flush_writes(ohci); 2657 flush_writes(ohci);
2644 break; 2658 break;
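	/*
	 * Round-trip check (worked example, added for illustration): writing
	 * 0x1003 stores (3) | (3 << 4) | (3 << 8) | (0x1000 << 4) = 0x10333
	 * in ATRetries; reading back gives (0x10333 >> 4) & 0x0ffff00f
	 * = 0x1003 again.
	 */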
2645 2659
2646 case CSR_PRIORITY_BUDGET: 2660 case CSR_PRIORITY_BUDGET:
2647 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); 2661 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2648 flush_writes(ohci); 2662 flush_writes(ohci);
2649 break; 2663 break;
2650 2664
2651 default: 2665 default:
2652 WARN_ON(1); 2666 WARN_ON(1);
2653 break; 2667 break;
2654 } 2668 }
2655 } 2669 }
2656 2670
2657 static void copy_iso_headers(struct iso_context *ctx, void *p) 2671 static void copy_iso_headers(struct iso_context *ctx, void *p)
2658 { 2672 {
2659 int i = ctx->header_length; 2673 int i = ctx->header_length;
2660 2674
2661 if (i + ctx->base.header_size > PAGE_SIZE) 2675 if (i + ctx->base.header_size > PAGE_SIZE)
2662 return; 2676 return;
2663 2677
2664 /* 2678 /*
2665 * The iso header is byteswapped to little endian by 2679 * The iso header is byteswapped to little endian by
2666 * the controller, but the remaining header quadlets 2680 * the controller, but the remaining header quadlets
2667 * are big endian. We want to present all the headers 2681 * are big endian. We want to present all the headers
2668 * as big endian, so we have to swap the first quadlet. 2682 * as big endian, so we have to swap the first quadlet.
2669 */ 2683 */
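	/*
	 * E.g. (illustrative): with header_size 8, the quadlet at p + 4 is
	 * stored at header[i] and the quadlet at p at header[i + 4], both
	 * run through __swab32(); any further quadlets are copied verbatim.
	 */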
2670 if (ctx->base.header_size > 0) 2684 if (ctx->base.header_size > 0)
2671 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); 2685 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
2672 if (ctx->base.header_size > 4) 2686 if (ctx->base.header_size > 4)
2673 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); 2687 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
2674 if (ctx->base.header_size > 8) 2688 if (ctx->base.header_size > 8)
2675 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); 2689 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
2676 ctx->header_length += ctx->base.header_size; 2690 ctx->header_length += ctx->base.header_size;
2677 } 2691 }
2678 2692
2679 static int handle_ir_packet_per_buffer(struct context *context, 2693 static int handle_ir_packet_per_buffer(struct context *context,
2680 struct descriptor *d, 2694 struct descriptor *d,
2681 struct descriptor *last) 2695 struct descriptor *last)
2682 { 2696 {
2683 struct iso_context *ctx = 2697 struct iso_context *ctx =
2684 container_of(context, struct iso_context, context); 2698 container_of(context, struct iso_context, context);
2685 struct descriptor *pd; 2699 struct descriptor *pd;
2686 __le32 *ir_header; 2700 __le32 *ir_header;
2687 void *p; 2701 void *p;
2688 2702
2689 for (pd = d; pd <= last; pd++) 2703 for (pd = d; pd <= last; pd++)
2690 if (pd->transfer_status) 2704 if (pd->transfer_status)
2691 break; 2705 break;
2692 if (pd > last) 2706 if (pd > last)
2693 /* Descriptor(s) not done yet, stop iteration */ 2707 /* Descriptor(s) not done yet, stop iteration */
2694 return 0; 2708 return 0;
2695 2709
2696 p = last + 1; 2710 p = last + 1;
2697 copy_iso_headers(ctx, p); 2711 copy_iso_headers(ctx, p);
2698 2712
2699 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 2713 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
2700 ir_header = (__le32 *) p; 2714 ir_header = (__le32 *) p;
2701 ctx->base.callback.sc(&ctx->base, 2715 ctx->base.callback.sc(&ctx->base,
2702 le32_to_cpu(ir_header[0]) & 0xffff, 2716 le32_to_cpu(ir_header[0]) & 0xffff,
2703 ctx->header_length, ctx->header, 2717 ctx->header_length, ctx->header,
2704 ctx->base.callback_data); 2718 ctx->base.callback_data);
2705 ctx->header_length = 0; 2719 ctx->header_length = 0;
2706 } 2720 }
2707 2721
2708 return 1; 2722 return 1;
2709 } 2723 }
2710 2724
/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (!last->transfer_status)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback.mc(&ctx->base,
				      le32_to_cpu(last->data_address) +
				      le16_to_cpu(last->req_count) -
				      le16_to_cpu(last->res_count),
				      ctx->base.callback_data);

	return 1;
}

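/*
 * Isochronous transmit has no received packet headers; the quadlet
 * stored below combines the completed descriptor's status and
 * timestamp words, presented big-endian like the receive path, so
 * clients see a uniform header format in both directions.
 */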
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	int i;
	struct descriptor *pd;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	i = ctx->header_length;
	if (i + 4 < PAGE_SIZE) {
		/* Present this value as big-endian to match the receive code */
		*(__be32 *)(ctx->header + i) = cpu_to_be32(
				((u32)le16_to_cpu(pd->transfer_status) << 16) |
				le16_to_cpu(pd->res_count));
		ctx->header_length += 4;
	}
	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
				      ctx->header_length, ctx->header,
				      ctx->base.callback_data);
		ctx->header_length = 0;
	}
	return 1;
}

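/*
 * The 64-bit multichannel mask lives in two Hi/Lo register pairs,
 * each with separate Set and Clear ports.  Writing ~mask to the
 * Clear ports and then mask to the Set ports leaves exactly the
 * requested channels enabled.
 */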
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
	mmiowb();
	ohci->mc_channels = channels;
}

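/*
 * Free contexts are tracked in per-direction bitmasks; ffs() picks
 * the lowest free context and its bit is claimed under ohci->lock.
 * A single-channel receive context also claims its channel bit, and
 * only one multichannel context may exist at a time (mc_allocated).
 */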
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *uninitialized_var(ctx);
	descriptor_callback_t uninitialized_var(callback);
	u64 *uninitialized_var(channels);
	u32 *uninitialized_var(mask), uninitialized_var(regs);
	unsigned long flags;
	int index, ret = -EBUSY;

	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		mask = &ohci->it_context_mask;
		callback = handle_it_packet;
		index = ffs(*mask) - 1;
		if (index >= 0) {
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoXmitContextBase(index);
			ctx = &ohci->it_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		channels = &ohci->ir_context_channels;
		mask = &ohci->ir_context_mask;
		callback = handle_ir_packet_per_buffer;
		index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx = &ohci->ir_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		mask = &ohci->ir_context_mask;
		callback = handle_ir_buffer_fill;
		index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			ohci->mc_allocated = true;
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx = &ohci->ir_context_list[index];
		}
		break;

	default:
		index = -1;
		ret = -ENOSYS;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		set_multichannel_mask(ohci, 0);

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_RECEIVE:
		*channels |= 1ULL << channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ohci->mc_allocated = false;
		break;
	}
	*mask |= 1 << index;

	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(ret);
}

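/*
 * A negative cycle starts the context immediately; a non-negative
 * cycle arms the 15-bit cycle match, deferring the start until the
 * cycle timer reaches that value.
 */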
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	/* the controller cannot start without any queued packets */
	if (ctx->context.last->branch_address == 0)
		return -ENODATA;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
		/* fall through */
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);

		ctx->sync = sync;
		ctx->tags = tags;

		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);
	tasklet_kill(&ctx->context.tasklet);

	return 0;
}

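/*
 * Freeing reverses the allocation bookkeeping: the context's bit is
 * returned to its direction's mask and any claimed channel bits are
 * released, again under ohci->lock.
 */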
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	unsigned long flags;
	int ret;

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:

		spin_lock_irqsave(&ohci->lock, flags);

		/* Don't allow multichannel to grab other contexts' channels. */
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			ret = -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			ret = 0;
		}

		spin_unlock_irqrestore(&ohci->lock, flags);

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	int i;
	struct iso_context *ctx;

	for (i = 0 ; i < ohci->n_ir ; i++) {
		ctx = &ohci->ir_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}

	for (i = 0 ; i < ohci->n_it ; i++) {
		ctx = &ohci->it_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}
}
#endif

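/*
 * An IT descriptor block is built from up to three parts: an
 * immediate descriptor holding the packet's isochronous header
 * (omitted for skip packets), an optional descriptor pointing at
 * user-supplied header quadlets, and one descriptor per page touched
 * by the payload.  z counts the descriptors in the block.
 */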
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME:  Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

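/*
 * In packet-per-buffer mode, one fw_iso_packet describes several
 * equal-sized sub-buffers: packet->header_length determines the
 * packet count, the payload is divided evenly among them, and each
 * packet gets an INPUT_MORE header descriptor followed by one
 * payload descriptor per page.
 */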
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

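/*
 * Buffer-fill mode has no per-packet structure: each descriptor
 * simply covers up to one page of buffer space and received packets
 * are written back to back, so offset and length only need quadlet
 * alignment.
 */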
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

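/*
 * Setting the wake bit prods the context to re-read the current
 * descriptor's branch address, so packets appended since the context
 * last went idle become visible to the controller.
 */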
static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
	struct context *ctx =
		&container_of(base, struct iso_context, base)->context;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, err;
	size_t size;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	pmac_ohci_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
					       PAGE_SIZE,
					       &ohci->misc_buffer_bus,
					       GFP_KERNEL);
	if (!ohci->misc_buffer) {
		err = -ENOMEM;
		goto fail_iounmap;
	}

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		goto fail_misc_buf;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		goto fail_arreq_ctx;

	err = context_init(&ohci->at_request_ctx, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_arrsp_ctx;

	err = context_init(&ohci->at_response_ctx, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_atreq_ctx;

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * ohci->n_ir;
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * ohci->n_it;
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_contexts;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
		  "%d IR + %d IT contexts, quirks 0x%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff,
		  ohci->n_ir, ohci->n_it, ohci->quirks);

	return 0;

 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
 fail_atreq_ctx:
	context_release(&ohci->at_request_ctx);
 fail_arrsp_ctx:
	ar_context_release(&ohci->ar_response_ctx);
 fail_arreq_ctx:
	ar_context_release(&ohci->ar_request_ctx);
 fail_misc_buf:
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
 fail_iounmap:
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);
	pmac_ohci_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	cancel_work_sync(&ohci->bus_reset_work);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_disable_msi(dev);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);
	pmac_ohci_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	pci_disable_msi(dev);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	/* Some systems don't setup GUID register on resume from ram */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);