Commit 13882a82ee1646336c3996c93b4a560a55d2a419
Committed by
Stefan Richter
1 parent
f30e6d3e41
Exists in
master
and in
7 other branches
firewire: optimize iso queueing by setting wake only after the last packet
When queueing iso packets, the run time is dominated by the two MMIO accesses that set the DMA context's wake bit. Because most drivers submit packets in batches, we can save much time by removing all but the last wakeup.

The internal kernel API is changed to require a call to fw_iso_context_queue_flush() after a batch of queued packets. The user space API does not change, so one call to FW_CDEV_IOC_QUEUE_ISO must specify multiple packets to take advantage of this optimization.

In my measurements, this patch reduces the time needed to queue fifty skip packets from userspace to one sixth on a 2.5 GHz CPU, or to one third at 800 MHz.

Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Showing 9 changed files with 35 additions and 5 deletions Inline Diff
drivers/firewire/core-card.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | 2 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software Foundation, | 15 | * along with this program; if not, write to the Free Software Foundation, |
16 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
20 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
21 | #include <linux/crc-itu-t.h> | 21 | #include <linux/crc-itu-t.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/firewire.h> | 24 | #include <linux/firewire.h> |
25 | #include <linux/firewire-constants.h> | 25 | #include <linux/firewire-constants.h> |
26 | #include <linux/jiffies.h> | 26 | #include <linux/jiffies.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/kref.h> | 28 | #include <linux/kref.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
34 | 34 | ||
35 | #include <asm/atomic.h> | 35 | #include <asm/atomic.h> |
36 | #include <asm/byteorder.h> | 36 | #include <asm/byteorder.h> |
37 | 37 | ||
38 | #include "core.h" | 38 | #include "core.h" |
39 | 39 | ||
40 | int fw_compute_block_crc(__be32 *block) | 40 | int fw_compute_block_crc(__be32 *block) |
41 | { | 41 | { |
42 | int length; | 42 | int length; |
43 | u16 crc; | 43 | u16 crc; |
44 | 44 | ||
45 | length = (be32_to_cpu(block[0]) >> 16) & 0xff; | 45 | length = (be32_to_cpu(block[0]) >> 16) & 0xff; |
46 | crc = crc_itu_t(0, (u8 *)&block[1], length * 4); | 46 | crc = crc_itu_t(0, (u8 *)&block[1], length * 4); |
47 | *block |= cpu_to_be32(crc); | 47 | *block |= cpu_to_be32(crc); |
48 | 48 | ||
49 | return length; | 49 | return length; |
50 | } | 50 | } |
51 | 51 | ||
52 | static DEFINE_MUTEX(card_mutex); | 52 | static DEFINE_MUTEX(card_mutex); |
53 | static LIST_HEAD(card_list); | 53 | static LIST_HEAD(card_list); |
54 | 54 | ||
55 | static LIST_HEAD(descriptor_list); | 55 | static LIST_HEAD(descriptor_list); |
56 | static int descriptor_count; | 56 | static int descriptor_count; |
57 | 57 | ||
58 | static __be32 tmp_config_rom[256]; | 58 | static __be32 tmp_config_rom[256]; |
59 | /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ | 59 | /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ |
60 | static size_t config_rom_length = 1 + 4 + 1 + 1; | 60 | static size_t config_rom_length = 1 + 4 + 1 + 1; |
61 | 61 | ||
62 | #define BIB_CRC(v) ((v) << 0) | 62 | #define BIB_CRC(v) ((v) << 0) |
63 | #define BIB_CRC_LENGTH(v) ((v) << 16) | 63 | #define BIB_CRC_LENGTH(v) ((v) << 16) |
64 | #define BIB_INFO_LENGTH(v) ((v) << 24) | 64 | #define BIB_INFO_LENGTH(v) ((v) << 24) |
65 | #define BIB_BUS_NAME 0x31333934 /* "1394" */ | 65 | #define BIB_BUS_NAME 0x31333934 /* "1394" */ |
66 | #define BIB_LINK_SPEED(v) ((v) << 0) | 66 | #define BIB_LINK_SPEED(v) ((v) << 0) |
67 | #define BIB_GENERATION(v) ((v) << 4) | 67 | #define BIB_GENERATION(v) ((v) << 4) |
68 | #define BIB_MAX_ROM(v) ((v) << 8) | 68 | #define BIB_MAX_ROM(v) ((v) << 8) |
69 | #define BIB_MAX_RECEIVE(v) ((v) << 12) | 69 | #define BIB_MAX_RECEIVE(v) ((v) << 12) |
70 | #define BIB_CYC_CLK_ACC(v) ((v) << 16) | 70 | #define BIB_CYC_CLK_ACC(v) ((v) << 16) |
71 | #define BIB_PMC ((1) << 27) | 71 | #define BIB_PMC ((1) << 27) |
72 | #define BIB_BMC ((1) << 28) | 72 | #define BIB_BMC ((1) << 28) |
73 | #define BIB_ISC ((1) << 29) | 73 | #define BIB_ISC ((1) << 29) |
74 | #define BIB_CMC ((1) << 30) | 74 | #define BIB_CMC ((1) << 30) |
75 | #define BIB_IRMC ((1) << 31) | 75 | #define BIB_IRMC ((1) << 31) |
76 | #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ | 76 | #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms), | 79 | * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms), |
80 | * but we have to make it longer because there are many devices whose firmware | 80 | * but we have to make it longer because there are many devices whose firmware |
81 | * is just too slow for that. | 81 | * is just too slow for that. |
82 | */ | 82 | */ |
83 | #define DEFAULT_SPLIT_TIMEOUT (2 * 8000) | 83 | #define DEFAULT_SPLIT_TIMEOUT (2 * 8000) |
84 | 84 | ||
85 | #define CANON_OUI 0x000085 | 85 | #define CANON_OUI 0x000085 |
86 | 86 | ||
87 | static void generate_config_rom(struct fw_card *card, __be32 *config_rom) | 87 | static void generate_config_rom(struct fw_card *card, __be32 *config_rom) |
88 | { | 88 | { |
89 | struct fw_descriptor *desc; | 89 | struct fw_descriptor *desc; |
90 | int i, j, k, length; | 90 | int i, j, k, length; |
91 | 91 | ||
92 | /* | 92 | /* |
93 | * Initialize contents of config rom buffer. On the OHCI | 93 | * Initialize contents of config rom buffer. On the OHCI |
94 | * controller, block reads to the config rom accesses the host | 94 | * controller, block reads to the config rom accesses the host |
95 | * memory, but quadlet read access the hardware bus info block | 95 | * memory, but quadlet read access the hardware bus info block |
96 | * registers. That's just crack, but it means we should make | 96 | * registers. That's just crack, but it means we should make |
97 | * sure the contents of bus info block in host memory matches | 97 | * sure the contents of bus info block in host memory matches |
98 | * the version stored in the OHCI registers. | 98 | * the version stored in the OHCI registers. |
99 | */ | 99 | */ |
100 | 100 | ||
101 | config_rom[0] = cpu_to_be32( | 101 | config_rom[0] = cpu_to_be32( |
102 | BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); | 102 | BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); |
103 | config_rom[1] = cpu_to_be32(BIB_BUS_NAME); | 103 | config_rom[1] = cpu_to_be32(BIB_BUS_NAME); |
104 | config_rom[2] = cpu_to_be32( | 104 | config_rom[2] = cpu_to_be32( |
105 | BIB_LINK_SPEED(card->link_speed) | | 105 | BIB_LINK_SPEED(card->link_speed) | |
106 | BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | | 106 | BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | |
107 | BIB_MAX_ROM(2) | | 107 | BIB_MAX_ROM(2) | |
108 | BIB_MAX_RECEIVE(card->max_receive) | | 108 | BIB_MAX_RECEIVE(card->max_receive) | |
109 | BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); | 109 | BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); |
110 | config_rom[3] = cpu_to_be32(card->guid >> 32); | 110 | config_rom[3] = cpu_to_be32(card->guid >> 32); |
111 | config_rom[4] = cpu_to_be32(card->guid); | 111 | config_rom[4] = cpu_to_be32(card->guid); |
112 | 112 | ||
113 | /* Generate root directory. */ | 113 | /* Generate root directory. */ |
114 | config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); | 114 | config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); |
115 | i = 7; | 115 | i = 7; |
116 | j = 7 + descriptor_count; | 116 | j = 7 + descriptor_count; |
117 | 117 | ||
118 | /* Generate root directory entries for descriptors. */ | 118 | /* Generate root directory entries for descriptors. */ |
119 | list_for_each_entry (desc, &descriptor_list, link) { | 119 | list_for_each_entry (desc, &descriptor_list, link) { |
120 | if (desc->immediate > 0) | 120 | if (desc->immediate > 0) |
121 | config_rom[i++] = cpu_to_be32(desc->immediate); | 121 | config_rom[i++] = cpu_to_be32(desc->immediate); |
122 | config_rom[i] = cpu_to_be32(desc->key | (j - i)); | 122 | config_rom[i] = cpu_to_be32(desc->key | (j - i)); |
123 | i++; | 123 | i++; |
124 | j += desc->length; | 124 | j += desc->length; |
125 | } | 125 | } |
126 | 126 | ||
127 | /* Update root directory length. */ | 127 | /* Update root directory length. */ |
128 | config_rom[5] = cpu_to_be32((i - 5 - 1) << 16); | 128 | config_rom[5] = cpu_to_be32((i - 5 - 1) << 16); |
129 | 129 | ||
130 | /* End of root directory, now copy in descriptors. */ | 130 | /* End of root directory, now copy in descriptors. */ |
131 | list_for_each_entry (desc, &descriptor_list, link) { | 131 | list_for_each_entry (desc, &descriptor_list, link) { |
132 | for (k = 0; k < desc->length; k++) | 132 | for (k = 0; k < desc->length; k++) |
133 | config_rom[i + k] = cpu_to_be32(desc->data[k]); | 133 | config_rom[i + k] = cpu_to_be32(desc->data[k]); |
134 | i += desc->length; | 134 | i += desc->length; |
135 | } | 135 | } |
136 | 136 | ||
137 | /* Calculate CRCs for all blocks in the config rom. This | 137 | /* Calculate CRCs for all blocks in the config rom. This |
138 | * assumes that CRC length and info length are identical for | 138 | * assumes that CRC length and info length are identical for |
139 | * the bus info block, which is always the case for this | 139 | * the bus info block, which is always the case for this |
140 | * implementation. */ | 140 | * implementation. */ |
141 | for (i = 0; i < j; i += length + 1) | 141 | for (i = 0; i < j; i += length + 1) |
142 | length = fw_compute_block_crc(config_rom + i); | 142 | length = fw_compute_block_crc(config_rom + i); |
143 | 143 | ||
144 | WARN_ON(j != config_rom_length); | 144 | WARN_ON(j != config_rom_length); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void update_config_roms(void) | 147 | static void update_config_roms(void) |
148 | { | 148 | { |
149 | struct fw_card *card; | 149 | struct fw_card *card; |
150 | 150 | ||
151 | list_for_each_entry (card, &card_list, link) { | 151 | list_for_each_entry (card, &card_list, link) { |
152 | generate_config_rom(card, tmp_config_rom); | 152 | generate_config_rom(card, tmp_config_rom); |
153 | card->driver->set_config_rom(card, tmp_config_rom, | 153 | card->driver->set_config_rom(card, tmp_config_rom, |
154 | config_rom_length); | 154 | config_rom_length); |
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
158 | static size_t required_space(struct fw_descriptor *desc) | 158 | static size_t required_space(struct fw_descriptor *desc) |
159 | { | 159 | { |
160 | /* descriptor + entry into root dir + optional immediate entry */ | 160 | /* descriptor + entry into root dir + optional immediate entry */ |
161 | return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); | 161 | return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); |
162 | } | 162 | } |
163 | 163 | ||
164 | int fw_core_add_descriptor(struct fw_descriptor *desc) | 164 | int fw_core_add_descriptor(struct fw_descriptor *desc) |
165 | { | 165 | { |
166 | size_t i; | 166 | size_t i; |
167 | int ret; | 167 | int ret; |
168 | 168 | ||
169 | /* | 169 | /* |
170 | * Check descriptor is valid; the length of all blocks in the | 170 | * Check descriptor is valid; the length of all blocks in the |
171 | * descriptor has to add up to exactly the length of the | 171 | * descriptor has to add up to exactly the length of the |
172 | * block. | 172 | * block. |
173 | */ | 173 | */ |
174 | i = 0; | 174 | i = 0; |
175 | while (i < desc->length) | 175 | while (i < desc->length) |
176 | i += (desc->data[i] >> 16) + 1; | 176 | i += (desc->data[i] >> 16) + 1; |
177 | 177 | ||
178 | if (i != desc->length) | 178 | if (i != desc->length) |
179 | return -EINVAL; | 179 | return -EINVAL; |
180 | 180 | ||
181 | mutex_lock(&card_mutex); | 181 | mutex_lock(&card_mutex); |
182 | 182 | ||
183 | if (config_rom_length + required_space(desc) > 256) { | 183 | if (config_rom_length + required_space(desc) > 256) { |
184 | ret = -EBUSY; | 184 | ret = -EBUSY; |
185 | } else { | 185 | } else { |
186 | list_add_tail(&desc->link, &descriptor_list); | 186 | list_add_tail(&desc->link, &descriptor_list); |
187 | config_rom_length += required_space(desc); | 187 | config_rom_length += required_space(desc); |
188 | descriptor_count++; | 188 | descriptor_count++; |
189 | if (desc->immediate > 0) | 189 | if (desc->immediate > 0) |
190 | descriptor_count++; | 190 | descriptor_count++; |
191 | update_config_roms(); | 191 | update_config_roms(); |
192 | ret = 0; | 192 | ret = 0; |
193 | } | 193 | } |
194 | 194 | ||
195 | mutex_unlock(&card_mutex); | 195 | mutex_unlock(&card_mutex); |
196 | 196 | ||
197 | return ret; | 197 | return ret; |
198 | } | 198 | } |
199 | EXPORT_SYMBOL(fw_core_add_descriptor); | 199 | EXPORT_SYMBOL(fw_core_add_descriptor); |
200 | 200 | ||
201 | void fw_core_remove_descriptor(struct fw_descriptor *desc) | 201 | void fw_core_remove_descriptor(struct fw_descriptor *desc) |
202 | { | 202 | { |
203 | mutex_lock(&card_mutex); | 203 | mutex_lock(&card_mutex); |
204 | 204 | ||
205 | list_del(&desc->link); | 205 | list_del(&desc->link); |
206 | config_rom_length -= required_space(desc); | 206 | config_rom_length -= required_space(desc); |
207 | descriptor_count--; | 207 | descriptor_count--; |
208 | if (desc->immediate > 0) | 208 | if (desc->immediate > 0) |
209 | descriptor_count--; | 209 | descriptor_count--; |
210 | update_config_roms(); | 210 | update_config_roms(); |
211 | 211 | ||
212 | mutex_unlock(&card_mutex); | 212 | mutex_unlock(&card_mutex); |
213 | } | 213 | } |
214 | EXPORT_SYMBOL(fw_core_remove_descriptor); | 214 | EXPORT_SYMBOL(fw_core_remove_descriptor); |
215 | 215 | ||
216 | static int reset_bus(struct fw_card *card, bool short_reset) | 216 | static int reset_bus(struct fw_card *card, bool short_reset) |
217 | { | 217 | { |
218 | int reg = short_reset ? 5 : 1; | 218 | int reg = short_reset ? 5 : 1; |
219 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; | 219 | int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; |
220 | 220 | ||
221 | return card->driver->update_phy_reg(card, reg, 0, bit); | 221 | return card->driver->update_phy_reg(card, reg, 0, bit); |
222 | } | 222 | } |
223 | 223 | ||
224 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) | 224 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) |
225 | { | 225 | { |
226 | /* We don't try hard to sort out requests of long vs. short resets. */ | 226 | /* We don't try hard to sort out requests of long vs. short resets. */ |
227 | card->br_short = short_reset; | 227 | card->br_short = short_reset; |
228 | 228 | ||
229 | /* Use an arbitrary short delay to combine multiple reset requests. */ | 229 | /* Use an arbitrary short delay to combine multiple reset requests. */ |
230 | fw_card_get(card); | 230 | fw_card_get(card); |
231 | if (!schedule_delayed_work(&card->br_work, | 231 | if (!schedule_delayed_work(&card->br_work, |
232 | delayed ? DIV_ROUND_UP(HZ, 100) : 0)) | 232 | delayed ? DIV_ROUND_UP(HZ, 100) : 0)) |
233 | fw_card_put(card); | 233 | fw_card_put(card); |
234 | } | 234 | } |
235 | EXPORT_SYMBOL(fw_schedule_bus_reset); | 235 | EXPORT_SYMBOL(fw_schedule_bus_reset); |
236 | 236 | ||
237 | static void br_work(struct work_struct *work) | 237 | static void br_work(struct work_struct *work) |
238 | { | 238 | { |
239 | struct fw_card *card = container_of(work, struct fw_card, br_work.work); | 239 | struct fw_card *card = container_of(work, struct fw_card, br_work.work); |
240 | 240 | ||
241 | /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ | 241 | /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ |
242 | if (card->reset_jiffies != 0 && | 242 | if (card->reset_jiffies != 0 && |
243 | time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { | 243 | time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { |
244 | if (!schedule_delayed_work(&card->br_work, 2 * HZ)) | 244 | if (!schedule_delayed_work(&card->br_work, 2 * HZ)) |
245 | fw_card_put(card); | 245 | fw_card_put(card); |
246 | return; | 246 | return; |
247 | } | 247 | } |
248 | 248 | ||
249 | fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, | 249 | fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation, |
250 | FW_PHY_CONFIG_CURRENT_GAP_COUNT); | 250 | FW_PHY_CONFIG_CURRENT_GAP_COUNT); |
251 | reset_bus(card, card->br_short); | 251 | reset_bus(card, card->br_short); |
252 | fw_card_put(card); | 252 | fw_card_put(card); |
253 | } | 253 | } |
254 | 254 | ||
255 | static void allocate_broadcast_channel(struct fw_card *card, int generation) | 255 | static void allocate_broadcast_channel(struct fw_card *card, int generation) |
256 | { | 256 | { |
257 | int channel, bandwidth = 0; | 257 | int channel, bandwidth = 0; |
258 | 258 | ||
259 | if (!card->broadcast_channel_allocated) { | 259 | if (!card->broadcast_channel_allocated) { |
260 | fw_iso_resource_manage(card, generation, 1ULL << 31, | 260 | fw_iso_resource_manage(card, generation, 1ULL << 31, |
261 | &channel, &bandwidth, true); | 261 | &channel, &bandwidth, true); |
262 | if (channel != 31) { | 262 | if (channel != 31) { |
263 | fw_notify("failed to allocate broadcast channel\n"); | 263 | fw_notify("failed to allocate broadcast channel\n"); |
264 | return; | 264 | return; |
265 | } | 265 | } |
266 | card->broadcast_channel_allocated = true; | 266 | card->broadcast_channel_allocated = true; |
267 | } | 267 | } |
268 | 268 | ||
269 | device_for_each_child(card->device, (void *)(long)generation, | 269 | device_for_each_child(card->device, (void *)(long)generation, |
270 | fw_device_set_broadcast_channel); | 270 | fw_device_set_broadcast_channel); |
271 | } | 271 | } |
272 | 272 | ||
/* Optimal gap count indexed by hop count, per 1394a table E-1. */
static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
276 | 276 | ||
277 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) | 277 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) |
278 | { | 278 | { |
279 | fw_card_get(card); | 279 | fw_card_get(card); |
280 | if (!schedule_delayed_work(&card->bm_work, delay)) | 280 | if (!schedule_delayed_work(&card->bm_work, delay)) |
281 | fw_card_put(card); | 281 | fw_card_put(card); |
282 | } | 282 | } |
283 | 283 | ||
284 | static void bm_work(struct work_struct *work) | 284 | static void bm_work(struct work_struct *work) |
285 | { | 285 | { |
286 | struct fw_card *card = container_of(work, struct fw_card, bm_work.work); | 286 | struct fw_card *card = container_of(work, struct fw_card, bm_work.work); |
287 | struct fw_device *root_device, *irm_device; | 287 | struct fw_device *root_device, *irm_device; |
288 | struct fw_node *root_node; | 288 | struct fw_node *root_node; |
289 | int root_id, new_root_id, irm_id, bm_id, local_id; | 289 | int root_id, new_root_id, irm_id, bm_id, local_id; |
290 | int gap_count, generation, grace, rcode; | 290 | int gap_count, generation, grace, rcode; |
291 | bool do_reset = false; | 291 | bool do_reset = false; |
292 | bool root_device_is_running; | 292 | bool root_device_is_running; |
293 | bool root_device_is_cmc; | 293 | bool root_device_is_cmc; |
294 | bool irm_is_1394_1995_only; | 294 | bool irm_is_1394_1995_only; |
295 | bool keep_this_irm; | 295 | bool keep_this_irm; |
296 | __be32 transaction_data[2]; | 296 | __be32 transaction_data[2]; |
297 | 297 | ||
298 | spin_lock_irq(&card->lock); | 298 | spin_lock_irq(&card->lock); |
299 | 299 | ||
300 | if (card->local_node == NULL) { | 300 | if (card->local_node == NULL) { |
301 | spin_unlock_irq(&card->lock); | 301 | spin_unlock_irq(&card->lock); |
302 | goto out_put_card; | 302 | goto out_put_card; |
303 | } | 303 | } |
304 | 304 | ||
305 | generation = card->generation; | 305 | generation = card->generation; |
306 | 306 | ||
307 | root_node = card->root_node; | 307 | root_node = card->root_node; |
308 | fw_node_get(root_node); | 308 | fw_node_get(root_node); |
309 | root_device = root_node->data; | 309 | root_device = root_node->data; |
310 | root_device_is_running = root_device && | 310 | root_device_is_running = root_device && |
311 | atomic_read(&root_device->state) == FW_DEVICE_RUNNING; | 311 | atomic_read(&root_device->state) == FW_DEVICE_RUNNING; |
312 | root_device_is_cmc = root_device && root_device->cmc; | 312 | root_device_is_cmc = root_device && root_device->cmc; |
313 | 313 | ||
314 | irm_device = card->irm_node->data; | 314 | irm_device = card->irm_node->data; |
315 | irm_is_1394_1995_only = irm_device && irm_device->config_rom && | 315 | irm_is_1394_1995_only = irm_device && irm_device->config_rom && |
316 | (irm_device->config_rom[2] & 0x000000f0) == 0; | 316 | (irm_device->config_rom[2] & 0x000000f0) == 0; |
317 | 317 | ||
318 | /* Canon MV5i works unreliably if it is not root node. */ | 318 | /* Canon MV5i works unreliably if it is not root node. */ |
319 | keep_this_irm = irm_device && irm_device->config_rom && | 319 | keep_this_irm = irm_device && irm_device->config_rom && |
320 | irm_device->config_rom[3] >> 8 == CANON_OUI; | 320 | irm_device->config_rom[3] >> 8 == CANON_OUI; |
321 | 321 | ||
322 | root_id = root_node->node_id; | 322 | root_id = root_node->node_id; |
323 | irm_id = card->irm_node->node_id; | 323 | irm_id = card->irm_node->node_id; |
324 | local_id = card->local_node->node_id; | 324 | local_id = card->local_node->node_id; |
325 | 325 | ||
326 | grace = time_after64(get_jiffies_64(), | 326 | grace = time_after64(get_jiffies_64(), |
327 | card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); | 327 | card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); |
328 | 328 | ||
329 | if ((is_next_generation(generation, card->bm_generation) && | 329 | if ((is_next_generation(generation, card->bm_generation) && |
330 | !card->bm_abdicate) || | 330 | !card->bm_abdicate) || |
331 | (card->bm_generation != generation && grace)) { | 331 | (card->bm_generation != generation && grace)) { |
332 | /* | 332 | /* |
333 | * This first step is to figure out who is IRM and | 333 | * This first step is to figure out who is IRM and |
334 | * then try to become bus manager. If the IRM is not | 334 | * then try to become bus manager. If the IRM is not |
335 | * well defined (e.g. does not have an active link | 335 | * well defined (e.g. does not have an active link |
336 | * layer or does not responds to our lock request, we | 336 | * layer or does not responds to our lock request, we |
337 | * will have to do a little vigilante bus management. | 337 | * will have to do a little vigilante bus management. |
338 | * In that case, we do a goto into the gap count logic | 338 | * In that case, we do a goto into the gap count logic |
339 | * so that when we do the reset, we still optimize the | 339 | * so that when we do the reset, we still optimize the |
340 | * gap count. That could well save a reset in the | 340 | * gap count. That could well save a reset in the |
341 | * next generation. | 341 | * next generation. |
342 | */ | 342 | */ |
343 | 343 | ||
344 | if (!card->irm_node->link_on) { | 344 | if (!card->irm_node->link_on) { |
345 | new_root_id = local_id; | 345 | new_root_id = local_id; |
346 | fw_notify("%s, making local node (%02x) root.\n", | 346 | fw_notify("%s, making local node (%02x) root.\n", |
347 | "IRM has link off", new_root_id); | 347 | "IRM has link off", new_root_id); |
348 | goto pick_me; | 348 | goto pick_me; |
349 | } | 349 | } |
350 | 350 | ||
351 | if (irm_is_1394_1995_only && !keep_this_irm) { | 351 | if (irm_is_1394_1995_only && !keep_this_irm) { |
352 | new_root_id = local_id; | 352 | new_root_id = local_id; |
353 | fw_notify("%s, making local node (%02x) root.\n", | 353 | fw_notify("%s, making local node (%02x) root.\n", |
354 | "IRM is not 1394a compliant", new_root_id); | 354 | "IRM is not 1394a compliant", new_root_id); |
355 | goto pick_me; | 355 | goto pick_me; |
356 | } | 356 | } |
357 | 357 | ||
358 | transaction_data[0] = cpu_to_be32(0x3f); | 358 | transaction_data[0] = cpu_to_be32(0x3f); |
359 | transaction_data[1] = cpu_to_be32(local_id); | 359 | transaction_data[1] = cpu_to_be32(local_id); |
360 | 360 | ||
361 | spin_unlock_irq(&card->lock); | 361 | spin_unlock_irq(&card->lock); |
362 | 362 | ||
363 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 363 | rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
364 | irm_id, generation, SCODE_100, | 364 | irm_id, generation, SCODE_100, |
365 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, | 365 | CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, |
366 | transaction_data, 8); | 366 | transaction_data, 8); |
367 | 367 | ||
368 | if (rcode == RCODE_GENERATION) | 368 | if (rcode == RCODE_GENERATION) |
369 | /* Another bus reset, BM work has been rescheduled. */ | 369 | /* Another bus reset, BM work has been rescheduled. */ |
370 | goto out; | 370 | goto out; |
371 | 371 | ||
372 | bm_id = be32_to_cpu(transaction_data[0]); | 372 | bm_id = be32_to_cpu(transaction_data[0]); |
373 | 373 | ||
374 | spin_lock_irq(&card->lock); | 374 | spin_lock_irq(&card->lock); |
375 | if (rcode == RCODE_COMPLETE && generation == card->generation) | 375 | if (rcode == RCODE_COMPLETE && generation == card->generation) |
376 | card->bm_node_id = | 376 | card->bm_node_id = |
377 | bm_id == 0x3f ? local_id : 0xffc0 | bm_id; | 377 | bm_id == 0x3f ? local_id : 0xffc0 | bm_id; |
378 | spin_unlock_irq(&card->lock); | 378 | spin_unlock_irq(&card->lock); |
379 | 379 | ||
380 | if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { | 380 | if (rcode == RCODE_COMPLETE && bm_id != 0x3f) { |
381 | /* Somebody else is BM. Only act as IRM. */ | 381 | /* Somebody else is BM. Only act as IRM. */ |
382 | if (local_id == irm_id) | 382 | if (local_id == irm_id) |
383 | allocate_broadcast_channel(card, generation); | 383 | allocate_broadcast_channel(card, generation); |
384 | 384 | ||
385 | goto out; | 385 | goto out; |
386 | } | 386 | } |
387 | 387 | ||
388 | if (rcode == RCODE_SEND_ERROR) { | 388 | if (rcode == RCODE_SEND_ERROR) { |
389 | /* | 389 | /* |
390 | * We have been unable to send the lock request due to | 390 | * We have been unable to send the lock request due to |
391 | * some local problem. Let's try again later and hope | 391 | * some local problem. Let's try again later and hope |
392 | * that the problem has gone away by then. | 392 | * that the problem has gone away by then. |
393 | */ | 393 | */ |
394 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | 394 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); |
395 | goto out; | 395 | goto out; |
396 | } | 396 | } |
397 | 397 | ||
398 | spin_lock_irq(&card->lock); | 398 | spin_lock_irq(&card->lock); |
399 | 399 | ||
400 | if (rcode != RCODE_COMPLETE && !keep_this_irm) { | 400 | if (rcode != RCODE_COMPLETE && !keep_this_irm) { |
401 | /* | 401 | /* |
402 | * The lock request failed, maybe the IRM | 402 | * The lock request failed, maybe the IRM |
403 | * isn't really IRM capable after all. Let's | 403 | * isn't really IRM capable after all. Let's |
404 | * do a bus reset and pick the local node as | 404 | * do a bus reset and pick the local node as |
405 | * root, and thus, IRM. | 405 | * root, and thus, IRM. |
406 | */ | 406 | */ |
407 | new_root_id = local_id; | 407 | new_root_id = local_id; |
408 | fw_notify("%s, making local node (%02x) root.\n", | 408 | fw_notify("%s, making local node (%02x) root.\n", |
409 | "BM lock failed", new_root_id); | 409 | "BM lock failed", new_root_id); |
410 | goto pick_me; | 410 | goto pick_me; |
411 | } | 411 | } |
412 | } else if (card->bm_generation != generation) { | 412 | } else if (card->bm_generation != generation) { |
413 | /* | 413 | /* |
414 | * We weren't BM in the last generation, and the last | 414 | * We weren't BM in the last generation, and the last |
415 | * bus reset is less than 125ms ago. Reschedule this job. | 415 | * bus reset is less than 125ms ago. Reschedule this job. |
416 | */ | 416 | */ |
417 | spin_unlock_irq(&card->lock); | 417 | spin_unlock_irq(&card->lock); |
418 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); | 418 | fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); |
419 | goto out; | 419 | goto out; |
420 | } | 420 | } |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * We're bus manager for this generation, so next step is to | 423 | * We're bus manager for this generation, so next step is to |
424 | * make sure we have an active cycle master and do gap count | 424 | * make sure we have an active cycle master and do gap count |
425 | * optimization. | 425 | * optimization. |
426 | */ | 426 | */ |
427 | card->bm_generation = generation; | 427 | card->bm_generation = generation; |
428 | 428 | ||
429 | if (root_device == NULL) { | 429 | if (root_device == NULL) { |
430 | /* | 430 | /* |
431 | * Either link_on is false, or we failed to read the | 431 | * Either link_on is false, or we failed to read the |
432 | * config rom. In either case, pick another root. | 432 | * config rom. In either case, pick another root. |
433 | */ | 433 | */ |
434 | new_root_id = local_id; | 434 | new_root_id = local_id; |
435 | } else if (!root_device_is_running) { | 435 | } else if (!root_device_is_running) { |
436 | /* | 436 | /* |
437 | * If we haven't probed this device yet, bail out now | 437 | * If we haven't probed this device yet, bail out now |
438 | * and let's try again once that's done. | 438 | * and let's try again once that's done. |
439 | */ | 439 | */ |
440 | spin_unlock_irq(&card->lock); | 440 | spin_unlock_irq(&card->lock); |
441 | goto out; | 441 | goto out; |
442 | } else if (root_device_is_cmc) { | 442 | } else if (root_device_is_cmc) { |
443 | /* | 443 | /* |
444 | * We will send out a force root packet for this | 444 | * We will send out a force root packet for this |
445 | * node as part of the gap count optimization. | 445 | * node as part of the gap count optimization. |
446 | */ | 446 | */ |
447 | new_root_id = root_id; | 447 | new_root_id = root_id; |
448 | } else { | 448 | } else { |
449 | /* | 449 | /* |
450 | * Current root has an active link layer and we | 450 | * Current root has an active link layer and we |
451 | * successfully read the config rom, but it's not | 451 | * successfully read the config rom, but it's not |
452 | * cycle master capable. | 452 | * cycle master capable. |
453 | */ | 453 | */ |
454 | new_root_id = local_id; | 454 | new_root_id = local_id; |
455 | } | 455 | } |
456 | 456 | ||
457 | pick_me: | 457 | pick_me: |
458 | /* | 458 | /* |
459 | * Pick a gap count from 1394a table E-1. The table doesn't cover | 459 | * Pick a gap count from 1394a table E-1. The table doesn't cover |
460 | * the typically much larger 1394b beta repeater delays though. | 460 | * the typically much larger 1394b beta repeater delays though. |
461 | */ | 461 | */ |
462 | if (!card->beta_repeaters_present && | 462 | if (!card->beta_repeaters_present && |
463 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 463 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
464 | gap_count = gap_count_table[root_node->max_hops]; | 464 | gap_count = gap_count_table[root_node->max_hops]; |
465 | else | 465 | else |
466 | gap_count = 63; | 466 | gap_count = 63; |
467 | 467 | ||
468 | /* | 468 | /* |
469 | * Finally, figure out if we should do a reset or not. If we have | 469 | * Finally, figure out if we should do a reset or not. If we have |
470 | * done less than 5 resets with the same physical topology and we | 470 | * done less than 5 resets with the same physical topology and we |
471 | * have either a new root or a new gap count setting, let's do it. | 471 | * have either a new root or a new gap count setting, let's do it. |
472 | */ | 472 | */ |
473 | 473 | ||
474 | if (card->bm_retries++ < 5 && | 474 | if (card->bm_retries++ < 5 && |
475 | (card->gap_count != gap_count || new_root_id != root_id)) | 475 | (card->gap_count != gap_count || new_root_id != root_id)) |
476 | do_reset = true; | 476 | do_reset = true; |
477 | 477 | ||
478 | spin_unlock_irq(&card->lock); | 478 | spin_unlock_irq(&card->lock); |
479 | 479 | ||
480 | if (do_reset) { | 480 | if (do_reset) { |
481 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", | 481 | fw_notify("phy config: card %d, new root=%x, gap_count=%d\n", |
482 | card->index, new_root_id, gap_count); | 482 | card->index, new_root_id, gap_count); |
483 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 483 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
484 | reset_bus(card, true); | 484 | reset_bus(card, true); |
485 | /* Will allocate broadcast channel after the reset. */ | 485 | /* Will allocate broadcast channel after the reset. */ |
486 | goto out; | 486 | goto out; |
487 | } | 487 | } |
488 | 488 | ||
489 | if (root_device_is_cmc) { | 489 | if (root_device_is_cmc) { |
490 | /* | 490 | /* |
491 | * Make sure that the cycle master sends cycle start packets. | 491 | * Make sure that the cycle master sends cycle start packets. |
492 | */ | 492 | */ |
493 | transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); | 493 | transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR); |
494 | rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, | 494 | rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, |
495 | root_id, generation, SCODE_100, | 495 | root_id, generation, SCODE_100, |
496 | CSR_REGISTER_BASE + CSR_STATE_SET, | 496 | CSR_REGISTER_BASE + CSR_STATE_SET, |
497 | transaction_data, 4); | 497 | transaction_data, 4); |
498 | if (rcode == RCODE_GENERATION) | 498 | if (rcode == RCODE_GENERATION) |
499 | goto out; | 499 | goto out; |
500 | } | 500 | } |
501 | 501 | ||
502 | if (local_id == irm_id) | 502 | if (local_id == irm_id) |
503 | allocate_broadcast_channel(card, generation); | 503 | allocate_broadcast_channel(card, generation); |
504 | 504 | ||
505 | out: | 505 | out: |
506 | fw_node_put(root_node); | 506 | fw_node_put(root_node); |
507 | out_put_card: | 507 | out_put_card: |
508 | fw_card_put(card); | 508 | fw_card_put(card); |
509 | } | 509 | } |
510 | 510 | ||
511 | void fw_card_initialize(struct fw_card *card, | 511 | void fw_card_initialize(struct fw_card *card, |
512 | const struct fw_card_driver *driver, | 512 | const struct fw_card_driver *driver, |
513 | struct device *device) | 513 | struct device *device) |
514 | { | 514 | { |
515 | static atomic_t index = ATOMIC_INIT(-1); | 515 | static atomic_t index = ATOMIC_INIT(-1); |
516 | 516 | ||
517 | card->index = atomic_inc_return(&index); | 517 | card->index = atomic_inc_return(&index); |
518 | card->driver = driver; | 518 | card->driver = driver; |
519 | card->device = device; | 519 | card->device = device; |
520 | card->current_tlabel = 0; | 520 | card->current_tlabel = 0; |
521 | card->tlabel_mask = 0; | 521 | card->tlabel_mask = 0; |
522 | card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000; | 522 | card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000; |
523 | card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19; | 523 | card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19; |
524 | card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT; | 524 | card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT; |
525 | card->split_timeout_jiffies = | 525 | card->split_timeout_jiffies = |
526 | DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000); | 526 | DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000); |
527 | card->color = 0; | 527 | card->color = 0; |
528 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; | 528 | card->broadcast_channel = BROADCAST_CHANNEL_INITIAL; |
529 | 529 | ||
530 | kref_init(&card->kref); | 530 | kref_init(&card->kref); |
531 | init_completion(&card->done); | 531 | init_completion(&card->done); |
532 | INIT_LIST_HEAD(&card->transaction_list); | 532 | INIT_LIST_HEAD(&card->transaction_list); |
533 | INIT_LIST_HEAD(&card->phy_receiver_list); | 533 | INIT_LIST_HEAD(&card->phy_receiver_list); |
534 | spin_lock_init(&card->lock); | 534 | spin_lock_init(&card->lock); |
535 | 535 | ||
536 | card->local_node = NULL; | 536 | card->local_node = NULL; |
537 | 537 | ||
538 | INIT_DELAYED_WORK(&card->br_work, br_work); | 538 | INIT_DELAYED_WORK(&card->br_work, br_work); |
539 | INIT_DELAYED_WORK(&card->bm_work, bm_work); | 539 | INIT_DELAYED_WORK(&card->bm_work, bm_work); |
540 | } | 540 | } |
541 | EXPORT_SYMBOL(fw_card_initialize); | 541 | EXPORT_SYMBOL(fw_card_initialize); |
542 | 542 | ||
543 | int fw_card_add(struct fw_card *card, | 543 | int fw_card_add(struct fw_card *card, |
544 | u32 max_receive, u32 link_speed, u64 guid) | 544 | u32 max_receive, u32 link_speed, u64 guid) |
545 | { | 545 | { |
546 | int ret; | 546 | int ret; |
547 | 547 | ||
548 | card->max_receive = max_receive; | 548 | card->max_receive = max_receive; |
549 | card->link_speed = link_speed; | 549 | card->link_speed = link_speed; |
550 | card->guid = guid; | 550 | card->guid = guid; |
551 | 551 | ||
552 | mutex_lock(&card_mutex); | 552 | mutex_lock(&card_mutex); |
553 | 553 | ||
554 | generate_config_rom(card, tmp_config_rom); | 554 | generate_config_rom(card, tmp_config_rom); |
555 | ret = card->driver->enable(card, tmp_config_rom, config_rom_length); | 555 | ret = card->driver->enable(card, tmp_config_rom, config_rom_length); |
556 | if (ret == 0) | 556 | if (ret == 0) |
557 | list_add_tail(&card->link, &card_list); | 557 | list_add_tail(&card->link, &card_list); |
558 | 558 | ||
559 | mutex_unlock(&card_mutex); | 559 | mutex_unlock(&card_mutex); |
560 | 560 | ||
561 | return ret; | 561 | return ret; |
562 | } | 562 | } |
563 | EXPORT_SYMBOL(fw_card_add); | 563 | EXPORT_SYMBOL(fw_card_add); |
564 | 564 | ||
565 | /* | 565 | /* |
566 | * The next few functions implement a dummy driver that is used once a card | 566 | * The next few functions implement a dummy driver that is used once a card |
567 | * driver shuts down an fw_card. This allows the driver to cleanly unload, | 567 | * driver shuts down an fw_card. This allows the driver to cleanly unload, |
568 | * as all IO to the card will be handled (and failed) by the dummy driver | 568 | * as all IO to the card will be handled (and failed) by the dummy driver |
569 | * instead of calling into the module. Only functions for iso context | 569 | * instead of calling into the module. Only functions for iso context |
570 | * shutdown still need to be provided by the card driver. | 570 | * shutdown still need to be provided by the card driver. |
571 | * | 571 | * |
572 | * .read/write_csr() should never be called anymore after the dummy driver | 572 | * .read/write_csr() should never be called anymore after the dummy driver |
573 | * was bound since they are only used within request handler context. | 573 | * was bound since they are only used within request handler context. |
574 | * .set_config_rom() is never called since the card is taken out of card_list | 574 | * .set_config_rom() is never called since the card is taken out of card_list |
575 | * before switching to the dummy driver. | 575 | * before switching to the dummy driver. |
576 | */ | 576 | */ |
577 | 577 | ||
578 | static int dummy_read_phy_reg(struct fw_card *card, int address) | 578 | static int dummy_read_phy_reg(struct fw_card *card, int address) |
579 | { | 579 | { |
580 | return -ENODEV; | 580 | return -ENODEV; |
581 | } | 581 | } |
582 | 582 | ||
583 | static int dummy_update_phy_reg(struct fw_card *card, int address, | 583 | static int dummy_update_phy_reg(struct fw_card *card, int address, |
584 | int clear_bits, int set_bits) | 584 | int clear_bits, int set_bits) |
585 | { | 585 | { |
586 | return -ENODEV; | 586 | return -ENODEV; |
587 | } | 587 | } |
588 | 588 | ||
589 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) | 589 | static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) |
590 | { | 590 | { |
591 | packet->callback(packet, card, RCODE_CANCELLED); | 591 | packet->callback(packet, card, RCODE_CANCELLED); |
592 | } | 592 | } |
593 | 593 | ||
594 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) | 594 | static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) |
595 | { | 595 | { |
596 | packet->callback(packet, card, RCODE_CANCELLED); | 596 | packet->callback(packet, card, RCODE_CANCELLED); |
597 | } | 597 | } |
598 | 598 | ||
599 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) | 599 | static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) |
600 | { | 600 | { |
601 | return -ENOENT; | 601 | return -ENOENT; |
602 | } | 602 | } |
603 | 603 | ||
604 | static int dummy_enable_phys_dma(struct fw_card *card, | 604 | static int dummy_enable_phys_dma(struct fw_card *card, |
605 | int node_id, int generation) | 605 | int node_id, int generation) |
606 | { | 606 | { |
607 | return -ENODEV; | 607 | return -ENODEV; |
608 | } | 608 | } |
609 | 609 | ||
610 | static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, | 610 | static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card, |
611 | int type, int channel, size_t header_size) | 611 | int type, int channel, size_t header_size) |
612 | { | 612 | { |
613 | return ERR_PTR(-ENODEV); | 613 | return ERR_PTR(-ENODEV); |
614 | } | 614 | } |
615 | 615 | ||
616 | static int dummy_start_iso(struct fw_iso_context *ctx, | 616 | static int dummy_start_iso(struct fw_iso_context *ctx, |
617 | s32 cycle, u32 sync, u32 tags) | 617 | s32 cycle, u32 sync, u32 tags) |
618 | { | 618 | { |
619 | return -ENODEV; | 619 | return -ENODEV; |
620 | } | 620 | } |
621 | 621 | ||
622 | static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) | 622 | static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels) |
623 | { | 623 | { |
624 | return -ENODEV; | 624 | return -ENODEV; |
625 | } | 625 | } |
626 | 626 | ||
627 | static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, | 627 | static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p, |
628 | struct fw_iso_buffer *buffer, unsigned long payload) | 628 | struct fw_iso_buffer *buffer, unsigned long payload) |
629 | { | 629 | { |
630 | return -ENODEV; | 630 | return -ENODEV; |
631 | } | 631 | } |
632 | 632 | ||
633 | static void dummy_flush_queue_iso(struct fw_iso_context *ctx) | ||
634 | { | ||
635 | } | ||
636 | |||
633 | static const struct fw_card_driver dummy_driver_template = { | 637 | static const struct fw_card_driver dummy_driver_template = { |
634 | .read_phy_reg = dummy_read_phy_reg, | 638 | .read_phy_reg = dummy_read_phy_reg, |
635 | .update_phy_reg = dummy_update_phy_reg, | 639 | .update_phy_reg = dummy_update_phy_reg, |
636 | .send_request = dummy_send_request, | 640 | .send_request = dummy_send_request, |
637 | .send_response = dummy_send_response, | 641 | .send_response = dummy_send_response, |
638 | .cancel_packet = dummy_cancel_packet, | 642 | .cancel_packet = dummy_cancel_packet, |
639 | .enable_phys_dma = dummy_enable_phys_dma, | 643 | .enable_phys_dma = dummy_enable_phys_dma, |
640 | .allocate_iso_context = dummy_allocate_iso_context, | 644 | .allocate_iso_context = dummy_allocate_iso_context, |
641 | .start_iso = dummy_start_iso, | 645 | .start_iso = dummy_start_iso, |
642 | .set_iso_channels = dummy_set_iso_channels, | 646 | .set_iso_channels = dummy_set_iso_channels, |
643 | .queue_iso = dummy_queue_iso, | 647 | .queue_iso = dummy_queue_iso, |
648 | .flush_queue_iso = dummy_flush_queue_iso, | ||
644 | }; | 649 | }; |
645 | 650 | ||
646 | void fw_card_release(struct kref *kref) | 651 | void fw_card_release(struct kref *kref) |
647 | { | 652 | { |
648 | struct fw_card *card = container_of(kref, struct fw_card, kref); | 653 | struct fw_card *card = container_of(kref, struct fw_card, kref); |
649 | 654 | ||
650 | complete(&card->done); | 655 | complete(&card->done); |
651 | } | 656 | } |
652 | 657 | ||
653 | void fw_core_remove_card(struct fw_card *card) | 658 | void fw_core_remove_card(struct fw_card *card) |
654 | { | 659 | { |
655 | struct fw_card_driver dummy_driver = dummy_driver_template; | 660 | struct fw_card_driver dummy_driver = dummy_driver_template; |
656 | 661 | ||
657 | card->driver->update_phy_reg(card, 4, | 662 | card->driver->update_phy_reg(card, 4, |
658 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); | 663 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); |
659 | fw_schedule_bus_reset(card, false, true); | 664 | fw_schedule_bus_reset(card, false, true); |
660 | 665 | ||
661 | mutex_lock(&card_mutex); | 666 | mutex_lock(&card_mutex); |
662 | list_del_init(&card->link); | 667 | list_del_init(&card->link); |
663 | mutex_unlock(&card_mutex); | 668 | mutex_unlock(&card_mutex); |
664 | 669 | ||
665 | /* Switch off most of the card driver interface. */ | 670 | /* Switch off most of the card driver interface. */ |
666 | dummy_driver.free_iso_context = card->driver->free_iso_context; | 671 | dummy_driver.free_iso_context = card->driver->free_iso_context; |
667 | dummy_driver.stop_iso = card->driver->stop_iso; | 672 | dummy_driver.stop_iso = card->driver->stop_iso; |
668 | card->driver = &dummy_driver; | 673 | card->driver = &dummy_driver; |
669 | 674 | ||
670 | fw_destroy_nodes(card); | 675 | fw_destroy_nodes(card); |
671 | 676 | ||
672 | /* Wait for all users, especially device workqueue jobs, to finish. */ | 677 | /* Wait for all users, especially device workqueue jobs, to finish. */ |
673 | fw_card_put(card); | 678 | fw_card_put(card); |
674 | wait_for_completion(&card->done); | 679 | wait_for_completion(&card->done); |
675 | 680 | ||
676 | WARN_ON(!list_empty(&card->transaction_list)); | 681 | WARN_ON(!list_empty(&card->transaction_list)); |
677 | } | 682 | } |
678 | EXPORT_SYMBOL(fw_core_remove_card); | 683 | EXPORT_SYMBOL(fw_core_remove_card); |
679 | 684 |
drivers/firewire/core-cdev.c
1 | /* | 1 | /* |
2 | * Char device for device raw access | 2 | * Char device for device raw access |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | 4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software Foundation, | 17 | * along with this program; if not, write to the Free Software Foundation, |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
22 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/firewire.h> | 26 | #include <linux/firewire.h> |
27 | #include <linux/firewire-cdev.h> | 27 | #include <linux/firewire-cdev.h> |
28 | #include <linux/idr.h> | 28 | #include <linux/idr.h> |
29 | #include <linux/irqflags.h> | 29 | #include <linux/irqflags.h> |
30 | #include <linux/jiffies.h> | 30 | #include <linux/jiffies.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/kref.h> | 32 | #include <linux/kref.h> |
33 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | #include <linux/poll.h> | 36 | #include <linux/poll.h> |
37 | #include <linux/sched.h> /* required for linux/wait.h */ | 37 | #include <linux/sched.h> /* required for linux/wait.h */ |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
40 | #include <linux/string.h> | 40 | #include <linux/string.h> |
41 | #include <linux/time.h> | 41 | #include <linux/time.h> |
42 | #include <linux/uaccess.h> | 42 | #include <linux/uaccess.h> |
43 | #include <linux/vmalloc.h> | 43 | #include <linux/vmalloc.h> |
44 | #include <linux/wait.h> | 44 | #include <linux/wait.h> |
45 | #include <linux/workqueue.h> | 45 | #include <linux/workqueue.h> |
46 | 46 | ||
47 | #include <asm/system.h> | 47 | #include <asm/system.h> |
48 | 48 | ||
49 | #include "core.h" | 49 | #include "core.h" |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * ABI version history is documented in linux/firewire-cdev.h. | 52 | * ABI version history is documented in linux/firewire-cdev.h. |
53 | */ | 53 | */ |
54 | #define FW_CDEV_KERNEL_VERSION 4 | 54 | #define FW_CDEV_KERNEL_VERSION 4 |
55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 | 55 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 |
56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 | 56 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 |
57 | 57 | ||
58 | struct client { | 58 | struct client { |
59 | u32 version; | 59 | u32 version; |
60 | struct fw_device *device; | 60 | struct fw_device *device; |
61 | 61 | ||
62 | spinlock_t lock; | 62 | spinlock_t lock; |
63 | bool in_shutdown; | 63 | bool in_shutdown; |
64 | struct idr resource_idr; | 64 | struct idr resource_idr; |
65 | struct list_head event_list; | 65 | struct list_head event_list; |
66 | wait_queue_head_t wait; | 66 | wait_queue_head_t wait; |
67 | wait_queue_head_t tx_flush_wait; | 67 | wait_queue_head_t tx_flush_wait; |
68 | u64 bus_reset_closure; | 68 | u64 bus_reset_closure; |
69 | 69 | ||
70 | struct fw_iso_context *iso_context; | 70 | struct fw_iso_context *iso_context; |
71 | u64 iso_closure; | 71 | u64 iso_closure; |
72 | struct fw_iso_buffer buffer; | 72 | struct fw_iso_buffer buffer; |
73 | unsigned long vm_start; | 73 | unsigned long vm_start; |
74 | 74 | ||
75 | struct list_head phy_receiver_link; | 75 | struct list_head phy_receiver_link; |
76 | u64 phy_receiver_closure; | 76 | u64 phy_receiver_closure; |
77 | 77 | ||
78 | struct list_head link; | 78 | struct list_head link; |
79 | struct kref kref; | 79 | struct kref kref; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static inline void client_get(struct client *client) | 82 | static inline void client_get(struct client *client) |
83 | { | 83 | { |
84 | kref_get(&client->kref); | 84 | kref_get(&client->kref); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void client_release(struct kref *kref) | 87 | static void client_release(struct kref *kref) |
88 | { | 88 | { |
89 | struct client *client = container_of(kref, struct client, kref); | 89 | struct client *client = container_of(kref, struct client, kref); |
90 | 90 | ||
91 | fw_device_put(client->device); | 91 | fw_device_put(client->device); |
92 | kfree(client); | 92 | kfree(client); |
93 | } | 93 | } |
94 | 94 | ||
95 | static void client_put(struct client *client) | 95 | static void client_put(struct client *client) |
96 | { | 96 | { |
97 | kref_put(&client->kref, client_release); | 97 | kref_put(&client->kref, client_release); |
98 | } | 98 | } |
99 | 99 | ||
100 | struct client_resource; | 100 | struct client_resource; |
101 | typedef void (*client_resource_release_fn_t)(struct client *, | 101 | typedef void (*client_resource_release_fn_t)(struct client *, |
102 | struct client_resource *); | 102 | struct client_resource *); |
103 | struct client_resource { | 103 | struct client_resource { |
104 | client_resource_release_fn_t release; | 104 | client_resource_release_fn_t release; |
105 | int handle; | 105 | int handle; |
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct address_handler_resource { | 108 | struct address_handler_resource { |
109 | struct client_resource resource; | 109 | struct client_resource resource; |
110 | struct fw_address_handler handler; | 110 | struct fw_address_handler handler; |
111 | __u64 closure; | 111 | __u64 closure; |
112 | struct client *client; | 112 | struct client *client; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | struct outbound_transaction_resource { | 115 | struct outbound_transaction_resource { |
116 | struct client_resource resource; | 116 | struct client_resource resource; |
117 | struct fw_transaction transaction; | 117 | struct fw_transaction transaction; |
118 | }; | 118 | }; |
119 | 119 | ||
120 | struct inbound_transaction_resource { | 120 | struct inbound_transaction_resource { |
121 | struct client_resource resource; | 121 | struct client_resource resource; |
122 | struct fw_card *card; | 122 | struct fw_card *card; |
123 | struct fw_request *request; | 123 | struct fw_request *request; |
124 | void *data; | 124 | void *data; |
125 | size_t length; | 125 | size_t length; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | struct descriptor_resource { | 128 | struct descriptor_resource { |
129 | struct client_resource resource; | 129 | struct client_resource resource; |
130 | struct fw_descriptor descriptor; | 130 | struct fw_descriptor descriptor; |
131 | u32 data[0]; | 131 | u32 data[0]; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | struct iso_resource { | 134 | struct iso_resource { |
135 | struct client_resource resource; | 135 | struct client_resource resource; |
136 | struct client *client; | 136 | struct client *client; |
137 | /* Schedule work and access todo only with client->lock held. */ | 137 | /* Schedule work and access todo only with client->lock held. */ |
138 | struct delayed_work work; | 138 | struct delayed_work work; |
139 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, | 139 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, |
140 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; | 140 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; |
141 | int generation; | 141 | int generation; |
142 | u64 channels; | 142 | u64 channels; |
143 | s32 bandwidth; | 143 | s32 bandwidth; |
144 | struct iso_resource_event *e_alloc, *e_dealloc; | 144 | struct iso_resource_event *e_alloc, *e_dealloc; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | static void release_iso_resource(struct client *, struct client_resource *); | 147 | static void release_iso_resource(struct client *, struct client_resource *); |
148 | 148 | ||
149 | static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) | 149 | static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) |
150 | { | 150 | { |
151 | client_get(r->client); | 151 | client_get(r->client); |
152 | if (!schedule_delayed_work(&r->work, delay)) | 152 | if (!schedule_delayed_work(&r->work, delay)) |
153 | client_put(r->client); | 153 | client_put(r->client); |
154 | } | 154 | } |
155 | 155 | ||
156 | static void schedule_if_iso_resource(struct client_resource *resource) | 156 | static void schedule_if_iso_resource(struct client_resource *resource) |
157 | { | 157 | { |
158 | if (resource->release == release_iso_resource) | 158 | if (resource->release == release_iso_resource) |
159 | schedule_iso_resource(container_of(resource, | 159 | schedule_iso_resource(container_of(resource, |
160 | struct iso_resource, resource), 0); | 160 | struct iso_resource, resource), 0); |
161 | } | 161 | } |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * dequeue_event() just kfree()'s the event, so the event has to be | 164 | * dequeue_event() just kfree()'s the event, so the event has to be |
165 | * the first field in a struct XYZ_event. | 165 | * the first field in a struct XYZ_event. |
166 | */ | 166 | */ |
167 | struct event { | 167 | struct event { |
168 | struct { void *data; size_t size; } v[2]; | 168 | struct { void *data; size_t size; } v[2]; |
169 | struct list_head link; | 169 | struct list_head link; |
170 | }; | 170 | }; |
171 | 171 | ||
172 | struct bus_reset_event { | 172 | struct bus_reset_event { |
173 | struct event event; | 173 | struct event event; |
174 | struct fw_cdev_event_bus_reset reset; | 174 | struct fw_cdev_event_bus_reset reset; |
175 | }; | 175 | }; |
176 | 176 | ||
177 | struct outbound_transaction_event { | 177 | struct outbound_transaction_event { |
178 | struct event event; | 178 | struct event event; |
179 | struct client *client; | 179 | struct client *client; |
180 | struct outbound_transaction_resource r; | 180 | struct outbound_transaction_resource r; |
181 | struct fw_cdev_event_response response; | 181 | struct fw_cdev_event_response response; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | struct inbound_transaction_event { | 184 | struct inbound_transaction_event { |
185 | struct event event; | 185 | struct event event; |
186 | union { | 186 | union { |
187 | struct fw_cdev_event_request request; | 187 | struct fw_cdev_event_request request; |
188 | struct fw_cdev_event_request2 request2; | 188 | struct fw_cdev_event_request2 request2; |
189 | } req; | 189 | } req; |
190 | }; | 190 | }; |
191 | 191 | ||
192 | struct iso_interrupt_event { | 192 | struct iso_interrupt_event { |
193 | struct event event; | 193 | struct event event; |
194 | struct fw_cdev_event_iso_interrupt interrupt; | 194 | struct fw_cdev_event_iso_interrupt interrupt; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct iso_interrupt_mc_event { | 197 | struct iso_interrupt_mc_event { |
198 | struct event event; | 198 | struct event event; |
199 | struct fw_cdev_event_iso_interrupt_mc interrupt; | 199 | struct fw_cdev_event_iso_interrupt_mc interrupt; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | struct iso_resource_event { | 202 | struct iso_resource_event { |
203 | struct event event; | 203 | struct event event; |
204 | struct fw_cdev_event_iso_resource iso_resource; | 204 | struct fw_cdev_event_iso_resource iso_resource; |
205 | }; | 205 | }; |
206 | 206 | ||
207 | struct outbound_phy_packet_event { | 207 | struct outbound_phy_packet_event { |
208 | struct event event; | 208 | struct event event; |
209 | struct client *client; | 209 | struct client *client; |
210 | struct fw_packet p; | 210 | struct fw_packet p; |
211 | struct fw_cdev_event_phy_packet phy_packet; | 211 | struct fw_cdev_event_phy_packet phy_packet; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | struct inbound_phy_packet_event { | 214 | struct inbound_phy_packet_event { |
215 | struct event event; | 215 | struct event event; |
216 | struct fw_cdev_event_phy_packet phy_packet; | 216 | struct fw_cdev_event_phy_packet phy_packet; |
217 | }; | 217 | }; |
218 | 218 | ||
219 | static inline void __user *u64_to_uptr(__u64 value) | 219 | static inline void __user *u64_to_uptr(__u64 value) |
220 | { | 220 | { |
221 | return (void __user *)(unsigned long)value; | 221 | return (void __user *)(unsigned long)value; |
222 | } | 222 | } |
223 | 223 | ||
224 | static inline __u64 uptr_to_u64(void __user *ptr) | 224 | static inline __u64 uptr_to_u64(void __user *ptr) |
225 | { | 225 | { |
226 | return (__u64)(unsigned long)ptr; | 226 | return (__u64)(unsigned long)ptr; |
227 | } | 227 | } |
228 | 228 | ||
229 | static int fw_device_op_open(struct inode *inode, struct file *file) | 229 | static int fw_device_op_open(struct inode *inode, struct file *file) |
230 | { | 230 | { |
231 | struct fw_device *device; | 231 | struct fw_device *device; |
232 | struct client *client; | 232 | struct client *client; |
233 | 233 | ||
234 | device = fw_device_get_by_devt(inode->i_rdev); | 234 | device = fw_device_get_by_devt(inode->i_rdev); |
235 | if (device == NULL) | 235 | if (device == NULL) |
236 | return -ENODEV; | 236 | return -ENODEV; |
237 | 237 | ||
238 | if (fw_device_is_shutdown(device)) { | 238 | if (fw_device_is_shutdown(device)) { |
239 | fw_device_put(device); | 239 | fw_device_put(device); |
240 | return -ENODEV; | 240 | return -ENODEV; |
241 | } | 241 | } |
242 | 242 | ||
243 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 243 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
244 | if (client == NULL) { | 244 | if (client == NULL) { |
245 | fw_device_put(device); | 245 | fw_device_put(device); |
246 | return -ENOMEM; | 246 | return -ENOMEM; |
247 | } | 247 | } |
248 | 248 | ||
249 | client->device = device; | 249 | client->device = device; |
250 | spin_lock_init(&client->lock); | 250 | spin_lock_init(&client->lock); |
251 | idr_init(&client->resource_idr); | 251 | idr_init(&client->resource_idr); |
252 | INIT_LIST_HEAD(&client->event_list); | 252 | INIT_LIST_HEAD(&client->event_list); |
253 | init_waitqueue_head(&client->wait); | 253 | init_waitqueue_head(&client->wait); |
254 | init_waitqueue_head(&client->tx_flush_wait); | 254 | init_waitqueue_head(&client->tx_flush_wait); |
255 | INIT_LIST_HEAD(&client->phy_receiver_link); | 255 | INIT_LIST_HEAD(&client->phy_receiver_link); |
256 | kref_init(&client->kref); | 256 | kref_init(&client->kref); |
257 | 257 | ||
258 | file->private_data = client; | 258 | file->private_data = client; |
259 | 259 | ||
260 | mutex_lock(&device->client_list_mutex); | 260 | mutex_lock(&device->client_list_mutex); |
261 | list_add_tail(&client->link, &device->client_list); | 261 | list_add_tail(&client->link, &device->client_list); |
262 | mutex_unlock(&device->client_list_mutex); | 262 | mutex_unlock(&device->client_list_mutex); |
263 | 263 | ||
264 | return nonseekable_open(inode, file); | 264 | return nonseekable_open(inode, file); |
265 | } | 265 | } |
266 | 266 | ||
267 | static void queue_event(struct client *client, struct event *event, | 267 | static void queue_event(struct client *client, struct event *event, |
268 | void *data0, size_t size0, void *data1, size_t size1) | 268 | void *data0, size_t size0, void *data1, size_t size1) |
269 | { | 269 | { |
270 | unsigned long flags; | 270 | unsigned long flags; |
271 | 271 | ||
272 | event->v[0].data = data0; | 272 | event->v[0].data = data0; |
273 | event->v[0].size = size0; | 273 | event->v[0].size = size0; |
274 | event->v[1].data = data1; | 274 | event->v[1].data = data1; |
275 | event->v[1].size = size1; | 275 | event->v[1].size = size1; |
276 | 276 | ||
277 | spin_lock_irqsave(&client->lock, flags); | 277 | spin_lock_irqsave(&client->lock, flags); |
278 | if (client->in_shutdown) | 278 | if (client->in_shutdown) |
279 | kfree(event); | 279 | kfree(event); |
280 | else | 280 | else |
281 | list_add_tail(&event->link, &client->event_list); | 281 | list_add_tail(&event->link, &client->event_list); |
282 | spin_unlock_irqrestore(&client->lock, flags); | 282 | spin_unlock_irqrestore(&client->lock, flags); |
283 | 283 | ||
284 | wake_up_interruptible(&client->wait); | 284 | wake_up_interruptible(&client->wait); |
285 | } | 285 | } |
286 | 286 | ||
287 | static int dequeue_event(struct client *client, | 287 | static int dequeue_event(struct client *client, |
288 | char __user *buffer, size_t count) | 288 | char __user *buffer, size_t count) |
289 | { | 289 | { |
290 | struct event *event; | 290 | struct event *event; |
291 | size_t size, total; | 291 | size_t size, total; |
292 | int i, ret; | 292 | int i, ret; |
293 | 293 | ||
294 | ret = wait_event_interruptible(client->wait, | 294 | ret = wait_event_interruptible(client->wait, |
295 | !list_empty(&client->event_list) || | 295 | !list_empty(&client->event_list) || |
296 | fw_device_is_shutdown(client->device)); | 296 | fw_device_is_shutdown(client->device)); |
297 | if (ret < 0) | 297 | if (ret < 0) |
298 | return ret; | 298 | return ret; |
299 | 299 | ||
300 | if (list_empty(&client->event_list) && | 300 | if (list_empty(&client->event_list) && |
301 | fw_device_is_shutdown(client->device)) | 301 | fw_device_is_shutdown(client->device)) |
302 | return -ENODEV; | 302 | return -ENODEV; |
303 | 303 | ||
304 | spin_lock_irq(&client->lock); | 304 | spin_lock_irq(&client->lock); |
305 | event = list_first_entry(&client->event_list, struct event, link); | 305 | event = list_first_entry(&client->event_list, struct event, link); |
306 | list_del(&event->link); | 306 | list_del(&event->link); |
307 | spin_unlock_irq(&client->lock); | 307 | spin_unlock_irq(&client->lock); |
308 | 308 | ||
309 | total = 0; | 309 | total = 0; |
310 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { | 310 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { |
311 | size = min(event->v[i].size, count - total); | 311 | size = min(event->v[i].size, count - total); |
312 | if (copy_to_user(buffer + total, event->v[i].data, size)) { | 312 | if (copy_to_user(buffer + total, event->v[i].data, size)) { |
313 | ret = -EFAULT; | 313 | ret = -EFAULT; |
314 | goto out; | 314 | goto out; |
315 | } | 315 | } |
316 | total += size; | 316 | total += size; |
317 | } | 317 | } |
318 | ret = total; | 318 | ret = total; |
319 | 319 | ||
320 | out: | 320 | out: |
321 | kfree(event); | 321 | kfree(event); |
322 | 322 | ||
323 | return ret; | 323 | return ret; |
324 | } | 324 | } |
325 | 325 | ||
326 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, | 326 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, |
327 | size_t count, loff_t *offset) | 327 | size_t count, loff_t *offset) |
328 | { | 328 | { |
329 | struct client *client = file->private_data; | 329 | struct client *client = file->private_data; |
330 | 330 | ||
331 | return dequeue_event(client, buffer, count); | 331 | return dequeue_event(client, buffer, count); |
332 | } | 332 | } |
333 | 333 | ||
334 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, | 334 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, |
335 | struct client *client) | 335 | struct client *client) |
336 | { | 336 | { |
337 | struct fw_card *card = client->device->card; | 337 | struct fw_card *card = client->device->card; |
338 | 338 | ||
339 | spin_lock_irq(&card->lock); | 339 | spin_lock_irq(&card->lock); |
340 | 340 | ||
341 | event->closure = client->bus_reset_closure; | 341 | event->closure = client->bus_reset_closure; |
342 | event->type = FW_CDEV_EVENT_BUS_RESET; | 342 | event->type = FW_CDEV_EVENT_BUS_RESET; |
343 | event->generation = client->device->generation; | 343 | event->generation = client->device->generation; |
344 | event->node_id = client->device->node_id; | 344 | event->node_id = client->device->node_id; |
345 | event->local_node_id = card->local_node->node_id; | 345 | event->local_node_id = card->local_node->node_id; |
346 | event->bm_node_id = card->bm_node_id; | 346 | event->bm_node_id = card->bm_node_id; |
347 | event->irm_node_id = card->irm_node->node_id; | 347 | event->irm_node_id = card->irm_node->node_id; |
348 | event->root_node_id = card->root_node->node_id; | 348 | event->root_node_id = card->root_node->node_id; |
349 | 349 | ||
350 | spin_unlock_irq(&card->lock); | 350 | spin_unlock_irq(&card->lock); |
351 | } | 351 | } |
352 | 352 | ||
353 | static void for_each_client(struct fw_device *device, | 353 | static void for_each_client(struct fw_device *device, |
354 | void (*callback)(struct client *client)) | 354 | void (*callback)(struct client *client)) |
355 | { | 355 | { |
356 | struct client *c; | 356 | struct client *c; |
357 | 357 | ||
358 | mutex_lock(&device->client_list_mutex); | 358 | mutex_lock(&device->client_list_mutex); |
359 | list_for_each_entry(c, &device->client_list, link) | 359 | list_for_each_entry(c, &device->client_list, link) |
360 | callback(c); | 360 | callback(c); |
361 | mutex_unlock(&device->client_list_mutex); | 361 | mutex_unlock(&device->client_list_mutex); |
362 | } | 362 | } |
363 | 363 | ||
364 | static int schedule_reallocations(int id, void *p, void *data) | 364 | static int schedule_reallocations(int id, void *p, void *data) |
365 | { | 365 | { |
366 | schedule_if_iso_resource(p); | 366 | schedule_if_iso_resource(p); |
367 | 367 | ||
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
371 | static void queue_bus_reset_event(struct client *client) | 371 | static void queue_bus_reset_event(struct client *client) |
372 | { | 372 | { |
373 | struct bus_reset_event *e; | 373 | struct bus_reset_event *e; |
374 | 374 | ||
375 | e = kzalloc(sizeof(*e), GFP_KERNEL); | 375 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
376 | if (e == NULL) { | 376 | if (e == NULL) { |
377 | fw_notify("Out of memory when allocating event\n"); | 377 | fw_notify("Out of memory when allocating event\n"); |
378 | return; | 378 | return; |
379 | } | 379 | } |
380 | 380 | ||
381 | fill_bus_reset_event(&e->reset, client); | 381 | fill_bus_reset_event(&e->reset, client); |
382 | 382 | ||
383 | queue_event(client, &e->event, | 383 | queue_event(client, &e->event, |
384 | &e->reset, sizeof(e->reset), NULL, 0); | 384 | &e->reset, sizeof(e->reset), NULL, 0); |
385 | 385 | ||
386 | spin_lock_irq(&client->lock); | 386 | spin_lock_irq(&client->lock); |
387 | idr_for_each(&client->resource_idr, schedule_reallocations, client); | 387 | idr_for_each(&client->resource_idr, schedule_reallocations, client); |
388 | spin_unlock_irq(&client->lock); | 388 | spin_unlock_irq(&client->lock); |
389 | } | 389 | } |
390 | 390 | ||
391 | void fw_device_cdev_update(struct fw_device *device) | 391 | void fw_device_cdev_update(struct fw_device *device) |
392 | { | 392 | { |
393 | for_each_client(device, queue_bus_reset_event); | 393 | for_each_client(device, queue_bus_reset_event); |
394 | } | 394 | } |
395 | 395 | ||
396 | static void wake_up_client(struct client *client) | 396 | static void wake_up_client(struct client *client) |
397 | { | 397 | { |
398 | wake_up_interruptible(&client->wait); | 398 | wake_up_interruptible(&client->wait); |
399 | } | 399 | } |
400 | 400 | ||
401 | void fw_device_cdev_remove(struct fw_device *device) | 401 | void fw_device_cdev_remove(struct fw_device *device) |
402 | { | 402 | { |
403 | for_each_client(device, wake_up_client); | 403 | for_each_client(device, wake_up_client); |
404 | } | 404 | } |
405 | 405 | ||
406 | union ioctl_arg { | 406 | union ioctl_arg { |
407 | struct fw_cdev_get_info get_info; | 407 | struct fw_cdev_get_info get_info; |
408 | struct fw_cdev_send_request send_request; | 408 | struct fw_cdev_send_request send_request; |
409 | struct fw_cdev_allocate allocate; | 409 | struct fw_cdev_allocate allocate; |
410 | struct fw_cdev_deallocate deallocate; | 410 | struct fw_cdev_deallocate deallocate; |
411 | struct fw_cdev_send_response send_response; | 411 | struct fw_cdev_send_response send_response; |
412 | struct fw_cdev_initiate_bus_reset initiate_bus_reset; | 412 | struct fw_cdev_initiate_bus_reset initiate_bus_reset; |
413 | struct fw_cdev_add_descriptor add_descriptor; | 413 | struct fw_cdev_add_descriptor add_descriptor; |
414 | struct fw_cdev_remove_descriptor remove_descriptor; | 414 | struct fw_cdev_remove_descriptor remove_descriptor; |
415 | struct fw_cdev_create_iso_context create_iso_context; | 415 | struct fw_cdev_create_iso_context create_iso_context; |
416 | struct fw_cdev_queue_iso queue_iso; | 416 | struct fw_cdev_queue_iso queue_iso; |
417 | struct fw_cdev_start_iso start_iso; | 417 | struct fw_cdev_start_iso start_iso; |
418 | struct fw_cdev_stop_iso stop_iso; | 418 | struct fw_cdev_stop_iso stop_iso; |
419 | struct fw_cdev_get_cycle_timer get_cycle_timer; | 419 | struct fw_cdev_get_cycle_timer get_cycle_timer; |
420 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; | 420 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; |
421 | struct fw_cdev_send_stream_packet send_stream_packet; | 421 | struct fw_cdev_send_stream_packet send_stream_packet; |
422 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; | 422 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; |
423 | struct fw_cdev_send_phy_packet send_phy_packet; | 423 | struct fw_cdev_send_phy_packet send_phy_packet; |
424 | struct fw_cdev_receive_phy_packets receive_phy_packets; | 424 | struct fw_cdev_receive_phy_packets receive_phy_packets; |
425 | struct fw_cdev_set_iso_channels set_iso_channels; | 425 | struct fw_cdev_set_iso_channels set_iso_channels; |
426 | }; | 426 | }; |
427 | 427 | ||
428 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) | 428 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) |
429 | { | 429 | { |
430 | struct fw_cdev_get_info *a = &arg->get_info; | 430 | struct fw_cdev_get_info *a = &arg->get_info; |
431 | struct fw_cdev_event_bus_reset bus_reset; | 431 | struct fw_cdev_event_bus_reset bus_reset; |
432 | unsigned long ret = 0; | 432 | unsigned long ret = 0; |
433 | 433 | ||
434 | client->version = a->version; | 434 | client->version = a->version; |
435 | a->version = FW_CDEV_KERNEL_VERSION; | 435 | a->version = FW_CDEV_KERNEL_VERSION; |
436 | a->card = client->device->card->index; | 436 | a->card = client->device->card->index; |
437 | 437 | ||
438 | down_read(&fw_device_rwsem); | 438 | down_read(&fw_device_rwsem); |
439 | 439 | ||
440 | if (a->rom != 0) { | 440 | if (a->rom != 0) { |
441 | size_t want = a->rom_length; | 441 | size_t want = a->rom_length; |
442 | size_t have = client->device->config_rom_length * 4; | 442 | size_t have = client->device->config_rom_length * 4; |
443 | 443 | ||
444 | ret = copy_to_user(u64_to_uptr(a->rom), | 444 | ret = copy_to_user(u64_to_uptr(a->rom), |
445 | client->device->config_rom, min(want, have)); | 445 | client->device->config_rom, min(want, have)); |
446 | } | 446 | } |
447 | a->rom_length = client->device->config_rom_length * 4; | 447 | a->rom_length = client->device->config_rom_length * 4; |
448 | 448 | ||
449 | up_read(&fw_device_rwsem); | 449 | up_read(&fw_device_rwsem); |
450 | 450 | ||
451 | if (ret != 0) | 451 | if (ret != 0) |
452 | return -EFAULT; | 452 | return -EFAULT; |
453 | 453 | ||
454 | client->bus_reset_closure = a->bus_reset_closure; | 454 | client->bus_reset_closure = a->bus_reset_closure; |
455 | if (a->bus_reset != 0) { | 455 | if (a->bus_reset != 0) { |
456 | fill_bus_reset_event(&bus_reset, client); | 456 | fill_bus_reset_event(&bus_reset, client); |
457 | if (copy_to_user(u64_to_uptr(a->bus_reset), | 457 | if (copy_to_user(u64_to_uptr(a->bus_reset), |
458 | &bus_reset, sizeof(bus_reset))) | 458 | &bus_reset, sizeof(bus_reset))) |
459 | return -EFAULT; | 459 | return -EFAULT; |
460 | } | 460 | } |
461 | 461 | ||
462 | return 0; | 462 | return 0; |
463 | } | 463 | } |
464 | 464 | ||
465 | static int add_client_resource(struct client *client, | 465 | static int add_client_resource(struct client *client, |
466 | struct client_resource *resource, gfp_t gfp_mask) | 466 | struct client_resource *resource, gfp_t gfp_mask) |
467 | { | 467 | { |
468 | unsigned long flags; | 468 | unsigned long flags; |
469 | int ret; | 469 | int ret; |
470 | 470 | ||
471 | retry: | 471 | retry: |
472 | if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) | 472 | if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) |
473 | return -ENOMEM; | 473 | return -ENOMEM; |
474 | 474 | ||
475 | spin_lock_irqsave(&client->lock, flags); | 475 | spin_lock_irqsave(&client->lock, flags); |
476 | if (client->in_shutdown) | 476 | if (client->in_shutdown) |
477 | ret = -ECANCELED; | 477 | ret = -ECANCELED; |
478 | else | 478 | else |
479 | ret = idr_get_new(&client->resource_idr, resource, | 479 | ret = idr_get_new(&client->resource_idr, resource, |
480 | &resource->handle); | 480 | &resource->handle); |
481 | if (ret >= 0) { | 481 | if (ret >= 0) { |
482 | client_get(client); | 482 | client_get(client); |
483 | schedule_if_iso_resource(resource); | 483 | schedule_if_iso_resource(resource); |
484 | } | 484 | } |
485 | spin_unlock_irqrestore(&client->lock, flags); | 485 | spin_unlock_irqrestore(&client->lock, flags); |
486 | 486 | ||
487 | if (ret == -EAGAIN) | 487 | if (ret == -EAGAIN) |
488 | goto retry; | 488 | goto retry; |
489 | 489 | ||
490 | return ret < 0 ? ret : 0; | 490 | return ret < 0 ? ret : 0; |
491 | } | 491 | } |
492 | 492 | ||
493 | static int release_client_resource(struct client *client, u32 handle, | 493 | static int release_client_resource(struct client *client, u32 handle, |
494 | client_resource_release_fn_t release, | 494 | client_resource_release_fn_t release, |
495 | struct client_resource **return_resource) | 495 | struct client_resource **return_resource) |
496 | { | 496 | { |
497 | struct client_resource *resource; | 497 | struct client_resource *resource; |
498 | 498 | ||
499 | spin_lock_irq(&client->lock); | 499 | spin_lock_irq(&client->lock); |
500 | if (client->in_shutdown) | 500 | if (client->in_shutdown) |
501 | resource = NULL; | 501 | resource = NULL; |
502 | else | 502 | else |
503 | resource = idr_find(&client->resource_idr, handle); | 503 | resource = idr_find(&client->resource_idr, handle); |
504 | if (resource && resource->release == release) | 504 | if (resource && resource->release == release) |
505 | idr_remove(&client->resource_idr, handle); | 505 | idr_remove(&client->resource_idr, handle); |
506 | spin_unlock_irq(&client->lock); | 506 | spin_unlock_irq(&client->lock); |
507 | 507 | ||
508 | if (!(resource && resource->release == release)) | 508 | if (!(resource && resource->release == release)) |
509 | return -EINVAL; | 509 | return -EINVAL; |
510 | 510 | ||
511 | if (return_resource) | 511 | if (return_resource) |
512 | *return_resource = resource; | 512 | *return_resource = resource; |
513 | else | 513 | else |
514 | resource->release(client, resource); | 514 | resource->release(client, resource); |
515 | 515 | ||
516 | client_put(client); | 516 | client_put(client); |
517 | 517 | ||
518 | return 0; | 518 | return 0; |
519 | } | 519 | } |
520 | 520 | ||
521 | static void release_transaction(struct client *client, | 521 | static void release_transaction(struct client *client, |
522 | struct client_resource *resource) | 522 | struct client_resource *resource) |
523 | { | 523 | { |
524 | } | 524 | } |
525 | 525 | ||
526 | static void complete_transaction(struct fw_card *card, int rcode, | 526 | static void complete_transaction(struct fw_card *card, int rcode, |
527 | void *payload, size_t length, void *data) | 527 | void *payload, size_t length, void *data) |
528 | { | 528 | { |
529 | struct outbound_transaction_event *e = data; | 529 | struct outbound_transaction_event *e = data; |
530 | struct fw_cdev_event_response *rsp = &e->response; | 530 | struct fw_cdev_event_response *rsp = &e->response; |
531 | struct client *client = e->client; | 531 | struct client *client = e->client; |
532 | unsigned long flags; | 532 | unsigned long flags; |
533 | 533 | ||
534 | if (length < rsp->length) | 534 | if (length < rsp->length) |
535 | rsp->length = length; | 535 | rsp->length = length; |
536 | if (rcode == RCODE_COMPLETE) | 536 | if (rcode == RCODE_COMPLETE) |
537 | memcpy(rsp->data, payload, rsp->length); | 537 | memcpy(rsp->data, payload, rsp->length); |
538 | 538 | ||
539 | spin_lock_irqsave(&client->lock, flags); | 539 | spin_lock_irqsave(&client->lock, flags); |
540 | idr_remove(&client->resource_idr, e->r.resource.handle); | 540 | idr_remove(&client->resource_idr, e->r.resource.handle); |
541 | if (client->in_shutdown) | 541 | if (client->in_shutdown) |
542 | wake_up(&client->tx_flush_wait); | 542 | wake_up(&client->tx_flush_wait); |
543 | spin_unlock_irqrestore(&client->lock, flags); | 543 | spin_unlock_irqrestore(&client->lock, flags); |
544 | 544 | ||
545 | rsp->type = FW_CDEV_EVENT_RESPONSE; | 545 | rsp->type = FW_CDEV_EVENT_RESPONSE; |
546 | rsp->rcode = rcode; | 546 | rsp->rcode = rcode; |
547 | 547 | ||
548 | /* | 548 | /* |
549 | * In the case that sizeof(*rsp) doesn't align with the position of the | 549 | * In the case that sizeof(*rsp) doesn't align with the position of the |
550 | * data, and the read is short, preserve an extra copy of the data | 550 | * data, and the read is short, preserve an extra copy of the data |
551 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless | 551 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless |
552 | * for short reads and some apps depended on it, this is both safe | 552 | * for short reads and some apps depended on it, this is both safe |
553 | * and prudent for compatibility. | 553 | * and prudent for compatibility. |
554 | */ | 554 | */ |
555 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) | 555 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) |
556 | queue_event(client, &e->event, rsp, sizeof(*rsp), | 556 | queue_event(client, &e->event, rsp, sizeof(*rsp), |
557 | rsp->data, rsp->length); | 557 | rsp->data, rsp->length); |
558 | else | 558 | else |
559 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, | 559 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, |
560 | NULL, 0); | 560 | NULL, 0); |
561 | 561 | ||
562 | /* Drop the idr's reference */ | 562 | /* Drop the idr's reference */ |
563 | client_put(client); | 563 | client_put(client); |
564 | } | 564 | } |
565 | 565 | ||
566 | static int init_request(struct client *client, | 566 | static int init_request(struct client *client, |
567 | struct fw_cdev_send_request *request, | 567 | struct fw_cdev_send_request *request, |
568 | int destination_id, int speed) | 568 | int destination_id, int speed) |
569 | { | 569 | { |
570 | struct outbound_transaction_event *e; | 570 | struct outbound_transaction_event *e; |
571 | int ret; | 571 | int ret; |
572 | 572 | ||
573 | if (request->tcode != TCODE_STREAM_DATA && | 573 | if (request->tcode != TCODE_STREAM_DATA && |
574 | (request->length > 4096 || request->length > 512 << speed)) | 574 | (request->length > 4096 || request->length > 512 << speed)) |
575 | return -EIO; | 575 | return -EIO; |
576 | 576 | ||
577 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && | 577 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && |
578 | request->length < 4) | 578 | request->length < 4) |
579 | return -EINVAL; | 579 | return -EINVAL; |
580 | 580 | ||
581 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); | 581 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); |
582 | if (e == NULL) | 582 | if (e == NULL) |
583 | return -ENOMEM; | 583 | return -ENOMEM; |
584 | 584 | ||
585 | e->client = client; | 585 | e->client = client; |
586 | e->response.length = request->length; | 586 | e->response.length = request->length; |
587 | e->response.closure = request->closure; | 587 | e->response.closure = request->closure; |
588 | 588 | ||
589 | if (request->data && | 589 | if (request->data && |
590 | copy_from_user(e->response.data, | 590 | copy_from_user(e->response.data, |
591 | u64_to_uptr(request->data), request->length)) { | 591 | u64_to_uptr(request->data), request->length)) { |
592 | ret = -EFAULT; | 592 | ret = -EFAULT; |
593 | goto failed; | 593 | goto failed; |
594 | } | 594 | } |
595 | 595 | ||
596 | e->r.resource.release = release_transaction; | 596 | e->r.resource.release = release_transaction; |
597 | ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); | 597 | ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); |
598 | if (ret < 0) | 598 | if (ret < 0) |
599 | goto failed; | 599 | goto failed; |
600 | 600 | ||
601 | fw_send_request(client->device->card, &e->r.transaction, | 601 | fw_send_request(client->device->card, &e->r.transaction, |
602 | request->tcode, destination_id, request->generation, | 602 | request->tcode, destination_id, request->generation, |
603 | speed, request->offset, e->response.data, | 603 | speed, request->offset, e->response.data, |
604 | request->length, complete_transaction, e); | 604 | request->length, complete_transaction, e); |
605 | return 0; | 605 | return 0; |
606 | 606 | ||
607 | failed: | 607 | failed: |
608 | kfree(e); | 608 | kfree(e); |
609 | 609 | ||
610 | return ret; | 610 | return ret; |
611 | } | 611 | } |
612 | 612 | ||
613 | static int ioctl_send_request(struct client *client, union ioctl_arg *arg) | 613 | static int ioctl_send_request(struct client *client, union ioctl_arg *arg) |
614 | { | 614 | { |
615 | switch (arg->send_request.tcode) { | 615 | switch (arg->send_request.tcode) { |
616 | case TCODE_WRITE_QUADLET_REQUEST: | 616 | case TCODE_WRITE_QUADLET_REQUEST: |
617 | case TCODE_WRITE_BLOCK_REQUEST: | 617 | case TCODE_WRITE_BLOCK_REQUEST: |
618 | case TCODE_READ_QUADLET_REQUEST: | 618 | case TCODE_READ_QUADLET_REQUEST: |
619 | case TCODE_READ_BLOCK_REQUEST: | 619 | case TCODE_READ_BLOCK_REQUEST: |
620 | case TCODE_LOCK_MASK_SWAP: | 620 | case TCODE_LOCK_MASK_SWAP: |
621 | case TCODE_LOCK_COMPARE_SWAP: | 621 | case TCODE_LOCK_COMPARE_SWAP: |
622 | case TCODE_LOCK_FETCH_ADD: | 622 | case TCODE_LOCK_FETCH_ADD: |
623 | case TCODE_LOCK_LITTLE_ADD: | 623 | case TCODE_LOCK_LITTLE_ADD: |
624 | case TCODE_LOCK_BOUNDED_ADD: | 624 | case TCODE_LOCK_BOUNDED_ADD: |
625 | case TCODE_LOCK_WRAP_ADD: | 625 | case TCODE_LOCK_WRAP_ADD: |
626 | case TCODE_LOCK_VENDOR_DEPENDENT: | 626 | case TCODE_LOCK_VENDOR_DEPENDENT: |
627 | break; | 627 | break; |
628 | default: | 628 | default: |
629 | return -EINVAL; | 629 | return -EINVAL; |
630 | } | 630 | } |
631 | 631 | ||
632 | return init_request(client, &arg->send_request, client->device->node_id, | 632 | return init_request(client, &arg->send_request, client->device->node_id, |
633 | client->device->max_speed); | 633 | client->device->max_speed); |
634 | } | 634 | } |
635 | 635 | ||
636 | static inline bool is_fcp_request(struct fw_request *request) | 636 | static inline bool is_fcp_request(struct fw_request *request) |
637 | { | 637 | { |
638 | return request == NULL; | 638 | return request == NULL; |
639 | } | 639 | } |
640 | 640 | ||
641 | static void release_request(struct client *client, | 641 | static void release_request(struct client *client, |
642 | struct client_resource *resource) | 642 | struct client_resource *resource) |
643 | { | 643 | { |
644 | struct inbound_transaction_resource *r = container_of(resource, | 644 | struct inbound_transaction_resource *r = container_of(resource, |
645 | struct inbound_transaction_resource, resource); | 645 | struct inbound_transaction_resource, resource); |
646 | 646 | ||
647 | if (is_fcp_request(r->request)) | 647 | if (is_fcp_request(r->request)) |
648 | kfree(r->data); | 648 | kfree(r->data); |
649 | else | 649 | else |
650 | fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); | 650 | fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR); |
651 | 651 | ||
652 | fw_card_put(r->card); | 652 | fw_card_put(r->card); |
653 | kfree(r); | 653 | kfree(r); |
654 | } | 654 | } |
655 | 655 | ||
656 | static void handle_request(struct fw_card *card, struct fw_request *request, | 656 | static void handle_request(struct fw_card *card, struct fw_request *request, |
657 | int tcode, int destination, int source, | 657 | int tcode, int destination, int source, |
658 | int generation, unsigned long long offset, | 658 | int generation, unsigned long long offset, |
659 | void *payload, size_t length, void *callback_data) | 659 | void *payload, size_t length, void *callback_data) |
660 | { | 660 | { |
661 | struct address_handler_resource *handler = callback_data; | 661 | struct address_handler_resource *handler = callback_data; |
662 | struct inbound_transaction_resource *r; | 662 | struct inbound_transaction_resource *r; |
663 | struct inbound_transaction_event *e; | 663 | struct inbound_transaction_event *e; |
664 | size_t event_size0; | 664 | size_t event_size0; |
665 | void *fcp_frame = NULL; | 665 | void *fcp_frame = NULL; |
666 | int ret; | 666 | int ret; |
667 | 667 | ||
668 | /* card may be different from handler->client->device->card */ | 668 | /* card may be different from handler->client->device->card */ |
669 | fw_card_get(card); | 669 | fw_card_get(card); |
670 | 670 | ||
671 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 671 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
672 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 672 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
673 | if (r == NULL || e == NULL) { | 673 | if (r == NULL || e == NULL) { |
674 | fw_notify("Out of memory when allocating event\n"); | 674 | fw_notify("Out of memory when allocating event\n"); |
675 | goto failed; | 675 | goto failed; |
676 | } | 676 | } |
677 | r->card = card; | 677 | r->card = card; |
678 | r->request = request; | 678 | r->request = request; |
679 | r->data = payload; | 679 | r->data = payload; |
680 | r->length = length; | 680 | r->length = length; |
681 | 681 | ||
682 | if (is_fcp_request(request)) { | 682 | if (is_fcp_request(request)) { |
683 | /* | 683 | /* |
684 | * FIXME: Let core-transaction.c manage a | 684 | * FIXME: Let core-transaction.c manage a |
685 | * single reference-counted copy? | 685 | * single reference-counted copy? |
686 | */ | 686 | */ |
687 | fcp_frame = kmemdup(payload, length, GFP_ATOMIC); | 687 | fcp_frame = kmemdup(payload, length, GFP_ATOMIC); |
688 | if (fcp_frame == NULL) | 688 | if (fcp_frame == NULL) |
689 | goto failed; | 689 | goto failed; |
690 | 690 | ||
691 | r->data = fcp_frame; | 691 | r->data = fcp_frame; |
692 | } | 692 | } |
693 | 693 | ||
694 | r->resource.release = release_request; | 694 | r->resource.release = release_request; |
695 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); | 695 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); |
696 | if (ret < 0) | 696 | if (ret < 0) |
697 | goto failed; | 697 | goto failed; |
698 | 698 | ||
699 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { | 699 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { |
700 | struct fw_cdev_event_request *req = &e->req.request; | 700 | struct fw_cdev_event_request *req = &e->req.request; |
701 | 701 | ||
702 | if (tcode & 0x10) | 702 | if (tcode & 0x10) |
703 | tcode = TCODE_LOCK_REQUEST; | 703 | tcode = TCODE_LOCK_REQUEST; |
704 | 704 | ||
705 | req->type = FW_CDEV_EVENT_REQUEST; | 705 | req->type = FW_CDEV_EVENT_REQUEST; |
706 | req->tcode = tcode; | 706 | req->tcode = tcode; |
707 | req->offset = offset; | 707 | req->offset = offset; |
708 | req->length = length; | 708 | req->length = length; |
709 | req->handle = r->resource.handle; | 709 | req->handle = r->resource.handle; |
710 | req->closure = handler->closure; | 710 | req->closure = handler->closure; |
711 | event_size0 = sizeof(*req); | 711 | event_size0 = sizeof(*req); |
712 | } else { | 712 | } else { |
713 | struct fw_cdev_event_request2 *req = &e->req.request2; | 713 | struct fw_cdev_event_request2 *req = &e->req.request2; |
714 | 714 | ||
715 | req->type = FW_CDEV_EVENT_REQUEST2; | 715 | req->type = FW_CDEV_EVENT_REQUEST2; |
716 | req->tcode = tcode; | 716 | req->tcode = tcode; |
717 | req->offset = offset; | 717 | req->offset = offset; |
718 | req->source_node_id = source; | 718 | req->source_node_id = source; |
719 | req->destination_node_id = destination; | 719 | req->destination_node_id = destination; |
720 | req->card = card->index; | 720 | req->card = card->index; |
721 | req->generation = generation; | 721 | req->generation = generation; |
722 | req->length = length; | 722 | req->length = length; |
723 | req->handle = r->resource.handle; | 723 | req->handle = r->resource.handle; |
724 | req->closure = handler->closure; | 724 | req->closure = handler->closure; |
725 | event_size0 = sizeof(*req); | 725 | event_size0 = sizeof(*req); |
726 | } | 726 | } |
727 | 727 | ||
728 | queue_event(handler->client, &e->event, | 728 | queue_event(handler->client, &e->event, |
729 | &e->req, event_size0, r->data, length); | 729 | &e->req, event_size0, r->data, length); |
730 | return; | 730 | return; |
731 | 731 | ||
732 | failed: | 732 | failed: |
733 | kfree(r); | 733 | kfree(r); |
734 | kfree(e); | 734 | kfree(e); |
735 | kfree(fcp_frame); | 735 | kfree(fcp_frame); |
736 | 736 | ||
737 | if (!is_fcp_request(request)) | 737 | if (!is_fcp_request(request)) |
738 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 738 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
739 | 739 | ||
740 | fw_card_put(card); | 740 | fw_card_put(card); |
741 | } | 741 | } |
742 | 742 | ||
743 | static void release_address_handler(struct client *client, | 743 | static void release_address_handler(struct client *client, |
744 | struct client_resource *resource) | 744 | struct client_resource *resource) |
745 | { | 745 | { |
746 | struct address_handler_resource *r = | 746 | struct address_handler_resource *r = |
747 | container_of(resource, struct address_handler_resource, resource); | 747 | container_of(resource, struct address_handler_resource, resource); |
748 | 748 | ||
749 | fw_core_remove_address_handler(&r->handler); | 749 | fw_core_remove_address_handler(&r->handler); |
750 | kfree(r); | 750 | kfree(r); |
751 | } | 751 | } |
752 | 752 | ||
/*
 * FW_CDEV_IOC_ALLOCATE: claim an address range in the local node's
 * 1394 address space and register handle_request() as its callback.
 * On success, reports the actually allocated offset and a resource
 * handle back to userspace.
 */
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	/*
	 * Old-ABI clients pass only offset+length; newer clients
	 * (>= FW_CDEV_VERSION_ALLOCATE_REGION_END) pass an explicit
	 * region end so the core may place the range anywhere inside it.
	 */
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = a->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	/* Tell userspace where the range actually ended up. */
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}
793 | 793 | ||
/* FW_CDEV_IOC_DEALLOCATE: drop an address range by resource handle. */
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}
799 | 799 | ||
/*
 * FW_CDEV_IOC_SEND_RESPONSE: complete an inbound transaction that was
 * previously delivered to userspace.  Looks up and removes the pending
 * inbound_transaction_resource by handle, copies the response payload
 * from userspace, and sends the response on the wire.
 *
 * FCP requests carry no response, so they are completed without sending.
 * The card reference taken when the request was queued is dropped here.
 */
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	/*
	 * On the error paths the request is freed directly; on success
	 * fw_send_response() presumably consumes it — NOTE(review):
	 * confirm against core-transaction.c.
	 */
	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}
833 | 833 | ||
/*
 * FW_CDEV_IOC_INITIATE_BUS_RESET: schedule a (short or long) bus reset.
 * The reset is delayed/coalesced by the core; always succeeds.
 */
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}
840 | 840 | ||
/*
 * Resource release callback for a config-ROM descriptor:
 * remove it from the local node's config ROM and free it.
 */
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}
850 | 850 | ||
/*
 * FW_CDEV_IOC_ADD_DESCRIPTOR: add a unit/descriptor block (up to 256
 * quadlets) to the local node's configuration ROM.  Only permitted on
 * device files of local nodes.  Returns a resource handle on success.
 */
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	/* a->length is in quadlets; cap the descriptor at 256 quadlets. */
	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}
896 | 896 | ||
/* FW_CDEV_IOC_REMOVE_DESCRIPTOR: drop a config-ROM descriptor by handle. */
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}
902 | 902 | ||
/*
 * Completion callback for single-channel iso contexts: package cycle
 * number and the received/transmitted packet headers into an
 * FW_CDEV_EVENT_ISO_INTERRUPT event for userspace.
 * GFP_ATOMIC because this likely runs in interrupt/tasklet context —
 * NOTE(review): confirm calling context in the OHCI driver.
 * On allocation failure the event is silently dropped (only logged).
 */
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure       = client->iso_closure;
	e->interrupt.cycle         = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}
922 | 922 | ||
/*
 * Completion callback for multichannel receive contexts: translate the
 * completed DMA address back to a buffer offset and queue an
 * FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL event.  As with
 * iso_callback(), allocation failure only drops the event.
 */
static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}
941 | 941 | ||
/*
 * FW_CDEV_IOC_CREATE_ISO_CONTEXT: create the client's (single) iso
 * context.  Validates type-specific parameters, picks the matching
 * completion callback, and stores the context under client->lock.
 * Returns -EBUSY if a context already exists for this client.
 */
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	/* The cdev ABI constants must mirror the core's context types. */
	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		/* Header size must be a positive multiple of 4 bytes. */
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	/* Single-context design: the handle is always 0. */
	a->handle = 0;

	return 0;
}
997 | 997 | ||
/*
 * FW_CDEV_IOC_SET_ISO_CHANNELS: update the channel mask of the
 * (multichannel) iso context.  Requires an existing context; the only
 * valid handle is 0.
 */
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}
1008 | 1008 | ||
/*
 * Macros for decoding the iso packet control header
 * (struct fw_cdev_iso_packet.control bit layout, see firewire-cdev.h):
 * bits 0-15 payload length, 16 interrupt, 17 skip, 18-19 tag,
 * 20-23 sy, 24-31 header length.
 */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
1016 | 1016 | ||
/*
 * FW_CDEV_IOC_QUEUE_ISO: queue a batch of iso packets described by an
 * array of struct fw_cdev_iso_packet in userspace.  Each packet is
 * validated per context type, copied in, and handed to
 * fw_iso_context_queue().  After the whole batch, a single
 * fw_iso_context_queue_flush() wakes the DMA context — batching the
 * wakeup instead of doing it per packet is the point of this design.
 *
 * Returns the number of packets queued; a->size/a->packets/a->data are
 * updated so userspace can resubmit the unqueued remainder.
 */
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];		/* bounce buffer for tx headers */
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	/* Multichannel payloads must be quadlet aligned. */
	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt      = GET_INTERRUPT(control);
		u.packet.skip           = GET_SKIP(control);
		u.packet.tag            = GET_TAG(control);
		u.packet.sy             = GET_SY(control);
		u.packet.header_length  = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			/* Tx header is copied from userspace, quadlets only. */
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			/* Must be a multiple of the per-packet header size. */
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		/* Variable-length records: next descriptor follows header. */
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		/* __copy_from_user: range already checked by access_ok(). */
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		/* A tx skip packet carries neither header nor payload. */
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	/* One wakeup for the whole batch instead of one per packet. */
	fw_iso_context_queue_flush(ctx);

	a->size   -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
	a->data    = client->vm_start + payload;

	return count;
}
1117 | 1118 | ||
/*
 * FW_CDEV_IOC_START_ISO: start the iso context at the given cycle with
 * the given sync/tag match parameters (tags/sync only validated for
 * single-channel receive contexts).
 */
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	/* The cdev ABI match flags must mirror the core's flags. */
	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}
1139 | 1140 | ||
/* FW_CDEV_IOC_STOP_ISO: stop the client's iso context (handle must be 0). */
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}
1149 | 1150 | ||
/*
 * FW_CDEV_IOC_GET_CYCLE_TIMER2: read the card's isochronous cycle timer
 * together with a system clock sample (REALTIME, MONOTONIC, or
 * MONOTONIC_RAW, chosen by a->clk_id).  Local interrupts are disabled
 * so the two reads happen close together; on bad clk_id the cycle
 * timer is still returned but ret is -EINVAL.
 */
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}
1178 | 1179 | ||
/*
 * FW_CDEV_IOC_GET_CYCLE_TIMER (old ABI): wrapper around
 * ioctl_get_cycle_timer2 using CLOCK_REALTIME, converting the
 * timespec to microseconds for the legacy local_time field.
 */
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time  = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
1192 | 1193 | ||
1193 | static void iso_resource_work(struct work_struct *work) | 1194 | static void iso_resource_work(struct work_struct *work) |
1194 | { | 1195 | { |
1195 | struct iso_resource_event *e; | 1196 | struct iso_resource_event *e; |
1196 | struct iso_resource *r = | 1197 | struct iso_resource *r = |
1197 | container_of(work, struct iso_resource, work.work); | 1198 | container_of(work, struct iso_resource, work.work); |
1198 | struct client *client = r->client; | 1199 | struct client *client = r->client; |
1199 | int generation, channel, bandwidth, todo; | 1200 | int generation, channel, bandwidth, todo; |
1200 | bool skip, free, success; | 1201 | bool skip, free, success; |
1201 | 1202 | ||
1202 | spin_lock_irq(&client->lock); | 1203 | spin_lock_irq(&client->lock); |
1203 | generation = client->device->generation; | 1204 | generation = client->device->generation; |
1204 | todo = r->todo; | 1205 | todo = r->todo; |
1205 | /* Allow 1000ms grace period for other reallocations. */ | 1206 | /* Allow 1000ms grace period for other reallocations. */ |
1206 | if (todo == ISO_RES_ALLOC && | 1207 | if (todo == ISO_RES_ALLOC && |
1207 | time_before64(get_jiffies_64(), | 1208 | time_before64(get_jiffies_64(), |
1208 | client->device->card->reset_jiffies + HZ)) { | 1209 | client->device->card->reset_jiffies + HZ)) { |
1209 | schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); | 1210 | schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); |
1210 | skip = true; | 1211 | skip = true; |
1211 | } else { | 1212 | } else { |
1212 | /* We could be called twice within the same generation. */ | 1213 | /* We could be called twice within the same generation. */ |
1213 | skip = todo == ISO_RES_REALLOC && | 1214 | skip = todo == ISO_RES_REALLOC && |
1214 | r->generation == generation; | 1215 | r->generation == generation; |
1215 | } | 1216 | } |
1216 | free = todo == ISO_RES_DEALLOC || | 1217 | free = todo == ISO_RES_DEALLOC || |
1217 | todo == ISO_RES_ALLOC_ONCE || | 1218 | todo == ISO_RES_ALLOC_ONCE || |
1218 | todo == ISO_RES_DEALLOC_ONCE; | 1219 | todo == ISO_RES_DEALLOC_ONCE; |
1219 | r->generation = generation; | 1220 | r->generation = generation; |
1220 | spin_unlock_irq(&client->lock); | 1221 | spin_unlock_irq(&client->lock); |
1221 | 1222 | ||
1222 | if (skip) | 1223 | if (skip) |
1223 | goto out; | 1224 | goto out; |
1224 | 1225 | ||
1225 | bandwidth = r->bandwidth; | 1226 | bandwidth = r->bandwidth; |
1226 | 1227 | ||
1227 | fw_iso_resource_manage(client->device->card, generation, | 1228 | fw_iso_resource_manage(client->device->card, generation, |
1228 | r->channels, &channel, &bandwidth, | 1229 | r->channels, &channel, &bandwidth, |
1229 | todo == ISO_RES_ALLOC || | 1230 | todo == ISO_RES_ALLOC || |
1230 | todo == ISO_RES_REALLOC || | 1231 | todo == ISO_RES_REALLOC || |
1231 | todo == ISO_RES_ALLOC_ONCE); | 1232 | todo == ISO_RES_ALLOC_ONCE); |
1232 | /* | 1233 | /* |
1233 | * Is this generation outdated already? As long as this resource sticks | 1234 | * Is this generation outdated already? As long as this resource sticks |
1234 | * in the idr, it will be scheduled again for a newer generation or at | 1235 | * in the idr, it will be scheduled again for a newer generation or at |
1235 | * shutdown. | 1236 | * shutdown. |
1236 | */ | 1237 | */ |
1237 | if (channel == -EAGAIN && | 1238 | if (channel == -EAGAIN && |
1238 | (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) | 1239 | (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) |
1239 | goto out; | 1240 | goto out; |
1240 | 1241 | ||
1241 | success = channel >= 0 || bandwidth > 0; | 1242 | success = channel >= 0 || bandwidth > 0; |
1242 | 1243 | ||
1243 | spin_lock_irq(&client->lock); | 1244 | spin_lock_irq(&client->lock); |
1244 | /* | 1245 | /* |
1245 | * Transit from allocation to reallocation, except if the client | 1246 | * Transit from allocation to reallocation, except if the client |
1246 | * requested deallocation in the meantime. | 1247 | * requested deallocation in the meantime. |
1247 | */ | 1248 | */ |
1248 | if (r->todo == ISO_RES_ALLOC) | 1249 | if (r->todo == ISO_RES_ALLOC) |
1249 | r->todo = ISO_RES_REALLOC; | 1250 | r->todo = ISO_RES_REALLOC; |
1250 | /* | 1251 | /* |
1251 | * Allocation or reallocation failure? Pull this resource out of the | 1252 | * Allocation or reallocation failure? Pull this resource out of the |
1252 | * idr and prepare for deletion, unless the client is shutting down. | 1253 | * idr and prepare for deletion, unless the client is shutting down. |
1253 | */ | 1254 | */ |
1254 | if (r->todo == ISO_RES_REALLOC && !success && | 1255 | if (r->todo == ISO_RES_REALLOC && !success && |
1255 | !client->in_shutdown && | 1256 | !client->in_shutdown && |
1256 | idr_find(&client->resource_idr, r->resource.handle)) { | 1257 | idr_find(&client->resource_idr, r->resource.handle)) { |
1257 | idr_remove(&client->resource_idr, r->resource.handle); | 1258 | idr_remove(&client->resource_idr, r->resource.handle); |
1258 | client_put(client); | 1259 | client_put(client); |
1259 | free = true; | 1260 | free = true; |
1260 | } | 1261 | } |
1261 | spin_unlock_irq(&client->lock); | 1262 | spin_unlock_irq(&client->lock); |
1262 | 1263 | ||
1263 | if (todo == ISO_RES_ALLOC && channel >= 0) | 1264 | if (todo == ISO_RES_ALLOC && channel >= 0) |
1264 | r->channels = 1ULL << channel; | 1265 | r->channels = 1ULL << channel; |
1265 | 1266 | ||
1266 | if (todo == ISO_RES_REALLOC && success) | 1267 | if (todo == ISO_RES_REALLOC && success) |
1267 | goto out; | 1268 | goto out; |
1268 | 1269 | ||
1269 | if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { | 1270 | if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { |
1270 | e = r->e_alloc; | 1271 | e = r->e_alloc; |
1271 | r->e_alloc = NULL; | 1272 | r->e_alloc = NULL; |
1272 | } else { | 1273 | } else { |
1273 | e = r->e_dealloc; | 1274 | e = r->e_dealloc; |
1274 | r->e_dealloc = NULL; | 1275 | r->e_dealloc = NULL; |
1275 | } | 1276 | } |
1276 | e->iso_resource.handle = r->resource.handle; | 1277 | e->iso_resource.handle = r->resource.handle; |
1277 | e->iso_resource.channel = channel; | 1278 | e->iso_resource.channel = channel; |
1278 | e->iso_resource.bandwidth = bandwidth; | 1279 | e->iso_resource.bandwidth = bandwidth; |
1279 | 1280 | ||
1280 | queue_event(client, &e->event, | 1281 | queue_event(client, &e->event, |
1281 | &e->iso_resource, sizeof(e->iso_resource), NULL, 0); | 1282 | &e->iso_resource, sizeof(e->iso_resource), NULL, 0); |
1282 | 1283 | ||
1283 | if (free) { | 1284 | if (free) { |
1284 | cancel_delayed_work(&r->work); | 1285 | cancel_delayed_work(&r->work); |
1285 | kfree(r->e_alloc); | 1286 | kfree(r->e_alloc); |
1286 | kfree(r->e_dealloc); | 1287 | kfree(r->e_dealloc); |
1287 | kfree(r); | 1288 | kfree(r); |
1288 | } | 1289 | } |
1289 | out: | 1290 | out: |
1290 | client_put(client); | 1291 | client_put(client); |
1291 | } | 1292 | } |
1292 | 1293 | ||
/*
 * Resource-release callback for an iso resource: mark it for deallocation
 * and kick the work item that performs the actual bus transaction.
 */
static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	/* r->todo is protected by client->lock (same as in iso_resource_work). */
	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}
1304 | 1305 | ||
/*
 * Set up an iso resource (channel and/or bandwidth) allocation request.
 *
 * @todo selects the mode: ISO_RES_ALLOC creates a long-lived, client-tracked
 * resource; the *_ONCE variants perform a one-shot (de)allocation that is not
 * registered in the client's resource idr.
 *
 * Returns 0 on success, -EINVAL on bad request parameters, -ENOMEM on
 * allocation failure, or the error from add_client_resource().
 * On success, request->handle is filled in (-1 for the one-shot variants).
 */
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	/* Reject empty requests and out-of-range bandwidth values. */
	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	/* Preallocate both event payloads so the work item never has to. */
	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;	/* forces (re)allocation in the first run */
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		/* Track the resource so it is reallocated after bus resets
		 * and released on client shutdown. */
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		/* One-shot: no idr entry, schedule the work immediately. */
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	/* kfree(NULL) is a no-op, so partial allocation is handled here. */
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}
1359 | 1360 | ||
/* FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE: long-lived, auto-reallocated resource. */
static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}
1366 | 1367 | ||
/*
 * FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE: drop a tracked resource by handle;
 * release_iso_resource() then schedules the actual deallocation.
 */
static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}
1373 | 1374 | ||
/* FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE: one-shot allocation, not tracked. */
static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
1380 | 1381 | ||
/*
 * FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE: one-shot deallocation.
 * Deliberately goes through init_iso_resource() (not release_*) because
 * the deallocation is itself an asynchronous bus operation.
 */
static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
1387 | 1388 | ||
/*
 * Returns a speed code: Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	/* Positive return value doubles as the ioctl's result. */
	return client->device->max_speed;
}
1397 | 1398 | ||
/*
 * FW_CDEV_IOC_SEND_BROADCAST_REQUEST: send a write request to the broadcast
 * node ID at the base rate (S100).  Only write tcodes make sense for
 * broadcast; anything else is rejected.
 */
static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
1417 | 1418 | ||
/*
 * FW_CDEV_IOC_SEND_STREAM_PACKET: transmit an asynchronous stream packet.
 * Validates speed against the local link, payload length against the
 * per-speed maximum (1024 << speed), and the tag/channel/sy fields, then
 * reuses the regular request path with TCODE_STREAM_DATA.
 */
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	/* tag is 2 bits, channel 6 bits, sy 4 bits on the wire */
	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	/* The destination ID encodes tag/channel/sy for stream packets. */
	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}
1440 | 1441 | ||
1441 | static void outbound_phy_packet_callback(struct fw_packet *packet, | 1442 | static void outbound_phy_packet_callback(struct fw_packet *packet, |
1442 | struct fw_card *card, int status) | 1443 | struct fw_card *card, int status) |
1443 | { | 1444 | { |
1444 | struct outbound_phy_packet_event *e = | 1445 | struct outbound_phy_packet_event *e = |
1445 | container_of(packet, struct outbound_phy_packet_event, p); | 1446 | container_of(packet, struct outbound_phy_packet_event, p); |
1446 | 1447 | ||
1447 | switch (status) { | 1448 | switch (status) { |
1448 | /* expected: */ | 1449 | /* expected: */ |
1449 | case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; | 1450 | case ACK_COMPLETE: e->phy_packet.rcode = RCODE_COMPLETE; break; |
1450 | /* should never happen with PHY packets: */ | 1451 | /* should never happen with PHY packets: */ |
1451 | case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; | 1452 | case ACK_PENDING: e->phy_packet.rcode = RCODE_COMPLETE; break; |
1452 | case ACK_BUSY_X: | 1453 | case ACK_BUSY_X: |
1453 | case ACK_BUSY_A: | 1454 | case ACK_BUSY_A: |
1454 | case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; | 1455 | case ACK_BUSY_B: e->phy_packet.rcode = RCODE_BUSY; break; |
1455 | case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; | 1456 | case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break; |
1456 | case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; | 1457 | case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break; |
1457 | /* stale generation; cancelled; on certain controllers: no ack */ | 1458 | /* stale generation; cancelled; on certain controllers: no ack */ |
1458 | default: e->phy_packet.rcode = status; break; | 1459 | default: e->phy_packet.rcode = status; break; |
1459 | } | 1460 | } |
1460 | e->phy_packet.data[0] = packet->timestamp; | 1461 | e->phy_packet.data[0] = packet->timestamp; |
1461 | 1462 | ||
1462 | queue_event(e->client, &e->event, &e->phy_packet, | 1463 | queue_event(e->client, &e->event, &e->phy_packet, |
1463 | sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); | 1464 | sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); |
1464 | client_put(e->client); | 1465 | client_put(e->client); |
1465 | } | 1466 | } |
1466 | 1467 | ||
/*
 * FW_CDEV_IOC_SEND_PHY_PACKET: transmit a raw PHY packet.
 * The completion event is delivered by outbound_phy_packet_callback(),
 * which also drops the client reference taken here.
 */
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	/* +4 bytes: room for the ping-time datum appended to the event. */
	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	/* Reference is released in outbound_phy_packet_callback(). */
	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;	/* PHY packets always go at S100 */
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	/* For ping packets, report the measured ping time (4 bytes). */
	if (is_ping_packet(a->data))
			e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}
1499 | 1500 | ||
/*
 * FW_CDEV_IOC_RECEIVE_PHY_PACKETS: register this client to receive inbound
 * PHY packets (delivered by fw_cdev_handle_phy_packet()).
 */
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	/* list_move_tail makes repeated calls idempotent for the same client. */
	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}
1518 | 1519 | ||
/*
 * Fan out an inbound PHY packet to every client registered via
 * FW_CDEV_IOC_RECEIVE_PHY_PACKETS.  May be called from interrupt context
 * (irqsave locking, GFP_ATOMIC allocation).
 */
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		/* +8 bytes: the two PHY packet quadlets copied below. */
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notify("Out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
1545 | 1546 | ||
/*
 * Dispatch table indexed by _IOC_NR() of the ioctl command (see
 * dispatch_ioctl()).  The indices are part of the user-space ABI and
 * must not be renumbered.
 */
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
};
1572 | 1573 | ||
/*
 * Common ioctl entry: validate the command, copy the argument in and/or
 * out according to its _IOC direction bits, and call the handler.
 * Positive handler return values are passed through to user space.
 */
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* All commands use type '#' and must fit the argument union. */
	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	/* Read-only commands get a zeroed buffer instead of a copy_from_user. */
	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}
1604 | 1605 | ||
/* .unlocked_ioctl file operation. */
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
1610 | 1611 | ||
#ifdef CONFIG_COMPAT
/* .compat_ioctl: same dispatch, with 32-bit user pointer conversion. */
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
1618 | 1619 | ||
/*
 * mmap() the client's iso DMA buffer into user space.  The mapping must be
 * shared, page-aligned, and a whole number of pages; only one buffer per
 * client is supported.  VM_WRITE selects the DMA direction.
 */
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	/* User-writable mapping => data flows to the device, and vice versa. */
	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	/* Undo the buffer allocation if the mapping itself fails. */
	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
1661 | 1662 | ||
/* idr_for_each() predicate: does this resource represent an outbound
 * transaction?  Identified by its release callback. */
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}
1668 | 1669 | ||
/*
 * Nonzero if the client still owns any outbound-transaction resources.
 * Walks the resource idr under client->lock.
 */
static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}
1680 | 1681 | ||
/*
 * idr_for_each() callback used at client shutdown: release the resource
 * and drop the idr's reference on the client.
 */
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}
1691 | 1692 | ||
1692 | static int fw_device_op_release(struct inode *inode, struct file *file) | 1693 | static int fw_device_op_release(struct inode *inode, struct file *file) |
1693 | { | 1694 | { |
1694 | struct client *client = file->private_data; | 1695 | struct client *client = file->private_data; |
1695 | struct event *event, *next_event; | 1696 | struct event *event, *next_event; |
1696 | 1697 | ||
1697 | spin_lock_irq(&client->device->card->lock); | 1698 | spin_lock_irq(&client->device->card->lock); |
1698 | list_del(&client->phy_receiver_link); | 1699 | list_del(&client->phy_receiver_link); |
1699 | spin_unlock_irq(&client->device->card->lock); | 1700 | spin_unlock_irq(&client->device->card->lock); |
1700 | 1701 | ||
1701 | mutex_lock(&client->device->client_list_mutex); | 1702 | mutex_lock(&client->device->client_list_mutex); |
1702 | list_del(&client->link); | 1703 | list_del(&client->link); |
1703 | mutex_unlock(&client->device->client_list_mutex); | 1704 | mutex_unlock(&client->device->client_list_mutex); |
1704 | 1705 | ||
1705 | if (client->iso_context) | 1706 | if (client->iso_context) |
1706 | fw_iso_context_destroy(client->iso_context); | 1707 | fw_iso_context_destroy(client->iso_context); |
1707 | 1708 | ||
1708 | if (client->buffer.pages) | 1709 | if (client->buffer.pages) |
1709 | fw_iso_buffer_destroy(&client->buffer, client->device->card); | 1710 | fw_iso_buffer_destroy(&client->buffer, client->device->card); |
1710 | 1711 | ||
1711 | /* Freeze client->resource_idr and client->event_list */ | 1712 | /* Freeze client->resource_idr and client->event_list */ |
1712 | spin_lock_irq(&client->lock); | 1713 | spin_lock_irq(&client->lock); |
1713 | client->in_shutdown = true; | 1714 | client->in_shutdown = true; |
1714 | spin_unlock_irq(&client->lock); | 1715 | spin_unlock_irq(&client->lock); |
1715 | 1716 | ||
1716 | wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); | 1717 | wait_event(client->tx_flush_wait, !has_outbound_transactions(client)); |
1717 | 1718 | ||
1718 | idr_for_each(&client->resource_idr, shutdown_resource, client); | 1719 | idr_for_each(&client->resource_idr, shutdown_resource, client); |
1719 | idr_remove_all(&client->resource_idr); | 1720 | idr_remove_all(&client->resource_idr); |
1720 | idr_destroy(&client->resource_idr); | 1721 | idr_destroy(&client->resource_idr); |
1721 | 1722 | ||
1722 | list_for_each_entry_safe(event, next_event, &client->event_list, link) | 1723 | list_for_each_entry_safe(event, next_event, &client->event_list, link) |
1723 | kfree(event); | 1724 | kfree(event); |
1724 | 1725 | ||
1725 | client_put(client); | 1726 | client_put(client); |
1726 | 1727 | ||
1727 | return 0; | 1728 | return 0; |
1728 | } | 1729 | } |
1729 | 1730 | ||
1730 | static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) | 1731 | static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) |
1731 | { | 1732 | { |
1732 | struct client *client = file->private_data; | 1733 | struct client *client = file->private_data; |
1733 | unsigned int mask = 0; | 1734 | unsigned int mask = 0; |
1734 | 1735 | ||
1735 | poll_wait(file, &client->wait, pt); | 1736 | poll_wait(file, &client->wait, pt); |
1736 | 1737 | ||
1737 | if (fw_device_is_shutdown(client->device)) | 1738 | if (fw_device_is_shutdown(client->device)) |
1738 | mask |= POLLHUP | POLLERR; | 1739 | mask |= POLLHUP | POLLERR; |
1739 | if (!list_empty(&client->event_list)) | 1740 | if (!list_empty(&client->event_list)) |
1740 | mask |= POLLIN | POLLRDNORM; | 1741 | mask |= POLLIN | POLLRDNORM; |
1741 | 1742 | ||
1742 | return mask; | 1743 | return mask; |
1743 | } | 1744 | } |
1744 | 1745 | ||
1745 | const struct file_operations fw_device_ops = { | 1746 | const struct file_operations fw_device_ops = { |
1746 | .owner = THIS_MODULE, | 1747 | .owner = THIS_MODULE, |
1747 | .llseek = no_llseek, | 1748 | .llseek = no_llseek, |
1748 | .open = fw_device_op_open, | 1749 | .open = fw_device_op_open, |
1749 | .read = fw_device_op_read, | 1750 | .read = fw_device_op_read, |
1750 | .unlocked_ioctl = fw_device_op_ioctl, | 1751 | .unlocked_ioctl = fw_device_op_ioctl, |
1751 | .mmap = fw_device_op_mmap, | 1752 | .mmap = fw_device_op_mmap, |
1752 | .release = fw_device_op_release, | 1753 | .release = fw_device_op_release, |
1753 | .poll = fw_device_op_poll, | 1754 | .poll = fw_device_op_poll, |
1754 | #ifdef CONFIG_COMPAT | 1755 | #ifdef CONFIG_COMPAT |
1755 | .compat_ioctl = fw_device_op_compat_ioctl, | 1756 | .compat_ioctl = fw_device_op_compat_ioctl, |
1756 | #endif | 1757 | #endif |
1757 | }; | 1758 | }; |
1758 | 1759 |
drivers/firewire/core-iso.c
1 | /* | 1 | /* |
2 | * Isochronous I/O functionality: | 2 | * Isochronous I/O functionality: |
3 | * - Isochronous DMA context management | 3 | * - Isochronous DMA context management |
4 | * - Isochronous bus resource management (channels, bandwidth), client side | 4 | * - Isochronous bus resource management (channels, bandwidth), client side |
5 | * | 5 | * |
6 | * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> | 6 | * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software Foundation, | 19 | * along with this program; if not, write to the Free Software Foundation, |
20 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 20 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/firewire.h> | 25 | #include <linux/firewire.h> |
26 | #include <linux/firewire-constants.h> | 26 | #include <linux/firewire-constants.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
31 | #include <linux/vmalloc.h> | 31 | #include <linux/vmalloc.h> |
32 | 32 | ||
33 | #include <asm/byteorder.h> | 33 | #include <asm/byteorder.h> |
34 | 34 | ||
35 | #include "core.h" | 35 | #include "core.h" |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Isochronous DMA context management | 38 | * Isochronous DMA context management |
39 | */ | 39 | */ |
40 | 40 | ||
41 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | 41 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
42 | int page_count, enum dma_data_direction direction) | 42 | int page_count, enum dma_data_direction direction) |
43 | { | 43 | { |
44 | int i, j; | 44 | int i, j; |
45 | dma_addr_t address; | 45 | dma_addr_t address; |
46 | 46 | ||
47 | buffer->page_count = page_count; | 47 | buffer->page_count = page_count; |
48 | buffer->direction = direction; | 48 | buffer->direction = direction; |
49 | 49 | ||
50 | buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), | 50 | buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), |
51 | GFP_KERNEL); | 51 | GFP_KERNEL); |
52 | if (buffer->pages == NULL) | 52 | if (buffer->pages == NULL) |
53 | goto out; | 53 | goto out; |
54 | 54 | ||
55 | for (i = 0; i < buffer->page_count; i++) { | 55 | for (i = 0; i < buffer->page_count; i++) { |
56 | buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); | 56 | buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
57 | if (buffer->pages[i] == NULL) | 57 | if (buffer->pages[i] == NULL) |
58 | goto out_pages; | 58 | goto out_pages; |
59 | 59 | ||
60 | address = dma_map_page(card->device, buffer->pages[i], | 60 | address = dma_map_page(card->device, buffer->pages[i], |
61 | 0, PAGE_SIZE, direction); | 61 | 0, PAGE_SIZE, direction); |
62 | if (dma_mapping_error(card->device, address)) { | 62 | if (dma_mapping_error(card->device, address)) { |
63 | __free_page(buffer->pages[i]); | 63 | __free_page(buffer->pages[i]); |
64 | goto out_pages; | 64 | goto out_pages; |
65 | } | 65 | } |
66 | set_page_private(buffer->pages[i], address); | 66 | set_page_private(buffer->pages[i], address); |
67 | } | 67 | } |
68 | 68 | ||
69 | return 0; | 69 | return 0; |
70 | 70 | ||
71 | out_pages: | 71 | out_pages: |
72 | for (j = 0; j < i; j++) { | 72 | for (j = 0; j < i; j++) { |
73 | address = page_private(buffer->pages[j]); | 73 | address = page_private(buffer->pages[j]); |
74 | dma_unmap_page(card->device, address, | 74 | dma_unmap_page(card->device, address, |
75 | PAGE_SIZE, direction); | 75 | PAGE_SIZE, direction); |
76 | __free_page(buffer->pages[j]); | 76 | __free_page(buffer->pages[j]); |
77 | } | 77 | } |
78 | kfree(buffer->pages); | 78 | kfree(buffer->pages); |
79 | out: | 79 | out: |
80 | buffer->pages = NULL; | 80 | buffer->pages = NULL; |
81 | 81 | ||
82 | return -ENOMEM; | 82 | return -ENOMEM; |
83 | } | 83 | } |
84 | EXPORT_SYMBOL(fw_iso_buffer_init); | 84 | EXPORT_SYMBOL(fw_iso_buffer_init); |
85 | 85 | ||
86 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) | 86 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) |
87 | { | 87 | { |
88 | unsigned long uaddr; | 88 | unsigned long uaddr; |
89 | int i, err; | 89 | int i, err; |
90 | 90 | ||
91 | uaddr = vma->vm_start; | 91 | uaddr = vma->vm_start; |
92 | for (i = 0; i < buffer->page_count; i++) { | 92 | for (i = 0; i < buffer->page_count; i++) { |
93 | err = vm_insert_page(vma, uaddr, buffer->pages[i]); | 93 | err = vm_insert_page(vma, uaddr, buffer->pages[i]); |
94 | if (err) | 94 | if (err) |
95 | return err; | 95 | return err; |
96 | 96 | ||
97 | uaddr += PAGE_SIZE; | 97 | uaddr += PAGE_SIZE; |
98 | } | 98 | } |
99 | 99 | ||
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, | 103 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, |
104 | struct fw_card *card) | 104 | struct fw_card *card) |
105 | { | 105 | { |
106 | int i; | 106 | int i; |
107 | dma_addr_t address; | 107 | dma_addr_t address; |
108 | 108 | ||
109 | for (i = 0; i < buffer->page_count; i++) { | 109 | for (i = 0; i < buffer->page_count; i++) { |
110 | address = page_private(buffer->pages[i]); | 110 | address = page_private(buffer->pages[i]); |
111 | dma_unmap_page(card->device, address, | 111 | dma_unmap_page(card->device, address, |
112 | PAGE_SIZE, buffer->direction); | 112 | PAGE_SIZE, buffer->direction); |
113 | __free_page(buffer->pages[i]); | 113 | __free_page(buffer->pages[i]); |
114 | } | 114 | } |
115 | 115 | ||
116 | kfree(buffer->pages); | 116 | kfree(buffer->pages); |
117 | buffer->pages = NULL; | 117 | buffer->pages = NULL; |
118 | } | 118 | } |
119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); | 119 | EXPORT_SYMBOL(fw_iso_buffer_destroy); |
120 | 120 | ||
121 | /* Convert DMA address to offset into virtually contiguous buffer. */ | 121 | /* Convert DMA address to offset into virtually contiguous buffer. */ |
122 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) | 122 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) |
123 | { | 123 | { |
124 | int i; | 124 | int i; |
125 | dma_addr_t address; | 125 | dma_addr_t address; |
126 | ssize_t offset; | 126 | ssize_t offset; |
127 | 127 | ||
128 | for (i = 0; i < buffer->page_count; i++) { | 128 | for (i = 0; i < buffer->page_count; i++) { |
129 | address = page_private(buffer->pages[i]); | 129 | address = page_private(buffer->pages[i]); |
130 | offset = (ssize_t)completed - (ssize_t)address; | 130 | offset = (ssize_t)completed - (ssize_t)address; |
131 | if (offset > 0 && offset <= PAGE_SIZE) | 131 | if (offset > 0 && offset <= PAGE_SIZE) |
132 | return (i << PAGE_SHIFT) + offset; | 132 | return (i << PAGE_SHIFT) + offset; |
133 | } | 133 | } |
134 | 134 | ||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 138 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
139 | int type, int channel, int speed, size_t header_size, | 139 | int type, int channel, int speed, size_t header_size, |
140 | fw_iso_callback_t callback, void *callback_data) | 140 | fw_iso_callback_t callback, void *callback_data) |
141 | { | 141 | { |
142 | struct fw_iso_context *ctx; | 142 | struct fw_iso_context *ctx; |
143 | 143 | ||
144 | ctx = card->driver->allocate_iso_context(card, | 144 | ctx = card->driver->allocate_iso_context(card, |
145 | type, channel, header_size); | 145 | type, channel, header_size); |
146 | if (IS_ERR(ctx)) | 146 | if (IS_ERR(ctx)) |
147 | return ctx; | 147 | return ctx; |
148 | 148 | ||
149 | ctx->card = card; | 149 | ctx->card = card; |
150 | ctx->type = type; | 150 | ctx->type = type; |
151 | ctx->channel = channel; | 151 | ctx->channel = channel; |
152 | ctx->speed = speed; | 152 | ctx->speed = speed; |
153 | ctx->header_size = header_size; | 153 | ctx->header_size = header_size; |
154 | ctx->callback.sc = callback; | 154 | ctx->callback.sc = callback; |
155 | ctx->callback_data = callback_data; | 155 | ctx->callback_data = callback_data; |
156 | 156 | ||
157 | return ctx; | 157 | return ctx; |
158 | } | 158 | } |
159 | EXPORT_SYMBOL(fw_iso_context_create); | 159 | EXPORT_SYMBOL(fw_iso_context_create); |
160 | 160 | ||
161 | void fw_iso_context_destroy(struct fw_iso_context *ctx) | 161 | void fw_iso_context_destroy(struct fw_iso_context *ctx) |
162 | { | 162 | { |
163 | ctx->card->driver->free_iso_context(ctx); | 163 | ctx->card->driver->free_iso_context(ctx); |
164 | } | 164 | } |
165 | EXPORT_SYMBOL(fw_iso_context_destroy); | 165 | EXPORT_SYMBOL(fw_iso_context_destroy); |
166 | 166 | ||
167 | int fw_iso_context_start(struct fw_iso_context *ctx, | 167 | int fw_iso_context_start(struct fw_iso_context *ctx, |
168 | int cycle, int sync, int tags) | 168 | int cycle, int sync, int tags) |
169 | { | 169 | { |
170 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); | 170 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); |
171 | } | 171 | } |
172 | EXPORT_SYMBOL(fw_iso_context_start); | 172 | EXPORT_SYMBOL(fw_iso_context_start); |
173 | 173 | ||
174 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) | 174 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) |
175 | { | 175 | { |
176 | return ctx->card->driver->set_iso_channels(ctx, channels); | 176 | return ctx->card->driver->set_iso_channels(ctx, channels); |
177 | } | 177 | } |
178 | 178 | ||
179 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 179 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
180 | struct fw_iso_packet *packet, | 180 | struct fw_iso_packet *packet, |
181 | struct fw_iso_buffer *buffer, | 181 | struct fw_iso_buffer *buffer, |
182 | unsigned long payload) | 182 | unsigned long payload) |
183 | { | 183 | { |
184 | return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); | 184 | return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); |
185 | } | 185 | } |
186 | EXPORT_SYMBOL(fw_iso_context_queue); | 186 | EXPORT_SYMBOL(fw_iso_context_queue); |
187 | 187 | ||
188 | void fw_iso_context_queue_flush(struct fw_iso_context *ctx) | ||
189 | { | ||
190 | ctx->card->driver->flush_queue_iso(ctx); | ||
191 | } | ||
192 | EXPORT_SYMBOL(fw_iso_context_queue_flush); | ||
193 | |||
188 | int fw_iso_context_stop(struct fw_iso_context *ctx) | 194 | int fw_iso_context_stop(struct fw_iso_context *ctx) |
189 | { | 195 | { |
190 | return ctx->card->driver->stop_iso(ctx); | 196 | return ctx->card->driver->stop_iso(ctx); |
191 | } | 197 | } |
192 | EXPORT_SYMBOL(fw_iso_context_stop); | 198 | EXPORT_SYMBOL(fw_iso_context_stop); |
193 | 199 | ||
194 | /* | 200 | /* |
195 | * Isochronous bus resource management (channels, bandwidth), client side | 201 | * Isochronous bus resource management (channels, bandwidth), client side |
196 | */ | 202 | */ |
197 | 203 | ||
198 | static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | 204 | static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, |
199 | int bandwidth, bool allocate) | 205 | int bandwidth, bool allocate) |
200 | { | 206 | { |
201 | int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; | 207 | int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; |
202 | __be32 data[2]; | 208 | __be32 data[2]; |
203 | 209 | ||
204 | /* | 210 | /* |
205 | * On a 1394a IRM with low contention, try < 1 is enough. | 211 | * On a 1394a IRM with low contention, try < 1 is enough. |
206 | * On a 1394-1995 IRM, we need at least try < 2. | 212 | * On a 1394-1995 IRM, we need at least try < 2. |
207 | * Let's just do try < 5. | 213 | * Let's just do try < 5. |
208 | */ | 214 | */ |
209 | for (try = 0; try < 5; try++) { | 215 | for (try = 0; try < 5; try++) { |
210 | new = allocate ? old - bandwidth : old + bandwidth; | 216 | new = allocate ? old - bandwidth : old + bandwidth; |
211 | if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) | 217 | if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) |
212 | return -EBUSY; | 218 | return -EBUSY; |
213 | 219 | ||
214 | data[0] = cpu_to_be32(old); | 220 | data[0] = cpu_to_be32(old); |
215 | data[1] = cpu_to_be32(new); | 221 | data[1] = cpu_to_be32(new); |
216 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 222 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
217 | irm_id, generation, SCODE_100, | 223 | irm_id, generation, SCODE_100, |
218 | CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, | 224 | CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, |
219 | data, 8)) { | 225 | data, 8)) { |
220 | case RCODE_GENERATION: | 226 | case RCODE_GENERATION: |
221 | /* A generation change frees all bandwidth. */ | 227 | /* A generation change frees all bandwidth. */ |
222 | return allocate ? -EAGAIN : bandwidth; | 228 | return allocate ? -EAGAIN : bandwidth; |
223 | 229 | ||
224 | case RCODE_COMPLETE: | 230 | case RCODE_COMPLETE: |
225 | if (be32_to_cpup(data) == old) | 231 | if (be32_to_cpup(data) == old) |
226 | return bandwidth; | 232 | return bandwidth; |
227 | 233 | ||
228 | old = be32_to_cpup(data); | 234 | old = be32_to_cpup(data); |
229 | /* Fall through. */ | 235 | /* Fall through. */ |
230 | } | 236 | } |
231 | } | 237 | } |
232 | 238 | ||
233 | return -EIO; | 239 | return -EIO; |
234 | } | 240 | } |
235 | 241 | ||
236 | static int manage_channel(struct fw_card *card, int irm_id, int generation, | 242 | static int manage_channel(struct fw_card *card, int irm_id, int generation, |
237 | u32 channels_mask, u64 offset, bool allocate) | 243 | u32 channels_mask, u64 offset, bool allocate) |
238 | { | 244 | { |
239 | __be32 bit, all, old; | 245 | __be32 bit, all, old; |
240 | __be32 data[2]; | 246 | __be32 data[2]; |
241 | int channel, ret = -EIO, retry = 5; | 247 | int channel, ret = -EIO, retry = 5; |
242 | 248 | ||
243 | old = all = allocate ? cpu_to_be32(~0) : 0; | 249 | old = all = allocate ? cpu_to_be32(~0) : 0; |
244 | 250 | ||
245 | for (channel = 0; channel < 32; channel++) { | 251 | for (channel = 0; channel < 32; channel++) { |
246 | if (!(channels_mask & 1 << channel)) | 252 | if (!(channels_mask & 1 << channel)) |
247 | continue; | 253 | continue; |
248 | 254 | ||
249 | ret = -EBUSY; | 255 | ret = -EBUSY; |
250 | 256 | ||
251 | bit = cpu_to_be32(1 << (31 - channel)); | 257 | bit = cpu_to_be32(1 << (31 - channel)); |
252 | if ((old & bit) != (all & bit)) | 258 | if ((old & bit) != (all & bit)) |
253 | continue; | 259 | continue; |
254 | 260 | ||
255 | data[0] = old; | 261 | data[0] = old; |
256 | data[1] = old ^ bit; | 262 | data[1] = old ^ bit; |
257 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | 263 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, |
258 | irm_id, generation, SCODE_100, | 264 | irm_id, generation, SCODE_100, |
259 | offset, data, 8)) { | 265 | offset, data, 8)) { |
260 | case RCODE_GENERATION: | 266 | case RCODE_GENERATION: |
261 | /* A generation change frees all channels. */ | 267 | /* A generation change frees all channels. */ |
262 | return allocate ? -EAGAIN : channel; | 268 | return allocate ? -EAGAIN : channel; |
263 | 269 | ||
264 | case RCODE_COMPLETE: | 270 | case RCODE_COMPLETE: |
265 | if (data[0] == old) | 271 | if (data[0] == old) |
266 | return channel; | 272 | return channel; |
267 | 273 | ||
268 | old = data[0]; | 274 | old = data[0]; |
269 | 275 | ||
270 | /* Is the IRM 1394a-2000 compliant? */ | 276 | /* Is the IRM 1394a-2000 compliant? */ |
271 | if ((data[0] & bit) == (data[1] & bit)) | 277 | if ((data[0] & bit) == (data[1] & bit)) |
272 | continue; | 278 | continue; |
273 | 279 | ||
274 | /* 1394-1995 IRM, fall through to retry. */ | 280 | /* 1394-1995 IRM, fall through to retry. */ |
275 | default: | 281 | default: |
276 | if (retry) { | 282 | if (retry) { |
277 | retry--; | 283 | retry--; |
278 | channel--; | 284 | channel--; |
279 | } else { | 285 | } else { |
280 | ret = -EIO; | 286 | ret = -EIO; |
281 | } | 287 | } |
282 | } | 288 | } |
283 | } | 289 | } |
284 | 290 | ||
285 | return ret; | 291 | return ret; |
286 | } | 292 | } |
287 | 293 | ||
288 | static void deallocate_channel(struct fw_card *card, int irm_id, | 294 | static void deallocate_channel(struct fw_card *card, int irm_id, |
289 | int generation, int channel) | 295 | int generation, int channel) |
290 | { | 296 | { |
291 | u32 mask; | 297 | u32 mask; |
292 | u64 offset; | 298 | u64 offset; |
293 | 299 | ||
294 | mask = channel < 32 ? 1 << channel : 1 << (channel - 32); | 300 | mask = channel < 32 ? 1 << channel : 1 << (channel - 32); |
295 | offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : | 301 | offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : |
296 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; | 302 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; |
297 | 303 | ||
298 | manage_channel(card, irm_id, generation, mask, offset, false); | 304 | manage_channel(card, irm_id, generation, mask, offset, false); |
299 | } | 305 | } |
300 | 306 | ||
301 | /** | 307 | /** |
302 | * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth | 308 | * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth |
303 | * | 309 | * |
304 | * In parameters: card, generation, channels_mask, bandwidth, allocate | 310 | * In parameters: card, generation, channels_mask, bandwidth, allocate |
305 | * Out parameters: channel, bandwidth | 311 | * Out parameters: channel, bandwidth |
306 | * This function blocks (sleeps) during communication with the IRM. | 312 | * This function blocks (sleeps) during communication with the IRM. |
307 | * | 313 | * |
308 | * Allocates or deallocates at most one channel out of channels_mask. | 314 | * Allocates or deallocates at most one channel out of channels_mask. |
309 | * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. | 315 | * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. |
310 | * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for | 316 | * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for |
311 | * channel 0 and LSB for channel 63.) | 317 | * channel 0 and LSB for channel 63.) |
312 | * Allocates or deallocates as many bandwidth allocation units as specified. | 318 | * Allocates or deallocates as many bandwidth allocation units as specified. |
313 | * | 319 | * |
314 | * Returns channel < 0 if no channel was allocated or deallocated. | 320 | * Returns channel < 0 if no channel was allocated or deallocated. |
315 | * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. | 321 | * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. |
316 | * | 322 | * |
317 | * If generation is stale, deallocations succeed but allocations fail with | 323 | * If generation is stale, deallocations succeed but allocations fail with |
318 | * channel = -EAGAIN. | 324 | * channel = -EAGAIN. |
319 | * | 325 | * |
320 | * If channel allocation fails, no bandwidth will be allocated either. | 326 | * If channel allocation fails, no bandwidth will be allocated either. |
321 | * If bandwidth allocation fails, no channel will be allocated either. | 327 | * If bandwidth allocation fails, no channel will be allocated either. |
322 | * But deallocations of channel and bandwidth are tried independently | 328 | * But deallocations of channel and bandwidth are tried independently |
323 | * of each other's success. | 329 | * of each other's success. |
324 | */ | 330 | */ |
325 | void fw_iso_resource_manage(struct fw_card *card, int generation, | 331 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
326 | u64 channels_mask, int *channel, int *bandwidth, | 332 | u64 channels_mask, int *channel, int *bandwidth, |
327 | bool allocate) | 333 | bool allocate) |
328 | { | 334 | { |
329 | u32 channels_hi = channels_mask; /* channels 31...0 */ | 335 | u32 channels_hi = channels_mask; /* channels 31...0 */ |
330 | u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ | 336 | u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ |
331 | int irm_id, ret, c = -EINVAL; | 337 | int irm_id, ret, c = -EINVAL; |
332 | 338 | ||
333 | spin_lock_irq(&card->lock); | 339 | spin_lock_irq(&card->lock); |
334 | irm_id = card->irm_node->node_id; | 340 | irm_id = card->irm_node->node_id; |
335 | spin_unlock_irq(&card->lock); | 341 | spin_unlock_irq(&card->lock); |
336 | 342 | ||
337 | if (channels_hi) | 343 | if (channels_hi) |
338 | c = manage_channel(card, irm_id, generation, channels_hi, | 344 | c = manage_channel(card, irm_id, generation, channels_hi, |
339 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, | 345 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, |
340 | allocate); | 346 | allocate); |
341 | if (channels_lo && c < 0) { | 347 | if (channels_lo && c < 0) { |
342 | c = manage_channel(card, irm_id, generation, channels_lo, | 348 | c = manage_channel(card, irm_id, generation, channels_lo, |
343 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, | 349 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, |
344 | allocate); | 350 | allocate); |
345 | if (c >= 0) | 351 | if (c >= 0) |
346 | c += 32; | 352 | c += 32; |
347 | } | 353 | } |
348 | *channel = c; | 354 | *channel = c; |
349 | 355 | ||
350 | if (allocate && channels_mask != 0 && c < 0) | 356 | if (allocate && channels_mask != 0 && c < 0) |
351 | *bandwidth = 0; | 357 | *bandwidth = 0; |
352 | 358 | ||
353 | if (*bandwidth == 0) | 359 | if (*bandwidth == 0) |
354 | return; | 360 | return; |
355 | 361 | ||
356 | ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); | 362 | ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); |
357 | if (ret < 0) | 363 | if (ret < 0) |
358 | *bandwidth = 0; | 364 | *bandwidth = 0; |
359 | 365 | ||
360 | if (allocate && ret < 0) { | 366 | if (allocate && ret < 0) { |
361 | if (c >= 0) | 367 | if (c >= 0) |
362 | deallocate_channel(card, irm_id, generation, c); | 368 | deallocate_channel(card, irm_id, generation, c); |
363 | *channel = ret; | 369 | *channel = ret; |
364 | } | 370 | } |
365 | } | 371 | } |
366 | EXPORT_SYMBOL(fw_iso_resource_manage); | 372 | EXPORT_SYMBOL(fw_iso_resource_manage); |
367 | 373 |
drivers/firewire/core.h
1 | #ifndef _FIREWIRE_CORE_H | 1 | #ifndef _FIREWIRE_CORE_H |
2 | #define _FIREWIRE_CORE_H | 2 | #define _FIREWIRE_CORE_H |
3 | 3 | ||
4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/idr.h> | 6 | #include <linux/idr.h> |
7 | #include <linux/mm_types.h> | 7 | #include <linux/mm_types.h> |
8 | #include <linux/rwsem.h> | 8 | #include <linux/rwsem.h> |
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | 11 | ||
12 | #include <asm/atomic.h> | 12 | #include <asm/atomic.h> |
13 | 13 | ||
14 | struct device; | 14 | struct device; |
15 | struct fw_card; | 15 | struct fw_card; |
16 | struct fw_device; | 16 | struct fw_device; |
17 | struct fw_iso_buffer; | 17 | struct fw_iso_buffer; |
18 | struct fw_iso_context; | 18 | struct fw_iso_context; |
19 | struct fw_iso_packet; | 19 | struct fw_iso_packet; |
20 | struct fw_node; | 20 | struct fw_node; |
21 | struct fw_packet; | 21 | struct fw_packet; |
22 | 22 | ||
23 | 23 | ||
24 | /* -card */ | 24 | /* -card */ |
25 | 25 | ||
26 | /* bitfields within the PHY registers */ | 26 | /* bitfields within the PHY registers */ |
27 | #define PHY_LINK_ACTIVE 0x80 | 27 | #define PHY_LINK_ACTIVE 0x80 |
28 | #define PHY_CONTENDER 0x40 | 28 | #define PHY_CONTENDER 0x40 |
29 | #define PHY_BUS_RESET 0x40 | 29 | #define PHY_BUS_RESET 0x40 |
30 | #define PHY_EXTENDED_REGISTERS 0xe0 | 30 | #define PHY_EXTENDED_REGISTERS 0xe0 |
31 | #define PHY_BUS_SHORT_RESET 0x40 | 31 | #define PHY_BUS_SHORT_RESET 0x40 |
32 | #define PHY_INT_STATUS_BITS 0x3c | 32 | #define PHY_INT_STATUS_BITS 0x3c |
33 | #define PHY_ENABLE_ACCEL 0x02 | 33 | #define PHY_ENABLE_ACCEL 0x02 |
34 | #define PHY_ENABLE_MULTI 0x01 | 34 | #define PHY_ENABLE_MULTI 0x01 |
35 | #define PHY_PAGE_SELECT 0xe0 | 35 | #define PHY_PAGE_SELECT 0xe0 |
36 | 36 | ||
37 | #define BANDWIDTH_AVAILABLE_INITIAL 4915 | 37 | #define BANDWIDTH_AVAILABLE_INITIAL 4915 |
38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) | 38 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) |
39 | #define BROADCAST_CHANNEL_VALID (1 << 30) | 39 | #define BROADCAST_CHANNEL_VALID (1 << 30) |
40 | 40 | ||
41 | #define CSR_STATE_BIT_CMSTR (1 << 8) | 41 | #define CSR_STATE_BIT_CMSTR (1 << 8) |
42 | #define CSR_STATE_BIT_ABDICATE (1 << 10) | 42 | #define CSR_STATE_BIT_ABDICATE (1 << 10) |
43 | 43 | ||
44 | struct fw_card_driver { | 44 | struct fw_card_driver { |
45 | /* | 45 | /* |
46 | * Enable the given card with the given initial config rom. | 46 | * Enable the given card with the given initial config rom. |
47 | * This function is expected to activate the card, and either | 47 | * This function is expected to activate the card, and either |
48 | * enable the PHY or set the link_on bit and initiate a bus | 48 | * enable the PHY or set the link_on bit and initiate a bus |
49 | * reset. | 49 | * reset. |
50 | */ | 50 | */ |
51 | int (*enable)(struct fw_card *card, | 51 | int (*enable)(struct fw_card *card, |
52 | const __be32 *config_rom, size_t length); | 52 | const __be32 *config_rom, size_t length); |
53 | 53 | ||
54 | int (*read_phy_reg)(struct fw_card *card, int address); | 54 | int (*read_phy_reg)(struct fw_card *card, int address); |
55 | int (*update_phy_reg)(struct fw_card *card, int address, | 55 | int (*update_phy_reg)(struct fw_card *card, int address, |
56 | int clear_bits, int set_bits); | 56 | int clear_bits, int set_bits); |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * Update the config rom for an enabled card. This function | 59 | * Update the config rom for an enabled card. This function |
60 | * should change the config rom that is presented on the bus | 60 | * should change the config rom that is presented on the bus |
61 | * and initiate a bus reset. | 61 | * and initiate a bus reset. |
62 | */ | 62 | */ |
63 | int (*set_config_rom)(struct fw_card *card, | 63 | int (*set_config_rom)(struct fw_card *card, |
64 | const __be32 *config_rom, size_t length); | 64 | const __be32 *config_rom, size_t length); |
65 | 65 | ||
66 | void (*send_request)(struct fw_card *card, struct fw_packet *packet); | 66 | void (*send_request)(struct fw_card *card, struct fw_packet *packet); |
67 | void (*send_response)(struct fw_card *card, struct fw_packet *packet); | 67 | void (*send_response)(struct fw_card *card, struct fw_packet *packet); |
68 | /* Calling cancel is valid once a packet has been submitted. */ | 68 | /* Calling cancel is valid once a packet has been submitted. */ |
69 | int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet); | 69 | int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Allow the specified node ID to do direct DMA out and in of | 72 | * Allow the specified node ID to do direct DMA out and in of |
73 | * host memory. The card will disable this for all node when | 73 | * host memory. The card will disable this for all node when |
74 | * a bus reset happens, so driver need to reenable this after | 74 | * a bus reset happens, so driver need to reenable this after |
75 | * bus reset. Returns 0 on success, -ENODEV if the card | 75 | * bus reset. Returns 0 on success, -ENODEV if the card |
76 | * doesn't support this, -ESTALE if the generation doesn't | 76 | * doesn't support this, -ESTALE if the generation doesn't |
77 | * match. | 77 | * match. |
78 | */ | 78 | */ |
79 | int (*enable_phys_dma)(struct fw_card *card, | 79 | int (*enable_phys_dma)(struct fw_card *card, |
80 | int node_id, int generation); | 80 | int node_id, int generation); |
81 | 81 | ||
82 | u32 (*read_csr)(struct fw_card *card, int csr_offset); | 82 | u32 (*read_csr)(struct fw_card *card, int csr_offset); |
83 | void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); | 83 | void (*write_csr)(struct fw_card *card, int csr_offset, u32 value); |
84 | 84 | ||
85 | struct fw_iso_context * | 85 | struct fw_iso_context * |
86 | (*allocate_iso_context)(struct fw_card *card, | 86 | (*allocate_iso_context)(struct fw_card *card, |
87 | int type, int channel, size_t header_size); | 87 | int type, int channel, size_t header_size); |
88 | void (*free_iso_context)(struct fw_iso_context *ctx); | 88 | void (*free_iso_context)(struct fw_iso_context *ctx); |
89 | 89 | ||
90 | int (*start_iso)(struct fw_iso_context *ctx, | 90 | int (*start_iso)(struct fw_iso_context *ctx, |
91 | s32 cycle, u32 sync, u32 tags); | 91 | s32 cycle, u32 sync, u32 tags); |
92 | 92 | ||
93 | int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); | 93 | int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels); |
94 | 94 | ||
95 | int (*queue_iso)(struct fw_iso_context *ctx, | 95 | int (*queue_iso)(struct fw_iso_context *ctx, |
96 | struct fw_iso_packet *packet, | 96 | struct fw_iso_packet *packet, |
97 | struct fw_iso_buffer *buffer, | 97 | struct fw_iso_buffer *buffer, |
98 | unsigned long payload); | 98 | unsigned long payload); |
99 | 99 | ||
100 | void (*flush_queue_iso)(struct fw_iso_context *ctx); | ||
101 | |||
100 | int (*stop_iso)(struct fw_iso_context *ctx); | 102 | int (*stop_iso)(struct fw_iso_context *ctx); |
101 | }; | 103 | }; |
102 | 104 | ||
103 | void fw_card_initialize(struct fw_card *card, | 105 | void fw_card_initialize(struct fw_card *card, |
104 | const struct fw_card_driver *driver, struct device *device); | 106 | const struct fw_card_driver *driver, struct device *device); |
105 | int fw_card_add(struct fw_card *card, | 107 | int fw_card_add(struct fw_card *card, |
106 | u32 max_receive, u32 link_speed, u64 guid); | 108 | u32 max_receive, u32 link_speed, u64 guid); |
107 | void fw_core_remove_card(struct fw_card *card); | 109 | void fw_core_remove_card(struct fw_card *card); |
108 | int fw_compute_block_crc(__be32 *block); | 110 | int fw_compute_block_crc(__be32 *block); |
109 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); | 111 | void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); |
110 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); | 112 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); |
111 | 113 | ||
112 | static inline struct fw_card *fw_card_get(struct fw_card *card) | 114 | static inline struct fw_card *fw_card_get(struct fw_card *card) |
113 | { | 115 | { |
114 | kref_get(&card->kref); | 116 | kref_get(&card->kref); |
115 | 117 | ||
116 | return card; | 118 | return card; |
117 | } | 119 | } |
118 | 120 | ||
119 | void fw_card_release(struct kref *kref); | 121 | void fw_card_release(struct kref *kref); |
120 | 122 | ||
121 | static inline void fw_card_put(struct fw_card *card) | 123 | static inline void fw_card_put(struct fw_card *card) |
122 | { | 124 | { |
123 | kref_put(&card->kref, fw_card_release); | 125 | kref_put(&card->kref, fw_card_release); |
124 | } | 126 | } |
125 | 127 | ||
126 | 128 | ||
127 | /* -cdev */ | 129 | /* -cdev */ |
128 | 130 | ||
129 | extern const struct file_operations fw_device_ops; | 131 | extern const struct file_operations fw_device_ops; |
130 | 132 | ||
131 | void fw_device_cdev_update(struct fw_device *device); | 133 | void fw_device_cdev_update(struct fw_device *device); |
132 | void fw_device_cdev_remove(struct fw_device *device); | 134 | void fw_device_cdev_remove(struct fw_device *device); |
133 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); | 135 | void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p); |
134 | 136 | ||
135 | 137 | ||
136 | /* -device */ | 138 | /* -device */ |
137 | 139 | ||
138 | extern struct rw_semaphore fw_device_rwsem; | 140 | extern struct rw_semaphore fw_device_rwsem; |
139 | extern struct idr fw_device_idr; | 141 | extern struct idr fw_device_idr; |
140 | extern int fw_cdev_major; | 142 | extern int fw_cdev_major; |
141 | 143 | ||
142 | struct fw_device *fw_device_get_by_devt(dev_t devt); | 144 | struct fw_device *fw_device_get_by_devt(dev_t devt); |
143 | int fw_device_set_broadcast_channel(struct device *dev, void *gen); | 145 | int fw_device_set_broadcast_channel(struct device *dev, void *gen); |
144 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); | 146 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); |
145 | 147 | ||
146 | 148 | ||
147 | /* -iso */ | 149 | /* -iso */ |
148 | 150 | ||
149 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); | 151 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); |
150 | 152 | ||
151 | 153 | ||
152 | /* -topology */ | 154 | /* -topology */ |
153 | 155 | ||
154 | enum { | 156 | enum { |
155 | FW_NODE_CREATED, | 157 | FW_NODE_CREATED, |
156 | FW_NODE_UPDATED, | 158 | FW_NODE_UPDATED, |
157 | FW_NODE_DESTROYED, | 159 | FW_NODE_DESTROYED, |
158 | FW_NODE_LINK_ON, | 160 | FW_NODE_LINK_ON, |
159 | FW_NODE_LINK_OFF, | 161 | FW_NODE_LINK_OFF, |
160 | FW_NODE_INITIATED_RESET, | 162 | FW_NODE_INITIATED_RESET, |
161 | }; | 163 | }; |
162 | 164 | ||
163 | struct fw_node { | 165 | struct fw_node { |
164 | u16 node_id; | 166 | u16 node_id; |
165 | u8 color; | 167 | u8 color; |
166 | u8 port_count; | 168 | u8 port_count; |
167 | u8 link_on:1; | 169 | u8 link_on:1; |
168 | u8 initiated_reset:1; | 170 | u8 initiated_reset:1; |
169 | u8 b_path:1; | 171 | u8 b_path:1; |
170 | u8 phy_speed:2; /* As in the self ID packet. */ | 172 | u8 phy_speed:2; /* As in the self ID packet. */ |
171 | u8 max_speed:2; /* Minimum of all phy-speeds on the path from the | 173 | u8 max_speed:2; /* Minimum of all phy-speeds on the path from the |
172 | * local node to this node. */ | 174 | * local node to this node. */ |
173 | u8 max_depth:4; /* Maximum depth to any leaf node */ | 175 | u8 max_depth:4; /* Maximum depth to any leaf node */ |
174 | u8 max_hops:4; /* Max hops in this sub tree */ | 176 | u8 max_hops:4; /* Max hops in this sub tree */ |
175 | atomic_t ref_count; | 177 | atomic_t ref_count; |
176 | 178 | ||
177 | /* For serializing node topology into a list. */ | 179 | /* For serializing node topology into a list. */ |
178 | struct list_head link; | 180 | struct list_head link; |
179 | 181 | ||
180 | /* Upper layer specific data. */ | 182 | /* Upper layer specific data. */ |
181 | void *data; | 183 | void *data; |
182 | 184 | ||
183 | struct fw_node *ports[0]; | 185 | struct fw_node *ports[0]; |
184 | }; | 186 | }; |
185 | 187 | ||
186 | static inline struct fw_node *fw_node_get(struct fw_node *node) | 188 | static inline struct fw_node *fw_node_get(struct fw_node *node) |
187 | { | 189 | { |
188 | atomic_inc(&node->ref_count); | 190 | atomic_inc(&node->ref_count); |
189 | 191 | ||
190 | return node; | 192 | return node; |
191 | } | 193 | } |
192 | 194 | ||
193 | static inline void fw_node_put(struct fw_node *node) | 195 | static inline void fw_node_put(struct fw_node *node) |
194 | { | 196 | { |
195 | if (atomic_dec_and_test(&node->ref_count)) | 197 | if (atomic_dec_and_test(&node->ref_count)) |
196 | kfree(node); | 198 | kfree(node); |
197 | } | 199 | } |
198 | 200 | ||
199 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, | 201 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, |
200 | int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); | 202 | int generation, int self_id_count, u32 *self_ids, bool bm_abdicate); |
201 | void fw_destroy_nodes(struct fw_card *card); | 203 | void fw_destroy_nodes(struct fw_card *card); |
202 | 204 | ||
203 | /* | 205 | /* |
204 | * Check whether new_generation is the immediate successor of old_generation. | 206 | * Check whether new_generation is the immediate successor of old_generation. |
205 | * Take counter roll-over at 255 (as per OHCI) into account. | 207 | * Take counter roll-over at 255 (as per OHCI) into account. |
206 | */ | 208 | */ |
207 | static inline bool is_next_generation(int new_generation, int old_generation) | 209 | static inline bool is_next_generation(int new_generation, int old_generation) |
208 | { | 210 | { |
209 | return (new_generation & 0xff) == ((old_generation + 1) & 0xff); | 211 | return (new_generation & 0xff) == ((old_generation + 1) & 0xff); |
210 | } | 212 | } |
211 | 213 | ||
212 | 214 | ||
213 | /* -transaction */ | 215 | /* -transaction */ |
214 | 216 | ||
215 | #define TCODE_LINK_INTERNAL 0xe | 217 | #define TCODE_LINK_INTERNAL 0xe |
216 | 218 | ||
217 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 219 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
218 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 220 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
219 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) | 221 | #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) |
220 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) | 222 | #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) |
221 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) | 223 | #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) |
222 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) | 224 | #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) |
223 | #define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) | 225 | #define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) |
224 | 226 | ||
225 | #define LOCAL_BUS 0xffc0 | 227 | #define LOCAL_BUS 0xffc0 |
226 | 228 | ||
227 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); | 229 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
228 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); | 230 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
229 | int fw_get_response_length(struct fw_request *request); | 231 | int fw_get_response_length(struct fw_request *request); |
230 | void fw_fill_response(struct fw_packet *response, u32 *request_header, | 232 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
231 | int rcode, void *payload, size_t length); | 233 | int rcode, void *payload, size_t length); |
232 | 234 | ||
233 | #define FW_PHY_CONFIG_NO_NODE_ID -1 | 235 | #define FW_PHY_CONFIG_NO_NODE_ID -1 |
234 | #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 | 236 | #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1 |
235 | void fw_send_phy_config(struct fw_card *card, | 237 | void fw_send_phy_config(struct fw_card *card, |
236 | int node_id, int generation, int gap_count); | 238 | int node_id, int generation, int gap_count); |
237 | 239 | ||
238 | static inline bool is_ping_packet(u32 *data) | 240 | static inline bool is_ping_packet(u32 *data) |
239 | { | 241 | { |
240 | return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; | 242 | return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1]; |
241 | } | 243 | } |
242 | 244 | ||
243 | #endif /* _FIREWIRE_CORE_H */ | 245 | #endif /* _FIREWIRE_CORE_H */ |
244 | 246 |
drivers/firewire/net.c
1 | /* | 1 | /* |
2 | * IPv4 over IEEE 1394, per RFC 2734 | 2 | * IPv4 over IEEE 1394, per RFC 2734 |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> | 4 | * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> |
5 | * | 5 | * |
6 | * based on eth1394 by Ben Collins et al | 6 | * based on eth1394 by Ben Collins et al |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
12 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
13 | #include <linux/firewire.h> | 13 | #include <linux/firewire.h> |
14 | #include <linux/firewire-constants.h> | 14 | #include <linux/firewire-constants.h> |
15 | #include <linux/highmem.h> | 15 | #include <linux/highmem.h> |
16 | #include <linux/in.h> | 16 | #include <linux/in.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
19 | #include <linux/mod_devicetable.h> | 19 | #include <linux/mod_devicetable.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | 27 | ||
28 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
29 | #include <net/arp.h> | 29 | #include <net/arp.h> |
30 | 30 | ||
31 | /* rx limits */ | 31 | /* rx limits */ |
32 | #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ | 32 | #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ |
33 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) | 33 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) |
34 | 34 | ||
35 | /* tx limits */ | 35 | /* tx limits */ |
36 | #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ | 36 | #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ |
37 | #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ | 37 | #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ |
38 | #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ | 38 | #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ |
39 | 39 | ||
40 | #define IEEE1394_BROADCAST_CHANNEL 31 | 40 | #define IEEE1394_BROADCAST_CHANNEL 31 |
41 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) | 41 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) |
42 | #define IEEE1394_MAX_PAYLOAD_S100 512 | 42 | #define IEEE1394_MAX_PAYLOAD_S100 512 |
43 | #define FWNET_NO_FIFO_ADDR (~0ULL) | 43 | #define FWNET_NO_FIFO_ADDR (~0ULL) |
44 | 44 | ||
45 | #define IANA_SPECIFIER_ID 0x00005eU | 45 | #define IANA_SPECIFIER_ID 0x00005eU |
46 | #define RFC2734_SW_VERSION 0x000001U | 46 | #define RFC2734_SW_VERSION 0x000001U |
47 | 47 | ||
48 | #define IEEE1394_GASP_HDR_SIZE 8 | 48 | #define IEEE1394_GASP_HDR_SIZE 8 |
49 | 49 | ||
50 | #define RFC2374_UNFRAG_HDR_SIZE 4 | 50 | #define RFC2374_UNFRAG_HDR_SIZE 4 |
51 | #define RFC2374_FRAG_HDR_SIZE 8 | 51 | #define RFC2374_FRAG_HDR_SIZE 8 |
52 | #define RFC2374_FRAG_OVERHEAD 4 | 52 | #define RFC2374_FRAG_OVERHEAD 4 |
53 | 53 | ||
54 | #define RFC2374_HDR_UNFRAG 0 /* unfragmented */ | 54 | #define RFC2374_HDR_UNFRAG 0 /* unfragmented */ |
55 | #define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ | 55 | #define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ |
56 | #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ | 56 | #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ |
57 | #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ | 57 | #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ |
58 | 58 | ||
59 | #define RFC2734_HW_ADDR_LEN 16 | 59 | #define RFC2734_HW_ADDR_LEN 16 |
60 | 60 | ||
61 | struct rfc2734_arp { | 61 | struct rfc2734_arp { |
62 | __be16 hw_type; /* 0x0018 */ | 62 | __be16 hw_type; /* 0x0018 */ |
63 | __be16 proto_type; /* 0x0806 */ | 63 | __be16 proto_type; /* 0x0806 */ |
64 | u8 hw_addr_len; /* 16 */ | 64 | u8 hw_addr_len; /* 16 */ |
65 | u8 ip_addr_len; /* 4 */ | 65 | u8 ip_addr_len; /* 4 */ |
66 | __be16 opcode; /* ARP Opcode */ | 66 | __be16 opcode; /* ARP Opcode */ |
67 | /* Above is exactly the same format as struct arphdr */ | 67 | /* Above is exactly the same format as struct arphdr */ |
68 | 68 | ||
69 | __be64 s_uniq_id; /* Sender's 64bit EUI */ | 69 | __be64 s_uniq_id; /* Sender's 64bit EUI */ |
70 | u8 max_rec; /* Sender's max packet size */ | 70 | u8 max_rec; /* Sender's max packet size */ |
71 | u8 sspd; /* Sender's max speed */ | 71 | u8 sspd; /* Sender's max speed */ |
72 | __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */ | 72 | __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */ |
73 | __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */ | 73 | __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */ |
74 | __be32 sip; /* Sender's IP Address */ | 74 | __be32 sip; /* Sender's IP Address */ |
75 | __be32 tip; /* IP Address of requested hw addr */ | 75 | __be32 tip; /* IP Address of requested hw addr */ |
76 | } __attribute__((packed)); | 76 | } __attribute__((packed)); |
77 | 77 | ||
78 | /* This header format is specific to this driver implementation. */ | 78 | /* This header format is specific to this driver implementation. */ |
79 | #define FWNET_ALEN 8 | 79 | #define FWNET_ALEN 8 |
80 | #define FWNET_HLEN 10 | 80 | #define FWNET_HLEN 10 |
81 | struct fwnet_header { | 81 | struct fwnet_header { |
82 | u8 h_dest[FWNET_ALEN]; /* destination address */ | 82 | u8 h_dest[FWNET_ALEN]; /* destination address */ |
83 | __be16 h_proto; /* packet type ID field */ | 83 | __be16 h_proto; /* packet type ID field */ |
84 | } __attribute__((packed)); | 84 | } __attribute__((packed)); |
85 | 85 | ||
86 | /* IPv4 and IPv6 encapsulation header */ | 86 | /* IPv4 and IPv6 encapsulation header */ |
87 | struct rfc2734_header { | 87 | struct rfc2734_header { |
88 | u32 w0; | 88 | u32 w0; |
89 | u32 w1; | 89 | u32 w1; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) | 92 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) |
93 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) | 93 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) |
94 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) | 94 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) |
95 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) | 95 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) |
96 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) | 96 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) |
97 | 97 | ||
98 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) | 98 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) |
99 | #define fwnet_set_hdr_ether_type(et) (et) | 99 | #define fwnet_set_hdr_ether_type(et) (et) |
100 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) | 100 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) |
101 | #define fwnet_set_hdr_fg_off(fgo) (fgo) | 101 | #define fwnet_set_hdr_fg_off(fgo) (fgo) |
102 | 102 | ||
103 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) | 103 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) |
104 | 104 | ||
105 | static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, | 105 | static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, |
106 | unsigned ether_type) | 106 | unsigned ether_type) |
107 | { | 107 | { |
108 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) | 108 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) |
109 | | fwnet_set_hdr_ether_type(ether_type); | 109 | | fwnet_set_hdr_ether_type(ether_type); |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, | 112 | static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, |
113 | unsigned ether_type, unsigned dg_size, unsigned dgl) | 113 | unsigned ether_type, unsigned dg_size, unsigned dgl) |
114 | { | 114 | { |
115 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) | 115 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) |
116 | | fwnet_set_hdr_dg_size(dg_size) | 116 | | fwnet_set_hdr_dg_size(dg_size) |
117 | | fwnet_set_hdr_ether_type(ether_type); | 117 | | fwnet_set_hdr_ether_type(ether_type); |
118 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | 118 | hdr->w1 = fwnet_set_hdr_dgl(dgl); |
119 | } | 119 | } |
120 | 120 | ||
121 | static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, | 121 | static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, |
122 | unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) | 122 | unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) |
123 | { | 123 | { |
124 | hdr->w0 = fwnet_set_hdr_lf(lf) | 124 | hdr->w0 = fwnet_set_hdr_lf(lf) |
125 | | fwnet_set_hdr_dg_size(dg_size) | 125 | | fwnet_set_hdr_dg_size(dg_size) |
126 | | fwnet_set_hdr_fg_off(fg_off); | 126 | | fwnet_set_hdr_fg_off(fg_off); |
127 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | 127 | hdr->w1 = fwnet_set_hdr_dgl(dgl); |
128 | } | 128 | } |
129 | 129 | ||
130 | /* This list keeps track of what parts of the datagram have been filled in */ | 130 | /* This list keeps track of what parts of the datagram have been filled in */ |
131 | struct fwnet_fragment_info { | 131 | struct fwnet_fragment_info { |
132 | struct list_head fi_link; | 132 | struct list_head fi_link; |
133 | u16 offset; | 133 | u16 offset; |
134 | u16 len; | 134 | u16 len; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | struct fwnet_partial_datagram { | 137 | struct fwnet_partial_datagram { |
138 | struct list_head pd_link; | 138 | struct list_head pd_link; |
139 | struct list_head fi_list; | 139 | struct list_head fi_list; |
140 | struct sk_buff *skb; | 140 | struct sk_buff *skb; |
141 | /* FIXME Why not use skb->data? */ | 141 | /* FIXME Why not use skb->data? */ |
142 | char *pbuf; | 142 | char *pbuf; |
143 | u16 datagram_label; | 143 | u16 datagram_label; |
144 | u16 ether_type; | 144 | u16 ether_type; |
145 | u16 datagram_size; | 145 | u16 datagram_size; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static DEFINE_MUTEX(fwnet_device_mutex); | 148 | static DEFINE_MUTEX(fwnet_device_mutex); |
149 | static LIST_HEAD(fwnet_device_list); | 149 | static LIST_HEAD(fwnet_device_list); |
150 | 150 | ||
151 | struct fwnet_device { | 151 | struct fwnet_device { |
152 | struct list_head dev_link; | 152 | struct list_head dev_link; |
153 | spinlock_t lock; | 153 | spinlock_t lock; |
154 | enum { | 154 | enum { |
155 | FWNET_BROADCAST_ERROR, | 155 | FWNET_BROADCAST_ERROR, |
156 | FWNET_BROADCAST_RUNNING, | 156 | FWNET_BROADCAST_RUNNING, |
157 | FWNET_BROADCAST_STOPPED, | 157 | FWNET_BROADCAST_STOPPED, |
158 | } broadcast_state; | 158 | } broadcast_state; |
159 | struct fw_iso_context *broadcast_rcv_context; | 159 | struct fw_iso_context *broadcast_rcv_context; |
160 | struct fw_iso_buffer broadcast_rcv_buffer; | 160 | struct fw_iso_buffer broadcast_rcv_buffer; |
161 | void **broadcast_rcv_buffer_ptrs; | 161 | void **broadcast_rcv_buffer_ptrs; |
162 | unsigned broadcast_rcv_next_ptr; | 162 | unsigned broadcast_rcv_next_ptr; |
163 | unsigned num_broadcast_rcv_ptrs; | 163 | unsigned num_broadcast_rcv_ptrs; |
164 | unsigned rcv_buffer_size; | 164 | unsigned rcv_buffer_size; |
165 | /* | 165 | /* |
166 | * This value is the maximum unfragmented datagram size that can be | 166 | * This value is the maximum unfragmented datagram size that can be |
167 | * sent by the hardware. It already has the GASP overhead and the | 167 | * sent by the hardware. It already has the GASP overhead and the |
168 | * unfragmented datagram header overhead calculated into it. | 168 | * unfragmented datagram header overhead calculated into it. |
169 | */ | 169 | */ |
170 | unsigned broadcast_xmt_max_payload; | 170 | unsigned broadcast_xmt_max_payload; |
171 | u16 broadcast_xmt_datagramlabel; | 171 | u16 broadcast_xmt_datagramlabel; |
172 | 172 | ||
173 | /* | 173 | /* |
174 | * The CSR address that remote nodes must send datagrams to for us to | 174 | * The CSR address that remote nodes must send datagrams to for us to |
175 | * receive them. | 175 | * receive them. |
176 | */ | 176 | */ |
177 | struct fw_address_handler handler; | 177 | struct fw_address_handler handler; |
178 | u64 local_fifo; | 178 | u64 local_fifo; |
179 | 179 | ||
180 | /* Number of tx datagrams that have been queued but not yet acked */ | 180 | /* Number of tx datagrams that have been queued but not yet acked */ |
181 | int queued_datagrams; | 181 | int queued_datagrams; |
182 | 182 | ||
183 | int peer_count; | 183 | int peer_count; |
184 | struct list_head peer_list; | 184 | struct list_head peer_list; |
185 | struct fw_card *card; | 185 | struct fw_card *card; |
186 | struct net_device *netdev; | 186 | struct net_device *netdev; |
187 | }; | 187 | }; |
188 | 188 | ||
189 | struct fwnet_peer { | 189 | struct fwnet_peer { |
190 | struct list_head peer_link; | 190 | struct list_head peer_link; |
191 | struct fwnet_device *dev; | 191 | struct fwnet_device *dev; |
192 | u64 guid; | 192 | u64 guid; |
193 | u64 fifo; | 193 | u64 fifo; |
194 | __be32 ip; | 194 | __be32 ip; |
195 | 195 | ||
196 | /* guarded by dev->lock */ | 196 | /* guarded by dev->lock */ |
197 | struct list_head pd_list; /* received partial datagrams */ | 197 | struct list_head pd_list; /* received partial datagrams */ |
198 | unsigned pdg_size; /* pd_list size */ | 198 | unsigned pdg_size; /* pd_list size */ |
199 | 199 | ||
200 | u16 datagram_label; /* outgoing datagram label */ | 200 | u16 datagram_label; /* outgoing datagram label */ |
201 | u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ | 201 | u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ |
202 | int node_id; | 202 | int node_id; |
203 | int generation; | 203 | int generation; |
204 | unsigned speed; | 204 | unsigned speed; |
205 | }; | 205 | }; |
206 | 206 | ||
207 | /* This is our task struct. It's used for the packet complete callback. */ | 207 | /* This is our task struct. It's used for the packet complete callback. */ |
208 | struct fwnet_packet_task { | 208 | struct fwnet_packet_task { |
209 | struct fw_transaction transaction; | 209 | struct fw_transaction transaction; |
210 | struct rfc2734_header hdr; | 210 | struct rfc2734_header hdr; |
211 | struct sk_buff *skb; | 211 | struct sk_buff *skb; |
212 | struct fwnet_device *dev; | 212 | struct fwnet_device *dev; |
213 | 213 | ||
214 | int outstanding_pkts; | 214 | int outstanding_pkts; |
215 | u64 fifo_addr; | 215 | u64 fifo_addr; |
216 | u16 dest_node; | 216 | u16 dest_node; |
217 | u16 max_payload; | 217 | u16 max_payload; |
218 | u8 generation; | 218 | u8 generation; |
219 | u8 speed; | 219 | u8 speed; |
220 | u8 enqueued; | 220 | u8 enqueued; |
221 | }; | 221 | }; |
222 | 222 | ||
223 | /* | 223 | /* |
224 | * saddr == NULL means use device source address. | 224 | * saddr == NULL means use device source address. |
225 | * daddr == NULL means leave destination address (eg unresolved arp). | 225 | * daddr == NULL means leave destination address (eg unresolved arp). |
226 | */ | 226 | */ |
227 | static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, | 227 | static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, |
228 | unsigned short type, const void *daddr, | 228 | unsigned short type, const void *daddr, |
229 | const void *saddr, unsigned len) | 229 | const void *saddr, unsigned len) |
230 | { | 230 | { |
231 | struct fwnet_header *h; | 231 | struct fwnet_header *h; |
232 | 232 | ||
233 | h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); | 233 | h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); |
234 | put_unaligned_be16(type, &h->h_proto); | 234 | put_unaligned_be16(type, &h->h_proto); |
235 | 235 | ||
236 | if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { | 236 | if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { |
237 | memset(h->h_dest, 0, net->addr_len); | 237 | memset(h->h_dest, 0, net->addr_len); |
238 | 238 | ||
239 | return net->hard_header_len; | 239 | return net->hard_header_len; |
240 | } | 240 | } |
241 | 241 | ||
242 | if (daddr) { | 242 | if (daddr) { |
243 | memcpy(h->h_dest, daddr, net->addr_len); | 243 | memcpy(h->h_dest, daddr, net->addr_len); |
244 | 244 | ||
245 | return net->hard_header_len; | 245 | return net->hard_header_len; |
246 | } | 246 | } |
247 | 247 | ||
248 | return -net->hard_header_len; | 248 | return -net->hard_header_len; |
249 | } | 249 | } |
250 | 250 | ||
251 | static int fwnet_header_rebuild(struct sk_buff *skb) | 251 | static int fwnet_header_rebuild(struct sk_buff *skb) |
252 | { | 252 | { |
253 | struct fwnet_header *h = (struct fwnet_header *)skb->data; | 253 | struct fwnet_header *h = (struct fwnet_header *)skb->data; |
254 | 254 | ||
255 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) | 255 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) |
256 | return arp_find((unsigned char *)&h->h_dest, skb); | 256 | return arp_find((unsigned char *)&h->h_dest, skb); |
257 | 257 | ||
258 | fw_notify("%s: unable to resolve type %04x addresses\n", | 258 | fw_notify("%s: unable to resolve type %04x addresses\n", |
259 | skb->dev->name, be16_to_cpu(h->h_proto)); | 259 | skb->dev->name, be16_to_cpu(h->h_proto)); |
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
262 | 262 | ||
263 | static int fwnet_header_cache(const struct neighbour *neigh, | 263 | static int fwnet_header_cache(const struct neighbour *neigh, |
264 | struct hh_cache *hh) | 264 | struct hh_cache *hh) |
265 | { | 265 | { |
266 | struct net_device *net; | 266 | struct net_device *net; |
267 | struct fwnet_header *h; | 267 | struct fwnet_header *h; |
268 | 268 | ||
269 | if (hh->hh_type == cpu_to_be16(ETH_P_802_3)) | 269 | if (hh->hh_type == cpu_to_be16(ETH_P_802_3)) |
270 | return -1; | 270 | return -1; |
271 | net = neigh->dev; | 271 | net = neigh->dev; |
272 | h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h)); | 272 | h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h)); |
273 | h->h_proto = hh->hh_type; | 273 | h->h_proto = hh->hh_type; |
274 | memcpy(h->h_dest, neigh->ha, net->addr_len); | 274 | memcpy(h->h_dest, neigh->ha, net->addr_len); |
275 | hh->hh_len = FWNET_HLEN; | 275 | hh->hh_len = FWNET_HLEN; |
276 | 276 | ||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | /* Called by Address Resolution module to notify changes in address. */ | 280 | /* Called by Address Resolution module to notify changes in address. */ |
281 | static void fwnet_header_cache_update(struct hh_cache *hh, | 281 | static void fwnet_header_cache_update(struct hh_cache *hh, |
282 | const struct net_device *net, const unsigned char *haddr) | 282 | const struct net_device *net, const unsigned char *haddr) |
283 | { | 283 | { |
284 | memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len); | 284 | memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len); |
285 | } | 285 | } |
286 | 286 | ||
287 | static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) | 287 | static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) |
288 | { | 288 | { |
289 | memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); | 289 | memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); |
290 | 290 | ||
291 | return FWNET_ALEN; | 291 | return FWNET_ALEN; |
292 | } | 292 | } |
293 | 293 | ||
/* Hardware-header operations plugged into the RFC 2734 net_device. */
static const struct header_ops fwnet_header_ops = {
	.create         = fwnet_header_create,
	.rebuild        = fwnet_header_rebuild,
	.cache		= fwnet_header_cache,
	.cache_update	= fwnet_header_cache_update,
	.parse          = fwnet_header_parse,
};
301 | 301 | ||
302 | /* FIXME: is this correct for all cases? */ | 302 | /* FIXME: is this correct for all cases? */ |
303 | static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, | 303 | static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, |
304 | unsigned offset, unsigned len) | 304 | unsigned offset, unsigned len) |
305 | { | 305 | { |
306 | struct fwnet_fragment_info *fi; | 306 | struct fwnet_fragment_info *fi; |
307 | unsigned end = offset + len; | 307 | unsigned end = offset + len; |
308 | 308 | ||
309 | list_for_each_entry(fi, &pd->fi_list, fi_link) | 309 | list_for_each_entry(fi, &pd->fi_list, fi_link) |
310 | if (offset < fi->offset + fi->len && end > fi->offset) | 310 | if (offset < fi->offset + fi->len && end > fi->offset) |
311 | return true; | 311 | return true; |
312 | 312 | ||
313 | return false; | 313 | return false; |
314 | } | 314 | } |
315 | 315 | ||
/* Assumes that new fragment does not overlap any existing fragments */
/*
 * Record a newly arrived fragment [offset, offset+len) in pd's fragment
 * list.  A fragment adjacent to an existing entry is merged into it (and
 * two entries are glued together when the new fragment plugs the hole
 * between them); otherwise a fresh entry is inserted at its sorted
 * position.  Returns the fragment-info entry now covering the new range,
 * or NULL on allocation failure (GFP_ATOMIC — may be called from
 * interrupt context).
 */
static struct fwnet_fragment_info *fwnet_frag_new(
	struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi, *fi2, *new;
	struct list_head *list;

	/* Default insertion point: head of the list. */
	list = &pd->fi_list;
	list_for_each_entry(fi, &pd->fi_list, fi_link) {
		if (fi->offset + fi->len == offset) {
			/* The new fragment can be tacked on to the end */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.next,
					 struct fwnet_fragment_info, fi_link);
			if (fi->offset + fi->len == fi2->offset) {
				/* glue fragments together */
				fi->len += len + fi2->len;
				list_del(&fi2->fi_link);
				kfree(fi2);
			} else {
				fi->len += len;
			}

			return fi;
		}
		if (offset + len == fi->offset) {
			/* The new fragment can be tacked on to the beginning */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.prev,
					 struct fwnet_fragment_info, fi_link);
			if (fi2->offset + fi2->len == fi->offset) {
				/* glue fragments together */
				fi2->len += fi->len + len;
				list_del(&fi->fi_link);
				kfree(fi);

				return fi2;
			}
			fi->offset = offset;
			fi->len += len;

			return fi;
		}
		/* Not adjacent: remember where a new entry would go. */
		if (offset > fi->offset + fi->len) {
			list = &fi->fi_link;
			break;
		}
		if (offset + len < fi->offset) {
			list = fi->fi_link.prev;
			break;
		}
	}

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		fw_error("out of memory\n");
		return NULL;
	}

	new->offset = offset;
	new->len = len;
	list_add(&new->fi_link, list);

	return new;
}
381 | 381 | ||
/*
 * Allocate a partial-datagram tracker for the first received fragment of
 * a new datagram, allocate the skb that will hold the reassembled
 * payload, copy the fragment into place, and link the tracker onto the
 * peer's pd_list.  Returns NULL on any allocation failure, with all
 * partially acquired resources released via the goto-cleanup chain.
 * Caller must hold dev->lock (GFP_ATOMIC allocations throughout).
 */
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
		struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
		void *frag_buf, unsigned frag_off, unsigned frag_len)
{
	struct fwnet_partial_datagram *new;
	struct fwnet_fragment_info *fi;

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		goto fail;

	INIT_LIST_HEAD(&new->fi_list);
	fi = fwnet_frag_new(new, frag_off, frag_len);
	if (fi == NULL)
		goto fail_w_new;

	new->datagram_label = datagram_label;
	new->datagram_size = dg_size;
	/* +15 leaves room to align the payload after the hard header. */
	new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
	if (new->skb == NULL)
		goto fail_w_fi;

	skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
	new->pbuf = skb_put(new->skb, dg_size);
	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
	list_add_tail(&new->pd_link, &peer->pd_list);

	return new;

fail_w_fi:
	kfree(fi);
fail_w_new:
	kfree(new);
fail:
	fw_error("out of memory\n");

	return NULL;
}
420 | 420 | ||
421 | static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, | 421 | static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, |
422 | u16 datagram_label) | 422 | u16 datagram_label) |
423 | { | 423 | { |
424 | struct fwnet_partial_datagram *pd; | 424 | struct fwnet_partial_datagram *pd; |
425 | 425 | ||
426 | list_for_each_entry(pd, &peer->pd_list, pd_link) | 426 | list_for_each_entry(pd, &peer->pd_list, pd_link) |
427 | if (pd->datagram_label == datagram_label) | 427 | if (pd->datagram_label == datagram_label) |
428 | return pd; | 428 | return pd; |
429 | 429 | ||
430 | return NULL; | 430 | return NULL; |
431 | } | 431 | } |
432 | 432 | ||
433 | 433 | ||
/*
 * Tear down a partial datagram: free all fragment bookkeeping, unlink it
 * from the peer's list, drop its skb, and free the tracker itself.
 */
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
	struct fwnet_fragment_info *fi, *n;

	list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
		kfree(fi);

	list_del(&old->pd_link);
	dev_kfree_skb_any(old->skb);
	kfree(old);
}
445 | 445 | ||
/*
 * Merge a newly arrived fragment into an existing partial datagram.
 * Returns false if the fragment bookkeeping could not be allocated,
 * in which case the datagram is left unchanged.
 */
static bool fwnet_pd_update(struct fwnet_peer *peer,
		struct fwnet_partial_datagram *pd, void *frag_buf,
		unsigned frag_off, unsigned frag_len)
{
	if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
		return false;

	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);

	/*
	 * Move the entry to the tail of the list so that the least
	 * recently updated datagrams collect at the head, where the
	 * caller reclaims them first when the per-peer limit is reached.
	 */
	list_move_tail(&pd->pd_link, &peer->pd_list);

	return true;
}
463 | 463 | ||
464 | static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) | 464 | static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) |
465 | { | 465 | { |
466 | struct fwnet_fragment_info *fi; | 466 | struct fwnet_fragment_info *fi; |
467 | 467 | ||
468 | fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); | 468 | fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); |
469 | 469 | ||
470 | return fi->len == pd->datagram_size; | 470 | return fi->len == pd->datagram_size; |
471 | } | 471 | } |
472 | 472 | ||
473 | /* caller must hold dev->lock */ | 473 | /* caller must hold dev->lock */ |
474 | static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, | 474 | static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, |
475 | u64 guid) | 475 | u64 guid) |
476 | { | 476 | { |
477 | struct fwnet_peer *peer; | 477 | struct fwnet_peer *peer; |
478 | 478 | ||
479 | list_for_each_entry(peer, &dev->peer_list, peer_link) | 479 | list_for_each_entry(peer, &dev->peer_list, peer_link) |
480 | if (peer->guid == guid) | 480 | if (peer->guid == guid) |
481 | return peer; | 481 | return peer; |
482 | 482 | ||
483 | return NULL; | 483 | return NULL; |
484 | } | 484 | } |
485 | 485 | ||
486 | /* caller must hold dev->lock */ | 486 | /* caller must hold dev->lock */ |
487 | static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, | 487 | static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, |
488 | int node_id, int generation) | 488 | int node_id, int generation) |
489 | { | 489 | { |
490 | struct fwnet_peer *peer; | 490 | struct fwnet_peer *peer; |
491 | 491 | ||
492 | list_for_each_entry(peer, &dev->peer_list, peer_link) | 492 | list_for_each_entry(peer, &dev->peer_list, peer_link) |
493 | if (peer->node_id == node_id && | 493 | if (peer->node_id == node_id && |
494 | peer->generation == generation) | 494 | peer->generation == generation) |
495 | return peer; | 495 | return peer; |
496 | 496 | ||
497 | return NULL; | 497 | return NULL; |
498 | } | 498 | } |
499 | 499 | ||
500 | /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */ | 500 | /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */ |
501 | static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) | 501 | static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) |
502 | { | 502 | { |
503 | max_rec = min(max_rec, speed + 8); | 503 | max_rec = min(max_rec, speed + 8); |
504 | max_rec = min(max_rec, 0xbU); /* <= 4096 */ | 504 | max_rec = min(max_rec, 0xbU); /* <= 4096 */ |
505 | if (max_rec < 8) { | 505 | if (max_rec < 8) { |
506 | fw_notify("max_rec %x out of range\n", max_rec); | 506 | fw_notify("max_rec %x out of range\n", max_rec); |
507 | max_rec = 8; | 507 | max_rec = 8; |
508 | } | 508 | } |
509 | 509 | ||
510 | return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; | 510 | return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; |
511 | } | 511 | } |
512 | 512 | ||
513 | 513 | ||
/*
 * Final stage of receive: convert a fully reassembled RFC 2734 datagram
 * into an ethernet-style frame (including ARP translation for RFC 2734
 * ARP packets) and hand it to the network stack via netif_rx().
 * Consumes the skb on all paths.  Returns 0 on success or -ENOENT when
 * an ARP packet arrives from an unknown peer.
 */
static int fwnet_finish_incoming_packet(struct net_device *net,
					struct sk_buff *skb, u16 source_node_id,
					bool is_broadcast, u16 ether_type)
{
	struct fwnet_device *dev;
	static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
	int status;
	__be64 guid;

	dev = netdev_priv(net);
	/* Write metadata, and then pass to the receive level */
	skb->dev = net;
	skb->ip_summed = CHECKSUM_UNNECESSARY;  /* don't check it */

	/*
	 * Parse the encapsulation header. This actually does the job of
	 * converting to an ethernet frame header, as well as arp
	 * conversion if needed. ARP conversion is easier in this
	 * direction, since we are using ethernet as our backend.
	 */
	/*
	 * If this is an ARP packet, convert it. First, we want to make
	 * use of some of the fields, since they tell us a little bit
	 * about the sending machine.
	 */
	if (ether_type == ETH_P_ARP) {
		struct rfc2734_arp *arp1394;
		struct arphdr *arp;
		unsigned char *arp_ptr;
		u64 fifo_addr;
		u64 peer_guid;
		unsigned sspd;
		u16 max_payload;
		struct fwnet_peer *peer;
		unsigned long flags;

		/* The same bytes viewed as 1394-ARP and generic ARP. */
		arp1394 = (struct rfc2734_arp *)skb->data;
		arp = (struct arphdr *)skb->data;
		arp_ptr = (unsigned char *)(arp + 1);
		peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
		fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
				| get_unaligned_be32(&arp1394->fifo_lo);

		sspd = arp1394->sspd;
		/* Sanity check.  OS X 10.3 PPC reportedly sends 131. */
		if (sspd > SCODE_3200) {
			fw_notify("sspd %x out of range\n", sspd);
			sspd = SCODE_3200;
		}
		max_payload = fwnet_max_payload(arp1394->max_rec, sspd);

		/* Learn the sender's unicast parameters under the lock. */
		spin_lock_irqsave(&dev->lock, flags);
		peer = fwnet_peer_find_by_guid(dev, peer_guid);
		if (peer) {
			peer->fifo = fifo_addr;

			/* Never exceed what the peer advertised. */
			if (peer->speed > sspd)
				peer->speed = sspd;
			if (peer->max_payload > max_payload)
				peer->max_payload = max_payload;

			peer->ip = arp1394->sip;
		}
		spin_unlock_irqrestore(&dev->lock, flags);

		if (!peer) {
			fw_notify("No peer for ARP packet from %016llx\n",
				  (unsigned long long)peer_guid);
			goto no_peer;
		}

		/*
		 * Now that we're done with the 1394 specific stuff, we'll
		 * need to alter some of the data.  Believe it or not, all
		 * that needs to be done is sender_IP_address needs to be
		 * moved, the destination hardware address get stuffed
		 * in and the hardware address length set to 8.
		 *
		 * IMPORTANT: The code below overwrites 1394 specific data
		 * needed above so keep the munging of the data for the
		 * higher level IP stack last.
		 */

		arp->ar_hln = 8;
		/* skip over sender unique id */
		arp_ptr += arp->ar_hln;
		/* move sender IP addr */
		put_unaligned(arp1394->sip, (u32 *)arp_ptr);
		/* skip over sender IP addr */
		arp_ptr += arp->ar_pln;

		if (arp->ar_op == htons(ARPOP_REQUEST))
			memset(arp_ptr, 0, sizeof(u64));
		else
			memcpy(arp_ptr, net->dev_addr, sizeof(u64));
	}

	/* Now add the ethernet header. */
	guid = cpu_to_be64(dev->card->guid);
	if (dev_hard_header(skb, net, ether_type,
			   is_broadcast ? &broadcast_hw : &guid,
			   NULL, skb->len) >= 0) {
		struct fwnet_header *eth;
		u16 *rawp;
		__be16 protocol;

		/* Classify the frame and determine skb->protocol. */
		skb_reset_mac_header(skb);
		skb_pull(skb, sizeof(*eth));
		eth = (struct fwnet_header *)skb_mac_header(skb);
		if (*eth->h_dest & 1) {
			if (memcmp(eth->h_dest, net->broadcast,
				   net->addr_len) == 0)
				skb->pkt_type = PACKET_BROADCAST;
#if 0
			else
				skb->pkt_type = PACKET_MULTICAST;
#endif
		} else {
			if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
				skb->pkt_type = PACKET_OTHERHOST;
		}
		/* Values >= 1536 are ethertypes; below that it is 802.2/802.3. */
		if (ntohs(eth->h_proto) >= 1536) {
			protocol = eth->h_proto;
		} else {
			rawp = (u16 *)skb->data;
			if (*rawp == 0xffff)
				protocol = htons(ETH_P_802_3);
			else
				protocol = htons(ETH_P_802_2);
		}
		skb->protocol = protocol;
	}
	status = netif_rx(skb);
	if (status == NET_RX_DROP) {
		net->stats.rx_errors++;
		net->stats.rx_dropped++;
	} else {
		net->stats.rx_packets++;
		net->stats.rx_bytes += skb->len;
	}

	return 0;

no_peer:
	net->stats.rx_errors++;
	net->stats.rx_dropped++;

	dev_kfree_skb_any(skb);

	return -ENOENT;
}
665 | 665 | ||
/*
 * Handle one received RFC 2374 encapsulated packet.  An unfragmented
 * datagram is copied into a fresh skb and delivered immediately; a
 * fragment is merged into the per-peer reassembly state (under
 * dev->lock) and delivered once the datagram is complete.  Returns 0 on
 * success or when a fragment was buffered, negative errno on failure.
 */
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
				 int source_node_id, int generation,
				 bool is_broadcast)
{
	struct sk_buff *skb;
	struct net_device *net = dev->netdev;
	struct rfc2734_header hdr;
	unsigned lf;
	unsigned long flags;
	struct fwnet_peer *peer;
	struct fwnet_partial_datagram *pd;
	int fg_off;
	int dg_size;
	u16 datagram_label;
	int retval;
	u16 ether_type;

	hdr.w0 = be32_to_cpu(buf[0]);
	lf = fwnet_get_hdr_lf(&hdr);
	if (lf == RFC2374_HDR_UNFRAG) {
		/*
		 * An unfragmented datagram has been received by the ieee1394
		 * bus. Build an skbuff around it so we can pass it to the
		 * high level network layer.
		 */
		ether_type = fwnet_get_hdr_ether_type(&hdr);
		buf++;
		len -= RFC2374_UNFRAG_HDR_SIZE;

		/* +15 leaves room to align the payload after the header. */
		skb = dev_alloc_skb(len + net->hard_header_len + 15);
		if (unlikely(!skb)) {
			fw_error("out of memory\n");
			net->stats.rx_dropped++;

			return -ENOMEM;
		}
		skb_reserve(skb, (net->hard_header_len + 15) & ~15);
		memcpy(skb_put(skb, len), buf, len);

		return fwnet_finish_incoming_packet(net, skb, source_node_id,
						    is_broadcast, ether_type);
	}
	/* A datagram fragment has been received, now the fun begins. */
	hdr.w1 = ntohl(buf[1]);
	buf += 2;
	len -= RFC2374_FRAG_HDR_SIZE;
	if (lf == RFC2374_HDR_FIRSTFRAG) {
		/* Only the first fragment carries the ether type. */
		ether_type = fwnet_get_hdr_ether_type(&hdr);
		fg_off = 0;
	} else {
		ether_type = 0;
		fg_off = fwnet_get_hdr_fg_off(&hdr);
	}
	datagram_label = fwnet_get_hdr_dgl(&hdr);
	dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */

	/* All reassembly state is protected by dev->lock. */
	spin_lock_irqsave(&dev->lock, flags);

	peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
	if (!peer) {
		retval = -ENOENT;
		goto fail;
	}

	pd = fwnet_pd_find(peer, datagram_label);
	if (pd == NULL) {
		while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
			/* remove the oldest */
			fwnet_pd_delete(list_first_entry(&peer->pd_list,
				struct fwnet_partial_datagram, pd_link));
			peer->pdg_size--;
		}
		pd = fwnet_pd_new(net, peer, datagram_label,
				  dg_size, buf, fg_off, len);
		if (pd == NULL) {
			retval = -ENOMEM;
			goto fail;
		}
		peer->pdg_size++;
	} else {
		if (fwnet_frag_overlap(pd, fg_off, len) ||
		    pd->datagram_size != dg_size) {
			/*
			 * Differing datagram sizes or overlapping fragments,
			 * discard old datagram and start a new one.
			 */
			fwnet_pd_delete(pd);
			pd = fwnet_pd_new(net, peer, datagram_label,
					  dg_size, buf, fg_off, len);
			if (pd == NULL) {
				peer->pdg_size--;
				retval = -ENOMEM;
				goto fail;
			}
		} else {
			if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
				/*
				 * Couldn't save off fragment anyway
				 * so might as well obliterate the
				 * datagram now.
				 */
				fwnet_pd_delete(pd);
				peer->pdg_size--;
				retval = -ENOMEM;
				goto fail;
			}
		}
	} /* new datagram or add to existing one */

	if (lf == RFC2374_HDR_FIRSTFRAG)
		pd->ether_type = ether_type;

	if (fwnet_pd_is_complete(pd)) {
		ether_type = pd->ether_type;
		peer->pdg_size--;
		/* Hold the skb across fwnet_pd_delete()'s kfree_skb. */
		skb = skb_get(pd->skb);
		fwnet_pd_delete(pd);

		spin_unlock_irqrestore(&dev->lock, flags);

		return fwnet_finish_incoming_packet(net, skb, source_node_id,
						    false, ether_type);
	}
	/*
	 * Datagram is not complete, we're done for the
	 * moment.
	 */
	retval = 0;
 fail:
	spin_unlock_irqrestore(&dev->lock, flags);

	return retval;
}
799 | 799 | ||
800 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | 800 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, |
801 | int tcode, int destination, int source, int generation, | 801 | int tcode, int destination, int source, int generation, |
802 | unsigned long long offset, void *payload, size_t length, | 802 | unsigned long long offset, void *payload, size_t length, |
803 | void *callback_data) | 803 | void *callback_data) |
804 | { | 804 | { |
805 | struct fwnet_device *dev = callback_data; | 805 | struct fwnet_device *dev = callback_data; |
806 | int rcode; | 806 | int rcode; |
807 | 807 | ||
808 | if (destination == IEEE1394_ALL_NODES) { | 808 | if (destination == IEEE1394_ALL_NODES) { |
809 | kfree(r); | 809 | kfree(r); |
810 | 810 | ||
811 | return; | 811 | return; |
812 | } | 812 | } |
813 | 813 | ||
814 | if (offset != dev->handler.offset) | 814 | if (offset != dev->handler.offset) |
815 | rcode = RCODE_ADDRESS_ERROR; | 815 | rcode = RCODE_ADDRESS_ERROR; |
816 | else if (tcode != TCODE_WRITE_BLOCK_REQUEST) | 816 | else if (tcode != TCODE_WRITE_BLOCK_REQUEST) |
817 | rcode = RCODE_TYPE_ERROR; | 817 | rcode = RCODE_TYPE_ERROR; |
818 | else if (fwnet_incoming_packet(dev, payload, length, | 818 | else if (fwnet_incoming_packet(dev, payload, length, |
819 | source, generation, false) != 0) { | 819 | source, generation, false) != 0) { |
820 | fw_error("Incoming packet failure\n"); | 820 | fw_error("Incoming packet failure\n"); |
821 | rcode = RCODE_CONFLICT_ERROR; | 821 | rcode = RCODE_CONFLICT_ERROR; |
822 | } else | 822 | } else |
823 | rcode = RCODE_COMPLETE; | 823 | rcode = RCODE_COMPLETE; |
824 | 824 | ||
825 | fw_send_response(card, r, rcode); | 825 | fw_send_response(card, r, rcode); |
826 | } | 826 | } |
827 | 827 | ||
828 | static void fwnet_receive_broadcast(struct fw_iso_context *context, | 828 | static void fwnet_receive_broadcast(struct fw_iso_context *context, |
829 | u32 cycle, size_t header_length, void *header, void *data) | 829 | u32 cycle, size_t header_length, void *header, void *data) |
830 | { | 830 | { |
831 | struct fwnet_device *dev; | 831 | struct fwnet_device *dev; |
832 | struct fw_iso_packet packet; | 832 | struct fw_iso_packet packet; |
833 | struct fw_card *card; | 833 | struct fw_card *card; |
834 | __be16 *hdr_ptr; | 834 | __be16 *hdr_ptr; |
835 | __be32 *buf_ptr; | 835 | __be32 *buf_ptr; |
836 | int retval; | 836 | int retval; |
837 | u32 length; | 837 | u32 length; |
838 | u16 source_node_id; | 838 | u16 source_node_id; |
839 | u32 specifier_id; | 839 | u32 specifier_id; |
840 | u32 ver; | 840 | u32 ver; |
841 | unsigned long offset; | 841 | unsigned long offset; |
842 | unsigned long flags; | 842 | unsigned long flags; |
843 | 843 | ||
844 | dev = data; | 844 | dev = data; |
845 | card = dev->card; | 845 | card = dev->card; |
846 | hdr_ptr = header; | 846 | hdr_ptr = header; |
847 | length = be16_to_cpup(hdr_ptr); | 847 | length = be16_to_cpup(hdr_ptr); |
848 | 848 | ||
849 | spin_lock_irqsave(&dev->lock, flags); | 849 | spin_lock_irqsave(&dev->lock, flags); |
850 | 850 | ||
851 | offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; | 851 | offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; |
852 | buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; | 852 | buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; |
853 | if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) | 853 | if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) |
854 | dev->broadcast_rcv_next_ptr = 0; | 854 | dev->broadcast_rcv_next_ptr = 0; |
855 | 855 | ||
856 | spin_unlock_irqrestore(&dev->lock, flags); | 856 | spin_unlock_irqrestore(&dev->lock, flags); |
857 | 857 | ||
858 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | 858 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 |
859 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; | 859 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; |
860 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; | 860 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; |
861 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; | 861 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; |
862 | 862 | ||
863 | if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { | 863 | if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { |
864 | buf_ptr += 2; | 864 | buf_ptr += 2; |
865 | length -= IEEE1394_GASP_HDR_SIZE; | 865 | length -= IEEE1394_GASP_HDR_SIZE; |
866 | fwnet_incoming_packet(dev, buf_ptr, length, | 866 | fwnet_incoming_packet(dev, buf_ptr, length, |
867 | source_node_id, -1, true); | 867 | source_node_id, -1, true); |
868 | } | 868 | } |
869 | 869 | ||
870 | packet.payload_length = dev->rcv_buffer_size; | 870 | packet.payload_length = dev->rcv_buffer_size; |
871 | packet.interrupt = 1; | 871 | packet.interrupt = 1; |
872 | packet.skip = 0; | 872 | packet.skip = 0; |
873 | packet.tag = 3; | 873 | packet.tag = 3; |
874 | packet.sy = 0; | 874 | packet.sy = 0; |
875 | packet.header_length = IEEE1394_GASP_HDR_SIZE; | 875 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
876 | 876 | ||
877 | spin_lock_irqsave(&dev->lock, flags); | 877 | spin_lock_irqsave(&dev->lock, flags); |
878 | 878 | ||
879 | retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, | 879 | retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, |
880 | &dev->broadcast_rcv_buffer, offset); | 880 | &dev->broadcast_rcv_buffer, offset); |
881 | 881 | ||
882 | spin_unlock_irqrestore(&dev->lock, flags); | 882 | spin_unlock_irqrestore(&dev->lock, flags); |
883 | 883 | ||
884 | if (retval < 0) | 884 | if (retval >= 0) |
885 | fw_iso_context_queue_flush(dev->broadcast_rcv_context); | ||
886 | else | ||
885 | fw_error("requeue failed\n"); | 887 | fw_error("requeue failed\n"); |
886 | } | 888 | } |
887 | 889 | ||
888 | static struct kmem_cache *fwnet_packet_task_cache; | 890 | static struct kmem_cache *fwnet_packet_task_cache; |
889 | 891 | ||
890 | static void fwnet_free_ptask(struct fwnet_packet_task *ptask) | 892 | static void fwnet_free_ptask(struct fwnet_packet_task *ptask) |
891 | { | 893 | { |
892 | dev_kfree_skb_any(ptask->skb); | 894 | dev_kfree_skb_any(ptask->skb); |
893 | kmem_cache_free(fwnet_packet_task_cache, ptask); | 895 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
894 | } | 896 | } |
895 | 897 | ||
896 | /* Caller must hold dev->lock. */ | 898 | /* Caller must hold dev->lock. */ |
897 | static void dec_queued_datagrams(struct fwnet_device *dev) | 899 | static void dec_queued_datagrams(struct fwnet_device *dev) |
898 | { | 900 | { |
899 | if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) | 901 | if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) |
900 | netif_wake_queue(dev->netdev); | 902 | netif_wake_queue(dev->netdev); |
901 | } | 903 | } |
902 | 904 | ||
903 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | 905 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); |
904 | 906 | ||
905 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | 907 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) |
906 | { | 908 | { |
907 | struct fwnet_device *dev = ptask->dev; | 909 | struct fwnet_device *dev = ptask->dev; |
908 | struct sk_buff *skb = ptask->skb; | 910 | struct sk_buff *skb = ptask->skb; |
909 | unsigned long flags; | 911 | unsigned long flags; |
910 | bool free; | 912 | bool free; |
911 | 913 | ||
912 | spin_lock_irqsave(&dev->lock, flags); | 914 | spin_lock_irqsave(&dev->lock, flags); |
913 | 915 | ||
914 | ptask->outstanding_pkts--; | 916 | ptask->outstanding_pkts--; |
915 | 917 | ||
916 | /* Check whether we or the networking TX soft-IRQ is last user. */ | 918 | /* Check whether we or the networking TX soft-IRQ is last user. */ |
917 | free = (ptask->outstanding_pkts == 0 && ptask->enqueued); | 919 | free = (ptask->outstanding_pkts == 0 && ptask->enqueued); |
918 | if (free) | 920 | if (free) |
919 | dec_queued_datagrams(dev); | 921 | dec_queued_datagrams(dev); |
920 | 922 | ||
921 | if (ptask->outstanding_pkts == 0) { | 923 | if (ptask->outstanding_pkts == 0) { |
922 | dev->netdev->stats.tx_packets++; | 924 | dev->netdev->stats.tx_packets++; |
923 | dev->netdev->stats.tx_bytes += skb->len; | 925 | dev->netdev->stats.tx_bytes += skb->len; |
924 | } | 926 | } |
925 | 927 | ||
926 | spin_unlock_irqrestore(&dev->lock, flags); | 928 | spin_unlock_irqrestore(&dev->lock, flags); |
927 | 929 | ||
928 | if (ptask->outstanding_pkts > 0) { | 930 | if (ptask->outstanding_pkts > 0) { |
929 | u16 dg_size; | 931 | u16 dg_size; |
930 | u16 fg_off; | 932 | u16 fg_off; |
931 | u16 datagram_label; | 933 | u16 datagram_label; |
932 | u16 lf; | 934 | u16 lf; |
933 | 935 | ||
934 | /* Update the ptask to point to the next fragment and send it */ | 936 | /* Update the ptask to point to the next fragment and send it */ |
935 | lf = fwnet_get_hdr_lf(&ptask->hdr); | 937 | lf = fwnet_get_hdr_lf(&ptask->hdr); |
936 | switch (lf) { | 938 | switch (lf) { |
937 | case RFC2374_HDR_LASTFRAG: | 939 | case RFC2374_HDR_LASTFRAG: |
938 | case RFC2374_HDR_UNFRAG: | 940 | case RFC2374_HDR_UNFRAG: |
939 | default: | 941 | default: |
940 | fw_error("Outstanding packet %x lf %x, header %x,%x\n", | 942 | fw_error("Outstanding packet %x lf %x, header %x,%x\n", |
941 | ptask->outstanding_pkts, lf, ptask->hdr.w0, | 943 | ptask->outstanding_pkts, lf, ptask->hdr.w0, |
942 | ptask->hdr.w1); | 944 | ptask->hdr.w1); |
943 | BUG(); | 945 | BUG(); |
944 | 946 | ||
945 | case RFC2374_HDR_FIRSTFRAG: | 947 | case RFC2374_HDR_FIRSTFRAG: |
946 | /* Set frag type here for future interior fragments */ | 948 | /* Set frag type here for future interior fragments */ |
947 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); | 949 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); |
948 | fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | 950 | fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; |
949 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | 951 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); |
950 | break; | 952 | break; |
951 | 953 | ||
952 | case RFC2374_HDR_INTFRAG: | 954 | case RFC2374_HDR_INTFRAG: |
953 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); | 955 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); |
954 | fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) | 956 | fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) |
955 | + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | 957 | + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; |
956 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | 958 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); |
957 | break; | 959 | break; |
958 | } | 960 | } |
959 | 961 | ||
960 | skb_pull(skb, ptask->max_payload); | 962 | skb_pull(skb, ptask->max_payload); |
961 | if (ptask->outstanding_pkts > 1) { | 963 | if (ptask->outstanding_pkts > 1) { |
962 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, | 964 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, |
963 | dg_size, fg_off, datagram_label); | 965 | dg_size, fg_off, datagram_label); |
964 | } else { | 966 | } else { |
965 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, | 967 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, |
966 | dg_size, fg_off, datagram_label); | 968 | dg_size, fg_off, datagram_label); |
967 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; | 969 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; |
968 | } | 970 | } |
969 | fwnet_send_packet(ptask); | 971 | fwnet_send_packet(ptask); |
970 | } | 972 | } |
971 | 973 | ||
972 | if (free) | 974 | if (free) |
973 | fwnet_free_ptask(ptask); | 975 | fwnet_free_ptask(ptask); |
974 | } | 976 | } |
975 | 977 | ||
976 | static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) | 978 | static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) |
977 | { | 979 | { |
978 | struct fwnet_device *dev = ptask->dev; | 980 | struct fwnet_device *dev = ptask->dev; |
979 | unsigned long flags; | 981 | unsigned long flags; |
980 | bool free; | 982 | bool free; |
981 | 983 | ||
982 | spin_lock_irqsave(&dev->lock, flags); | 984 | spin_lock_irqsave(&dev->lock, flags); |
983 | 985 | ||
984 | /* One fragment failed; don't try to send remaining fragments. */ | 986 | /* One fragment failed; don't try to send remaining fragments. */ |
985 | ptask->outstanding_pkts = 0; | 987 | ptask->outstanding_pkts = 0; |
986 | 988 | ||
987 | /* Check whether we or the networking TX soft-IRQ is last user. */ | 989 | /* Check whether we or the networking TX soft-IRQ is last user. */ |
988 | free = ptask->enqueued; | 990 | free = ptask->enqueued; |
989 | if (free) | 991 | if (free) |
990 | dec_queued_datagrams(dev); | 992 | dec_queued_datagrams(dev); |
991 | 993 | ||
992 | dev->netdev->stats.tx_dropped++; | 994 | dev->netdev->stats.tx_dropped++; |
993 | dev->netdev->stats.tx_errors++; | 995 | dev->netdev->stats.tx_errors++; |
994 | 996 | ||
995 | spin_unlock_irqrestore(&dev->lock, flags); | 997 | spin_unlock_irqrestore(&dev->lock, flags); |
996 | 998 | ||
997 | if (free) | 999 | if (free) |
998 | fwnet_free_ptask(ptask); | 1000 | fwnet_free_ptask(ptask); |
999 | } | 1001 | } |
1000 | 1002 | ||
1001 | static void fwnet_write_complete(struct fw_card *card, int rcode, | 1003 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
1002 | void *payload, size_t length, void *data) | 1004 | void *payload, size_t length, void *data) |
1003 | { | 1005 | { |
1004 | struct fwnet_packet_task *ptask = data; | 1006 | struct fwnet_packet_task *ptask = data; |
1005 | static unsigned long j; | 1007 | static unsigned long j; |
1006 | static int last_rcode, errors_skipped; | 1008 | static int last_rcode, errors_skipped; |
1007 | 1009 | ||
1008 | if (rcode == RCODE_COMPLETE) { | 1010 | if (rcode == RCODE_COMPLETE) { |
1009 | fwnet_transmit_packet_done(ptask); | 1011 | fwnet_transmit_packet_done(ptask); |
1010 | } else { | 1012 | } else { |
1011 | fwnet_transmit_packet_failed(ptask); | 1013 | fwnet_transmit_packet_failed(ptask); |
1012 | 1014 | ||
1013 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { | 1015 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { |
1014 | fw_error("fwnet_write_complete: " | 1016 | fw_error("fwnet_write_complete: " |
1015 | "failed: %x (skipped %d)\n", rcode, errors_skipped); | 1017 | "failed: %x (skipped %d)\n", rcode, errors_skipped); |
1016 | 1018 | ||
1017 | errors_skipped = 0; | 1019 | errors_skipped = 0; |
1018 | last_rcode = rcode; | 1020 | last_rcode = rcode; |
1019 | } else | 1021 | } else |
1020 | errors_skipped++; | 1022 | errors_skipped++; |
1021 | } | 1023 | } |
1022 | } | 1024 | } |
1023 | 1025 | ||
1024 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) | 1026 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) |
1025 | { | 1027 | { |
1026 | struct fwnet_device *dev; | 1028 | struct fwnet_device *dev; |
1027 | unsigned tx_len; | 1029 | unsigned tx_len; |
1028 | struct rfc2734_header *bufhdr; | 1030 | struct rfc2734_header *bufhdr; |
1029 | unsigned long flags; | 1031 | unsigned long flags; |
1030 | bool free; | 1032 | bool free; |
1031 | 1033 | ||
1032 | dev = ptask->dev; | 1034 | dev = ptask->dev; |
1033 | tx_len = ptask->max_payload; | 1035 | tx_len = ptask->max_payload; |
1034 | switch (fwnet_get_hdr_lf(&ptask->hdr)) { | 1036 | switch (fwnet_get_hdr_lf(&ptask->hdr)) { |
1035 | case RFC2374_HDR_UNFRAG: | 1037 | case RFC2374_HDR_UNFRAG: |
1036 | bufhdr = (struct rfc2734_header *) | 1038 | bufhdr = (struct rfc2734_header *) |
1037 | skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); | 1039 | skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); |
1038 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | 1040 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); |
1039 | break; | 1041 | break; |
1040 | 1042 | ||
1041 | case RFC2374_HDR_FIRSTFRAG: | 1043 | case RFC2374_HDR_FIRSTFRAG: |
1042 | case RFC2374_HDR_INTFRAG: | 1044 | case RFC2374_HDR_INTFRAG: |
1043 | case RFC2374_HDR_LASTFRAG: | 1045 | case RFC2374_HDR_LASTFRAG: |
1044 | bufhdr = (struct rfc2734_header *) | 1046 | bufhdr = (struct rfc2734_header *) |
1045 | skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); | 1047 | skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); |
1046 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | 1048 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); |
1047 | put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); | 1049 | put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); |
1048 | break; | 1050 | break; |
1049 | 1051 | ||
1050 | default: | 1052 | default: |
1051 | BUG(); | 1053 | BUG(); |
1052 | } | 1054 | } |
1053 | if (ptask->dest_node == IEEE1394_ALL_NODES) { | 1055 | if (ptask->dest_node == IEEE1394_ALL_NODES) { |
1054 | u8 *p; | 1056 | u8 *p; |
1055 | int generation; | 1057 | int generation; |
1056 | int node_id; | 1058 | int node_id; |
1057 | 1059 | ||
1058 | /* ptask->generation may not have been set yet */ | 1060 | /* ptask->generation may not have been set yet */ |
1059 | generation = dev->card->generation; | 1061 | generation = dev->card->generation; |
1060 | smp_rmb(); | 1062 | smp_rmb(); |
1061 | node_id = dev->card->node_id; | 1063 | node_id = dev->card->node_id; |
1062 | 1064 | ||
1063 | p = skb_push(ptask->skb, 8); | 1065 | p = skb_push(ptask->skb, 8); |
1064 | put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); | 1066 | put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); |
1065 | put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | 1067 | put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 |
1066 | | RFC2734_SW_VERSION, &p[4]); | 1068 | | RFC2734_SW_VERSION, &p[4]); |
1067 | 1069 | ||
1068 | /* We should not transmit if broadcast_channel.valid == 0. */ | 1070 | /* We should not transmit if broadcast_channel.valid == 0. */ |
1069 | fw_send_request(dev->card, &ptask->transaction, | 1071 | fw_send_request(dev->card, &ptask->transaction, |
1070 | TCODE_STREAM_DATA, | 1072 | TCODE_STREAM_DATA, |
1071 | fw_stream_packet_destination_id(3, | 1073 | fw_stream_packet_destination_id(3, |
1072 | IEEE1394_BROADCAST_CHANNEL, 0), | 1074 | IEEE1394_BROADCAST_CHANNEL, 0), |
1073 | generation, SCODE_100, 0ULL, ptask->skb->data, | 1075 | generation, SCODE_100, 0ULL, ptask->skb->data, |
1074 | tx_len + 8, fwnet_write_complete, ptask); | 1076 | tx_len + 8, fwnet_write_complete, ptask); |
1075 | 1077 | ||
1076 | spin_lock_irqsave(&dev->lock, flags); | 1078 | spin_lock_irqsave(&dev->lock, flags); |
1077 | 1079 | ||
1078 | /* If the AT tasklet already ran, we may be last user. */ | 1080 | /* If the AT tasklet already ran, we may be last user. */ |
1079 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); | 1081 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
1080 | if (!free) | 1082 | if (!free) |
1081 | ptask->enqueued = true; | 1083 | ptask->enqueued = true; |
1082 | else | 1084 | else |
1083 | dec_queued_datagrams(dev); | 1085 | dec_queued_datagrams(dev); |
1084 | 1086 | ||
1085 | spin_unlock_irqrestore(&dev->lock, flags); | 1087 | spin_unlock_irqrestore(&dev->lock, flags); |
1086 | 1088 | ||
1087 | goto out; | 1089 | goto out; |
1088 | } | 1090 | } |
1089 | 1091 | ||
1090 | fw_send_request(dev->card, &ptask->transaction, | 1092 | fw_send_request(dev->card, &ptask->transaction, |
1091 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, | 1093 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, |
1092 | ptask->generation, ptask->speed, ptask->fifo_addr, | 1094 | ptask->generation, ptask->speed, ptask->fifo_addr, |
1093 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); | 1095 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); |
1094 | 1096 | ||
1095 | spin_lock_irqsave(&dev->lock, flags); | 1097 | spin_lock_irqsave(&dev->lock, flags); |
1096 | 1098 | ||
1097 | /* If the AT tasklet already ran, we may be last user. */ | 1099 | /* If the AT tasklet already ran, we may be last user. */ |
1098 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); | 1100 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
1099 | if (!free) | 1101 | if (!free) |
1100 | ptask->enqueued = true; | 1102 | ptask->enqueued = true; |
1101 | else | 1103 | else |
1102 | dec_queued_datagrams(dev); | 1104 | dec_queued_datagrams(dev); |
1103 | 1105 | ||
1104 | spin_unlock_irqrestore(&dev->lock, flags); | 1106 | spin_unlock_irqrestore(&dev->lock, flags); |
1105 | 1107 | ||
1106 | dev->netdev->trans_start = jiffies; | 1108 | dev->netdev->trans_start = jiffies; |
1107 | out: | 1109 | out: |
1108 | if (free) | 1110 | if (free) |
1109 | fwnet_free_ptask(ptask); | 1111 | fwnet_free_ptask(ptask); |
1110 | 1112 | ||
1111 | return 0; | 1113 | return 0; |
1112 | } | 1114 | } |
1113 | 1115 | ||
1114 | static int fwnet_broadcast_start(struct fwnet_device *dev) | 1116 | static int fwnet_broadcast_start(struct fwnet_device *dev) |
1115 | { | 1117 | { |
1116 | struct fw_iso_context *context; | 1118 | struct fw_iso_context *context; |
1117 | int retval; | 1119 | int retval; |
1118 | unsigned num_packets; | 1120 | unsigned num_packets; |
1119 | unsigned max_receive; | 1121 | unsigned max_receive; |
1120 | struct fw_iso_packet packet; | 1122 | struct fw_iso_packet packet; |
1121 | unsigned long offset; | 1123 | unsigned long offset; |
1122 | unsigned u; | 1124 | unsigned u; |
1123 | 1125 | ||
1124 | if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { | 1126 | if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { |
1125 | /* outside OHCI posted write area? */ | 1127 | /* outside OHCI posted write area? */ |
1126 | static const struct fw_address_region region = { | 1128 | static const struct fw_address_region region = { |
1127 | .start = 0xffff00000000ULL, | 1129 | .start = 0xffff00000000ULL, |
1128 | .end = CSR_REGISTER_BASE, | 1130 | .end = CSR_REGISTER_BASE, |
1129 | }; | 1131 | }; |
1130 | 1132 | ||
1131 | dev->handler.length = 4096; | 1133 | dev->handler.length = 4096; |
1132 | dev->handler.address_callback = fwnet_receive_packet; | 1134 | dev->handler.address_callback = fwnet_receive_packet; |
1133 | dev->handler.callback_data = dev; | 1135 | dev->handler.callback_data = dev; |
1134 | 1136 | ||
1135 | retval = fw_core_add_address_handler(&dev->handler, ®ion); | 1137 | retval = fw_core_add_address_handler(&dev->handler, ®ion); |
1136 | if (retval < 0) | 1138 | if (retval < 0) |
1137 | goto failed_initial; | 1139 | goto failed_initial; |
1138 | 1140 | ||
1139 | dev->local_fifo = dev->handler.offset; | 1141 | dev->local_fifo = dev->handler.offset; |
1140 | } | 1142 | } |
1141 | 1143 | ||
1142 | max_receive = 1U << (dev->card->max_receive + 1); | 1144 | max_receive = 1U << (dev->card->max_receive + 1); |
1143 | num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; | 1145 | num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; |
1144 | 1146 | ||
1145 | if (!dev->broadcast_rcv_context) { | 1147 | if (!dev->broadcast_rcv_context) { |
1146 | void **ptrptr; | 1148 | void **ptrptr; |
1147 | 1149 | ||
1148 | context = fw_iso_context_create(dev->card, | 1150 | context = fw_iso_context_create(dev->card, |
1149 | FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, | 1151 | FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, |
1150 | dev->card->link_speed, 8, fwnet_receive_broadcast, dev); | 1152 | dev->card->link_speed, 8, fwnet_receive_broadcast, dev); |
1151 | if (IS_ERR(context)) { | 1153 | if (IS_ERR(context)) { |
1152 | retval = PTR_ERR(context); | 1154 | retval = PTR_ERR(context); |
1153 | goto failed_context_create; | 1155 | goto failed_context_create; |
1154 | } | 1156 | } |
1155 | 1157 | ||
1156 | retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, | 1158 | retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, |
1157 | dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); | 1159 | dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); |
1158 | if (retval < 0) | 1160 | if (retval < 0) |
1159 | goto failed_buffer_init; | 1161 | goto failed_buffer_init; |
1160 | 1162 | ||
1161 | ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); | 1163 | ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); |
1162 | if (!ptrptr) { | 1164 | if (!ptrptr) { |
1163 | retval = -ENOMEM; | 1165 | retval = -ENOMEM; |
1164 | goto failed_ptrs_alloc; | 1166 | goto failed_ptrs_alloc; |
1165 | } | 1167 | } |
1166 | 1168 | ||
1167 | dev->broadcast_rcv_buffer_ptrs = ptrptr; | 1169 | dev->broadcast_rcv_buffer_ptrs = ptrptr; |
1168 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { | 1170 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { |
1169 | void *ptr; | 1171 | void *ptr; |
1170 | unsigned v; | 1172 | unsigned v; |
1171 | 1173 | ||
1172 | ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); | 1174 | ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); |
1173 | for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) | 1175 | for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) |
1174 | *ptrptr++ = (void *) | 1176 | *ptrptr++ = (void *) |
1175 | ((char *)ptr + v * max_receive); | 1177 | ((char *)ptr + v * max_receive); |
1176 | } | 1178 | } |
1177 | dev->broadcast_rcv_context = context; | 1179 | dev->broadcast_rcv_context = context; |
1178 | } else { | 1180 | } else { |
1179 | context = dev->broadcast_rcv_context; | 1181 | context = dev->broadcast_rcv_context; |
1180 | } | 1182 | } |
1181 | 1183 | ||
1182 | packet.payload_length = max_receive; | 1184 | packet.payload_length = max_receive; |
1183 | packet.interrupt = 1; | 1185 | packet.interrupt = 1; |
1184 | packet.skip = 0; | 1186 | packet.skip = 0; |
1185 | packet.tag = 3; | 1187 | packet.tag = 3; |
1186 | packet.sy = 0; | 1188 | packet.sy = 0; |
1187 | packet.header_length = IEEE1394_GASP_HDR_SIZE; | 1189 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
1188 | offset = 0; | 1190 | offset = 0; |
1189 | 1191 | ||
1190 | for (u = 0; u < num_packets; u++) { | 1192 | for (u = 0; u < num_packets; u++) { |
1191 | retval = fw_iso_context_queue(context, &packet, | 1193 | retval = fw_iso_context_queue(context, &packet, |
1192 | &dev->broadcast_rcv_buffer, offset); | 1194 | &dev->broadcast_rcv_buffer, offset); |
1193 | if (retval < 0) | 1195 | if (retval < 0) |
1194 | goto failed_rcv_queue; | 1196 | goto failed_rcv_queue; |
1195 | 1197 | ||
1196 | offset += max_receive; | 1198 | offset += max_receive; |
1197 | } | 1199 | } |
1198 | dev->num_broadcast_rcv_ptrs = num_packets; | 1200 | dev->num_broadcast_rcv_ptrs = num_packets; |
1199 | dev->rcv_buffer_size = max_receive; | 1201 | dev->rcv_buffer_size = max_receive; |
1200 | dev->broadcast_rcv_next_ptr = 0U; | 1202 | dev->broadcast_rcv_next_ptr = 0U; |
1201 | retval = fw_iso_context_start(context, -1, 0, | 1203 | retval = fw_iso_context_start(context, -1, 0, |
1202 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ | 1204 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ |
1203 | if (retval < 0) | 1205 | if (retval < 0) |
1204 | goto failed_rcv_queue; | 1206 | goto failed_rcv_queue; |
1205 | 1207 | ||
1206 | /* FIXME: adjust it according to the min. speed of all known peers? */ | 1208 | /* FIXME: adjust it according to the min. speed of all known peers? */ |
1207 | dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 | 1209 | dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 |
1208 | - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; | 1210 | - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; |
1209 | dev->broadcast_state = FWNET_BROADCAST_RUNNING; | 1211 | dev->broadcast_state = FWNET_BROADCAST_RUNNING; |
1210 | 1212 | ||
1211 | return 0; | 1213 | return 0; |
1212 | 1214 | ||
1213 | failed_rcv_queue: | 1215 | failed_rcv_queue: |
1214 | kfree(dev->broadcast_rcv_buffer_ptrs); | 1216 | kfree(dev->broadcast_rcv_buffer_ptrs); |
1215 | dev->broadcast_rcv_buffer_ptrs = NULL; | 1217 | dev->broadcast_rcv_buffer_ptrs = NULL; |
1216 | failed_ptrs_alloc: | 1218 | failed_ptrs_alloc: |
1217 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); | 1219 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); |
1218 | failed_buffer_init: | 1220 | failed_buffer_init: |
1219 | fw_iso_context_destroy(context); | 1221 | fw_iso_context_destroy(context); |
1220 | dev->broadcast_rcv_context = NULL; | 1222 | dev->broadcast_rcv_context = NULL; |
1221 | failed_context_create: | 1223 | failed_context_create: |
1222 | fw_core_remove_address_handler(&dev->handler); | 1224 | fw_core_remove_address_handler(&dev->handler); |
1223 | failed_initial: | 1225 | failed_initial: |
1224 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | 1226 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
1225 | 1227 | ||
1226 | return retval; | 1228 | return retval; |
1227 | } | 1229 | } |
1228 | 1230 | ||
1229 | static void set_carrier_state(struct fwnet_device *dev) | 1231 | static void set_carrier_state(struct fwnet_device *dev) |
1230 | { | 1232 | { |
1231 | if (dev->peer_count > 1) | 1233 | if (dev->peer_count > 1) |
1232 | netif_carrier_on(dev->netdev); | 1234 | netif_carrier_on(dev->netdev); |
1233 | else | 1235 | else |
1234 | netif_carrier_off(dev->netdev); | 1236 | netif_carrier_off(dev->netdev); |
1235 | } | 1237 | } |
1236 | 1238 | ||
1237 | /* ifup */ | 1239 | /* ifup */ |
1238 | static int fwnet_open(struct net_device *net) | 1240 | static int fwnet_open(struct net_device *net) |
1239 | { | 1241 | { |
1240 | struct fwnet_device *dev = netdev_priv(net); | 1242 | struct fwnet_device *dev = netdev_priv(net); |
1241 | int ret; | 1243 | int ret; |
1242 | 1244 | ||
1243 | if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { | 1245 | if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { |
1244 | ret = fwnet_broadcast_start(dev); | 1246 | ret = fwnet_broadcast_start(dev); |
1245 | if (ret) | 1247 | if (ret) |
1246 | return ret; | 1248 | return ret; |
1247 | } | 1249 | } |
1248 | netif_start_queue(net); | 1250 | netif_start_queue(net); |
1249 | 1251 | ||
1250 | spin_lock_irq(&dev->lock); | 1252 | spin_lock_irq(&dev->lock); |
1251 | set_carrier_state(dev); | 1253 | set_carrier_state(dev); |
1252 | spin_unlock_irq(&dev->lock); | 1254 | spin_unlock_irq(&dev->lock); |
1253 | 1255 | ||
1254 | return 0; | 1256 | return 0; |
1255 | } | 1257 | } |
1256 | 1258 | ||
1257 | /* ifdown */ | 1259 | /* ifdown */ |
1258 | static int fwnet_stop(struct net_device *net) | 1260 | static int fwnet_stop(struct net_device *net) |
1259 | { | 1261 | { |
1260 | netif_stop_queue(net); | 1262 | netif_stop_queue(net); |
1261 | 1263 | ||
1262 | /* Deallocate iso context for use by other applications? */ | 1264 | /* Deallocate iso context for use by other applications? */ |
1263 | 1265 | ||
1264 | return 0; | 1266 | return 0; |
1265 | } | 1267 | } |
1266 | 1268 | ||
1267 | static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | 1269 | static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) |
1268 | { | 1270 | { |
1269 | struct fwnet_header hdr_buf; | 1271 | struct fwnet_header hdr_buf; |
1270 | struct fwnet_device *dev = netdev_priv(net); | 1272 | struct fwnet_device *dev = netdev_priv(net); |
1271 | __be16 proto; | 1273 | __be16 proto; |
1272 | u16 dest_node; | 1274 | u16 dest_node; |
1273 | unsigned max_payload; | 1275 | unsigned max_payload; |
1274 | u16 dg_size; | 1276 | u16 dg_size; |
1275 | u16 *datagram_label_ptr; | 1277 | u16 *datagram_label_ptr; |
1276 | struct fwnet_packet_task *ptask; | 1278 | struct fwnet_packet_task *ptask; |
1277 | struct fwnet_peer *peer; | 1279 | struct fwnet_peer *peer; |
1278 | unsigned long flags; | 1280 | unsigned long flags; |
1279 | 1281 | ||
1280 | spin_lock_irqsave(&dev->lock, flags); | 1282 | spin_lock_irqsave(&dev->lock, flags); |
1281 | 1283 | ||
1282 | /* Can this happen? */ | 1284 | /* Can this happen? */ |
1283 | if (netif_queue_stopped(dev->netdev)) { | 1285 | if (netif_queue_stopped(dev->netdev)) { |
1284 | spin_unlock_irqrestore(&dev->lock, flags); | 1286 | spin_unlock_irqrestore(&dev->lock, flags); |
1285 | 1287 | ||
1286 | return NETDEV_TX_BUSY; | 1288 | return NETDEV_TX_BUSY; |
1287 | } | 1289 | } |
1288 | 1290 | ||
1289 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); | 1291 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); |
1290 | if (ptask == NULL) | 1292 | if (ptask == NULL) |
1291 | goto fail; | 1293 | goto fail; |
1292 | 1294 | ||
1293 | skb = skb_share_check(skb, GFP_ATOMIC); | 1295 | skb = skb_share_check(skb, GFP_ATOMIC); |
1294 | if (!skb) | 1296 | if (!skb) |
1295 | goto fail; | 1297 | goto fail; |
1296 | 1298 | ||
1297 | /* | 1299 | /* |
1298 | * Make a copy of the driver-specific header. | 1300 | * Make a copy of the driver-specific header. |
1299 | * We might need to rebuild the header on tx failure. | 1301 | * We might need to rebuild the header on tx failure. |
1300 | */ | 1302 | */ |
1301 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); | 1303 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); |
1302 | skb_pull(skb, sizeof(hdr_buf)); | 1304 | skb_pull(skb, sizeof(hdr_buf)); |
1303 | 1305 | ||
1304 | proto = hdr_buf.h_proto; | 1306 | proto = hdr_buf.h_proto; |
1305 | dg_size = skb->len; | 1307 | dg_size = skb->len; |
1306 | 1308 | ||
1307 | /* | 1309 | /* |
1308 | * Set the transmission type for the packet. ARP packets and IP | 1310 | * Set the transmission type for the packet. ARP packets and IP |
1309 | * broadcast packets are sent via GASP. | 1311 | * broadcast packets are sent via GASP. |
1310 | */ | 1312 | */ |
1311 | if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 | 1313 | if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 |
1312 | || proto == htons(ETH_P_ARP) | 1314 | || proto == htons(ETH_P_ARP) |
1313 | || (proto == htons(ETH_P_IP) | 1315 | || (proto == htons(ETH_P_IP) |
1314 | && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { | 1316 | && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { |
1315 | max_payload = dev->broadcast_xmt_max_payload; | 1317 | max_payload = dev->broadcast_xmt_max_payload; |
1316 | datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; | 1318 | datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; |
1317 | 1319 | ||
1318 | ptask->fifo_addr = FWNET_NO_FIFO_ADDR; | 1320 | ptask->fifo_addr = FWNET_NO_FIFO_ADDR; |
1319 | ptask->generation = 0; | 1321 | ptask->generation = 0; |
1320 | ptask->dest_node = IEEE1394_ALL_NODES; | 1322 | ptask->dest_node = IEEE1394_ALL_NODES; |
1321 | ptask->speed = SCODE_100; | 1323 | ptask->speed = SCODE_100; |
1322 | } else { | 1324 | } else { |
1323 | __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); | 1325 | __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); |
1324 | u8 generation; | 1326 | u8 generation; |
1325 | 1327 | ||
1326 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); | 1328 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); |
1327 | if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) | 1329 | if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) |
1328 | goto fail; | 1330 | goto fail; |
1329 | 1331 | ||
1330 | generation = peer->generation; | 1332 | generation = peer->generation; |
1331 | dest_node = peer->node_id; | 1333 | dest_node = peer->node_id; |
1332 | max_payload = peer->max_payload; | 1334 | max_payload = peer->max_payload; |
1333 | datagram_label_ptr = &peer->datagram_label; | 1335 | datagram_label_ptr = &peer->datagram_label; |
1334 | 1336 | ||
1335 | ptask->fifo_addr = peer->fifo; | 1337 | ptask->fifo_addr = peer->fifo; |
1336 | ptask->generation = generation; | 1338 | ptask->generation = generation; |
1337 | ptask->dest_node = dest_node; | 1339 | ptask->dest_node = dest_node; |
1338 | ptask->speed = peer->speed; | 1340 | ptask->speed = peer->speed; |
1339 | } | 1341 | } |
1340 | 1342 | ||
1341 | /* If this is an ARP packet, convert it */ | 1343 | /* If this is an ARP packet, convert it */ |
1342 | if (proto == htons(ETH_P_ARP)) { | 1344 | if (proto == htons(ETH_P_ARP)) { |
1343 | struct arphdr *arp = (struct arphdr *)skb->data; | 1345 | struct arphdr *arp = (struct arphdr *)skb->data; |
1344 | unsigned char *arp_ptr = (unsigned char *)(arp + 1); | 1346 | unsigned char *arp_ptr = (unsigned char *)(arp + 1); |
1345 | struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data; | 1347 | struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data; |
1346 | __be32 ipaddr; | 1348 | __be32 ipaddr; |
1347 | 1349 | ||
1348 | ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN)); | 1350 | ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN)); |
1349 | 1351 | ||
1350 | arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN; | 1352 | arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN; |
1351 | arp1394->max_rec = dev->card->max_receive; | 1353 | arp1394->max_rec = dev->card->max_receive; |
1352 | arp1394->sspd = dev->card->link_speed; | 1354 | arp1394->sspd = dev->card->link_speed; |
1353 | 1355 | ||
1354 | put_unaligned_be16(dev->local_fifo >> 32, | 1356 | put_unaligned_be16(dev->local_fifo >> 32, |
1355 | &arp1394->fifo_hi); | 1357 | &arp1394->fifo_hi); |
1356 | put_unaligned_be32(dev->local_fifo & 0xffffffff, | 1358 | put_unaligned_be32(dev->local_fifo & 0xffffffff, |
1357 | &arp1394->fifo_lo); | 1359 | &arp1394->fifo_lo); |
1358 | put_unaligned(ipaddr, &arp1394->sip); | 1360 | put_unaligned(ipaddr, &arp1394->sip); |
1359 | } | 1361 | } |
1360 | 1362 | ||
1361 | ptask->hdr.w0 = 0; | 1363 | ptask->hdr.w0 = 0; |
1362 | ptask->hdr.w1 = 0; | 1364 | ptask->hdr.w1 = 0; |
1363 | ptask->skb = skb; | 1365 | ptask->skb = skb; |
1364 | ptask->dev = dev; | 1366 | ptask->dev = dev; |
1365 | 1367 | ||
1366 | /* Does it all fit in one packet? */ | 1368 | /* Does it all fit in one packet? */ |
1367 | if (dg_size <= max_payload) { | 1369 | if (dg_size <= max_payload) { |
1368 | fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); | 1370 | fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); |
1369 | ptask->outstanding_pkts = 1; | 1371 | ptask->outstanding_pkts = 1; |
1370 | max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; | 1372 | max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; |
1371 | } else { | 1373 | } else { |
1372 | u16 datagram_label; | 1374 | u16 datagram_label; |
1373 | 1375 | ||
1374 | max_payload -= RFC2374_FRAG_OVERHEAD; | 1376 | max_payload -= RFC2374_FRAG_OVERHEAD; |
1375 | datagram_label = (*datagram_label_ptr)++; | 1377 | datagram_label = (*datagram_label_ptr)++; |
1376 | fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, | 1378 | fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, |
1377 | datagram_label); | 1379 | datagram_label); |
1378 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); | 1380 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); |
1379 | max_payload += RFC2374_FRAG_HDR_SIZE; | 1381 | max_payload += RFC2374_FRAG_HDR_SIZE; |
1380 | } | 1382 | } |
1381 | 1383 | ||
1382 | if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) | 1384 | if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) |
1383 | netif_stop_queue(dev->netdev); | 1385 | netif_stop_queue(dev->netdev); |
1384 | 1386 | ||
1385 | spin_unlock_irqrestore(&dev->lock, flags); | 1387 | spin_unlock_irqrestore(&dev->lock, flags); |
1386 | 1388 | ||
1387 | ptask->max_payload = max_payload; | 1389 | ptask->max_payload = max_payload; |
1388 | ptask->enqueued = 0; | 1390 | ptask->enqueued = 0; |
1389 | 1391 | ||
1390 | fwnet_send_packet(ptask); | 1392 | fwnet_send_packet(ptask); |
1391 | 1393 | ||
1392 | return NETDEV_TX_OK; | 1394 | return NETDEV_TX_OK; |
1393 | 1395 | ||
1394 | fail: | 1396 | fail: |
1395 | spin_unlock_irqrestore(&dev->lock, flags); | 1397 | spin_unlock_irqrestore(&dev->lock, flags); |
1396 | 1398 | ||
1397 | if (ptask) | 1399 | if (ptask) |
1398 | kmem_cache_free(fwnet_packet_task_cache, ptask); | 1400 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
1399 | 1401 | ||
1400 | if (skb != NULL) | 1402 | if (skb != NULL) |
1401 | dev_kfree_skb(skb); | 1403 | dev_kfree_skb(skb); |
1402 | 1404 | ||
1403 | net->stats.tx_dropped++; | 1405 | net->stats.tx_dropped++; |
1404 | net->stats.tx_errors++; | 1406 | net->stats.tx_errors++; |
1405 | 1407 | ||
1406 | /* | 1408 | /* |
1407 | * FIXME: According to a patch from 2003-02-26, "returning non-zero | 1409 | * FIXME: According to a patch from 2003-02-26, "returning non-zero |
1408 | * causes serious problems" here, allegedly. Before that patch, | 1410 | * causes serious problems" here, allegedly. Before that patch, |
1409 | * -ERRNO was returned which is not appropriate under Linux 2.6. | 1411 | * -ERRNO was returned which is not appropriate under Linux 2.6. |
1410 | * Perhaps more needs to be done? Stop the queue in serious | 1412 | * Perhaps more needs to be done? Stop the queue in serious |
1411 | * conditions and restart it elsewhere? | 1413 | * conditions and restart it elsewhere? |
1412 | */ | 1414 | */ |
1413 | return NETDEV_TX_OK; | 1415 | return NETDEV_TX_OK; |
1414 | } | 1416 | } |
1415 | 1417 | ||
1416 | static int fwnet_change_mtu(struct net_device *net, int new_mtu) | 1418 | static int fwnet_change_mtu(struct net_device *net, int new_mtu) |
1417 | { | 1419 | { |
1418 | if (new_mtu < 68) | 1420 | if (new_mtu < 68) |
1419 | return -EINVAL; | 1421 | return -EINVAL; |
1420 | 1422 | ||
1421 | net->mtu = new_mtu; | 1423 | net->mtu = new_mtu; |
1422 | return 0; | 1424 | return 0; |
1423 | } | 1425 | } |
1424 | 1426 | ||
1425 | static const struct ethtool_ops fwnet_ethtool_ops = { | 1427 | static const struct ethtool_ops fwnet_ethtool_ops = { |
1426 | .get_link = ethtool_op_get_link, | 1428 | .get_link = ethtool_op_get_link, |
1427 | }; | 1429 | }; |
1428 | 1430 | ||
1429 | static const struct net_device_ops fwnet_netdev_ops = { | 1431 | static const struct net_device_ops fwnet_netdev_ops = { |
1430 | .ndo_open = fwnet_open, | 1432 | .ndo_open = fwnet_open, |
1431 | .ndo_stop = fwnet_stop, | 1433 | .ndo_stop = fwnet_stop, |
1432 | .ndo_start_xmit = fwnet_tx, | 1434 | .ndo_start_xmit = fwnet_tx, |
1433 | .ndo_change_mtu = fwnet_change_mtu, | 1435 | .ndo_change_mtu = fwnet_change_mtu, |
1434 | }; | 1436 | }; |
1435 | 1437 | ||
1436 | static void fwnet_init_dev(struct net_device *net) | 1438 | static void fwnet_init_dev(struct net_device *net) |
1437 | { | 1439 | { |
1438 | net->header_ops = &fwnet_header_ops; | 1440 | net->header_ops = &fwnet_header_ops; |
1439 | net->netdev_ops = &fwnet_netdev_ops; | 1441 | net->netdev_ops = &fwnet_netdev_ops; |
1440 | net->watchdog_timeo = 2 * HZ; | 1442 | net->watchdog_timeo = 2 * HZ; |
1441 | net->flags = IFF_BROADCAST | IFF_MULTICAST; | 1443 | net->flags = IFF_BROADCAST | IFF_MULTICAST; |
1442 | net->features = NETIF_F_HIGHDMA; | 1444 | net->features = NETIF_F_HIGHDMA; |
1443 | net->addr_len = FWNET_ALEN; | 1445 | net->addr_len = FWNET_ALEN; |
1444 | net->hard_header_len = FWNET_HLEN; | 1446 | net->hard_header_len = FWNET_HLEN; |
1445 | net->type = ARPHRD_IEEE1394; | 1447 | net->type = ARPHRD_IEEE1394; |
1446 | net->tx_queue_len = FWNET_TX_QUEUE_LEN; | 1448 | net->tx_queue_len = FWNET_TX_QUEUE_LEN; |
1447 | net->ethtool_ops = &fwnet_ethtool_ops; | 1449 | net->ethtool_ops = &fwnet_ethtool_ops; |
1448 | } | 1450 | } |
1449 | 1451 | ||
1450 | /* caller must hold fwnet_device_mutex */ | 1452 | /* caller must hold fwnet_device_mutex */ |
1451 | static struct fwnet_device *fwnet_dev_find(struct fw_card *card) | 1453 | static struct fwnet_device *fwnet_dev_find(struct fw_card *card) |
1452 | { | 1454 | { |
1453 | struct fwnet_device *dev; | 1455 | struct fwnet_device *dev; |
1454 | 1456 | ||
1455 | list_for_each_entry(dev, &fwnet_device_list, dev_link) | 1457 | list_for_each_entry(dev, &fwnet_device_list, dev_link) |
1456 | if (dev->card == card) | 1458 | if (dev->card == card) |
1457 | return dev; | 1459 | return dev; |
1458 | 1460 | ||
1459 | return NULL; | 1461 | return NULL; |
1460 | } | 1462 | } |
1461 | 1463 | ||
1462 | static int fwnet_add_peer(struct fwnet_device *dev, | 1464 | static int fwnet_add_peer(struct fwnet_device *dev, |
1463 | struct fw_unit *unit, struct fw_device *device) | 1465 | struct fw_unit *unit, struct fw_device *device) |
1464 | { | 1466 | { |
1465 | struct fwnet_peer *peer; | 1467 | struct fwnet_peer *peer; |
1466 | 1468 | ||
1467 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); | 1469 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); |
1468 | if (!peer) | 1470 | if (!peer) |
1469 | return -ENOMEM; | 1471 | return -ENOMEM; |
1470 | 1472 | ||
1471 | dev_set_drvdata(&unit->device, peer); | 1473 | dev_set_drvdata(&unit->device, peer); |
1472 | 1474 | ||
1473 | peer->dev = dev; | 1475 | peer->dev = dev; |
1474 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 1476 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
1475 | peer->fifo = FWNET_NO_FIFO_ADDR; | 1477 | peer->fifo = FWNET_NO_FIFO_ADDR; |
1476 | peer->ip = 0; | 1478 | peer->ip = 0; |
1477 | INIT_LIST_HEAD(&peer->pd_list); | 1479 | INIT_LIST_HEAD(&peer->pd_list); |
1478 | peer->pdg_size = 0; | 1480 | peer->pdg_size = 0; |
1479 | peer->datagram_label = 0; | 1481 | peer->datagram_label = 0; |
1480 | peer->speed = device->max_speed; | 1482 | peer->speed = device->max_speed; |
1481 | peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); | 1483 | peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); |
1482 | 1484 | ||
1483 | peer->generation = device->generation; | 1485 | peer->generation = device->generation; |
1484 | smp_rmb(); | 1486 | smp_rmb(); |
1485 | peer->node_id = device->node_id; | 1487 | peer->node_id = device->node_id; |
1486 | 1488 | ||
1487 | spin_lock_irq(&dev->lock); | 1489 | spin_lock_irq(&dev->lock); |
1488 | list_add_tail(&peer->peer_link, &dev->peer_list); | 1490 | list_add_tail(&peer->peer_link, &dev->peer_list); |
1489 | dev->peer_count++; | 1491 | dev->peer_count++; |
1490 | set_carrier_state(dev); | 1492 | set_carrier_state(dev); |
1491 | spin_unlock_irq(&dev->lock); | 1493 | spin_unlock_irq(&dev->lock); |
1492 | 1494 | ||
1493 | return 0; | 1495 | return 0; |
1494 | } | 1496 | } |
1495 | 1497 | ||
1496 | static int fwnet_probe(struct device *_dev) | 1498 | static int fwnet_probe(struct device *_dev) |
1497 | { | 1499 | { |
1498 | struct fw_unit *unit = fw_unit(_dev); | 1500 | struct fw_unit *unit = fw_unit(_dev); |
1499 | struct fw_device *device = fw_parent_device(unit); | 1501 | struct fw_device *device = fw_parent_device(unit); |
1500 | struct fw_card *card = device->card; | 1502 | struct fw_card *card = device->card; |
1501 | struct net_device *net; | 1503 | struct net_device *net; |
1502 | bool allocated_netdev = false; | 1504 | bool allocated_netdev = false; |
1503 | struct fwnet_device *dev; | 1505 | struct fwnet_device *dev; |
1504 | unsigned max_mtu; | 1506 | unsigned max_mtu; |
1505 | int ret; | 1507 | int ret; |
1506 | 1508 | ||
1507 | mutex_lock(&fwnet_device_mutex); | 1509 | mutex_lock(&fwnet_device_mutex); |
1508 | 1510 | ||
1509 | dev = fwnet_dev_find(card); | 1511 | dev = fwnet_dev_find(card); |
1510 | if (dev) { | 1512 | if (dev) { |
1511 | net = dev->netdev; | 1513 | net = dev->netdev; |
1512 | goto have_dev; | 1514 | goto have_dev; |
1513 | } | 1515 | } |
1514 | 1516 | ||
1515 | net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev); | 1517 | net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev); |
1516 | if (net == NULL) { | 1518 | if (net == NULL) { |
1517 | ret = -ENOMEM; | 1519 | ret = -ENOMEM; |
1518 | goto out; | 1520 | goto out; |
1519 | } | 1521 | } |
1520 | 1522 | ||
1521 | allocated_netdev = true; | 1523 | allocated_netdev = true; |
1522 | SET_NETDEV_DEV(net, card->device); | 1524 | SET_NETDEV_DEV(net, card->device); |
1523 | dev = netdev_priv(net); | 1525 | dev = netdev_priv(net); |
1524 | 1526 | ||
1525 | spin_lock_init(&dev->lock); | 1527 | spin_lock_init(&dev->lock); |
1526 | dev->broadcast_state = FWNET_BROADCAST_ERROR; | 1528 | dev->broadcast_state = FWNET_BROADCAST_ERROR; |
1527 | dev->broadcast_rcv_context = NULL; | 1529 | dev->broadcast_rcv_context = NULL; |
1528 | dev->broadcast_xmt_max_payload = 0; | 1530 | dev->broadcast_xmt_max_payload = 0; |
1529 | dev->broadcast_xmt_datagramlabel = 0; | 1531 | dev->broadcast_xmt_datagramlabel = 0; |
1530 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | 1532 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
1531 | dev->queued_datagrams = 0; | 1533 | dev->queued_datagrams = 0; |
1532 | INIT_LIST_HEAD(&dev->peer_list); | 1534 | INIT_LIST_HEAD(&dev->peer_list); |
1533 | dev->card = card; | 1535 | dev->card = card; |
1534 | dev->netdev = net; | 1536 | dev->netdev = net; |
1535 | 1537 | ||
1536 | /* | 1538 | /* |
1537 | * Use the RFC 2734 default 1500 octets or the maximum payload | 1539 | * Use the RFC 2734 default 1500 octets or the maximum payload |
1538 | * as initial MTU | 1540 | * as initial MTU |
1539 | */ | 1541 | */ |
1540 | max_mtu = (1 << (card->max_receive + 1)) | 1542 | max_mtu = (1 << (card->max_receive + 1)) |
1541 | - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; | 1543 | - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; |
1542 | net->mtu = min(1500U, max_mtu); | 1544 | net->mtu = min(1500U, max_mtu); |
1543 | 1545 | ||
1544 | /* Set our hardware address while we're at it */ | 1546 | /* Set our hardware address while we're at it */ |
1545 | put_unaligned_be64(card->guid, net->dev_addr); | 1547 | put_unaligned_be64(card->guid, net->dev_addr); |
1546 | put_unaligned_be64(~0ULL, net->broadcast); | 1548 | put_unaligned_be64(~0ULL, net->broadcast); |
1547 | ret = register_netdev(net); | 1549 | ret = register_netdev(net); |
1548 | if (ret) { | 1550 | if (ret) { |
1549 | fw_error("Cannot register the driver\n"); | 1551 | fw_error("Cannot register the driver\n"); |
1550 | goto out; | 1552 | goto out; |
1551 | } | 1553 | } |
1552 | 1554 | ||
1553 | list_add_tail(&dev->dev_link, &fwnet_device_list); | 1555 | list_add_tail(&dev->dev_link, &fwnet_device_list); |
1554 | fw_notify("%s: IPv4 over FireWire on device %016llx\n", | 1556 | fw_notify("%s: IPv4 over FireWire on device %016llx\n", |
1555 | net->name, (unsigned long long)card->guid); | 1557 | net->name, (unsigned long long)card->guid); |
1556 | have_dev: | 1558 | have_dev: |
1557 | ret = fwnet_add_peer(dev, unit, device); | 1559 | ret = fwnet_add_peer(dev, unit, device); |
1558 | if (ret && allocated_netdev) { | 1560 | if (ret && allocated_netdev) { |
1559 | unregister_netdev(net); | 1561 | unregister_netdev(net); |
1560 | list_del(&dev->dev_link); | 1562 | list_del(&dev->dev_link); |
1561 | } | 1563 | } |
1562 | out: | 1564 | out: |
1563 | if (ret && allocated_netdev) | 1565 | if (ret && allocated_netdev) |
1564 | free_netdev(net); | 1566 | free_netdev(net); |
1565 | 1567 | ||
1566 | mutex_unlock(&fwnet_device_mutex); | 1568 | mutex_unlock(&fwnet_device_mutex); |
1567 | 1569 | ||
1568 | return ret; | 1570 | return ret; |
1569 | } | 1571 | } |
1570 | 1572 | ||
1571 | static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev) | 1573 | static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev) |
1572 | { | 1574 | { |
1573 | struct fwnet_partial_datagram *pd, *pd_next; | 1575 | struct fwnet_partial_datagram *pd, *pd_next; |
1574 | 1576 | ||
1575 | spin_lock_irq(&dev->lock); | 1577 | spin_lock_irq(&dev->lock); |
1576 | list_del(&peer->peer_link); | 1578 | list_del(&peer->peer_link); |
1577 | dev->peer_count--; | 1579 | dev->peer_count--; |
1578 | set_carrier_state(dev); | 1580 | set_carrier_state(dev); |
1579 | spin_unlock_irq(&dev->lock); | 1581 | spin_unlock_irq(&dev->lock); |
1580 | 1582 | ||
1581 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) | 1583 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) |
1582 | fwnet_pd_delete(pd); | 1584 | fwnet_pd_delete(pd); |
1583 | 1585 | ||
1584 | kfree(peer); | 1586 | kfree(peer); |
1585 | } | 1587 | } |
1586 | 1588 | ||
1587 | static int fwnet_remove(struct device *_dev) | 1589 | static int fwnet_remove(struct device *_dev) |
1588 | { | 1590 | { |
1589 | struct fwnet_peer *peer = dev_get_drvdata(_dev); | 1591 | struct fwnet_peer *peer = dev_get_drvdata(_dev); |
1590 | struct fwnet_device *dev = peer->dev; | 1592 | struct fwnet_device *dev = peer->dev; |
1591 | struct net_device *net; | 1593 | struct net_device *net; |
1592 | int i; | 1594 | int i; |
1593 | 1595 | ||
1594 | mutex_lock(&fwnet_device_mutex); | 1596 | mutex_lock(&fwnet_device_mutex); |
1595 | 1597 | ||
1596 | net = dev->netdev; | 1598 | net = dev->netdev; |
1597 | if (net && peer->ip) | 1599 | if (net && peer->ip) |
1598 | arp_invalidate(net, peer->ip); | 1600 | arp_invalidate(net, peer->ip); |
1599 | 1601 | ||
1600 | fwnet_remove_peer(peer, dev); | 1602 | fwnet_remove_peer(peer, dev); |
1601 | 1603 | ||
1602 | if (list_empty(&dev->peer_list)) { | 1604 | if (list_empty(&dev->peer_list)) { |
1603 | unregister_netdev(net); | 1605 | unregister_netdev(net); |
1604 | 1606 | ||
1605 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) | 1607 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) |
1606 | fw_core_remove_address_handler(&dev->handler); | 1608 | fw_core_remove_address_handler(&dev->handler); |
1607 | if (dev->broadcast_rcv_context) { | 1609 | if (dev->broadcast_rcv_context) { |
1608 | fw_iso_context_stop(dev->broadcast_rcv_context); | 1610 | fw_iso_context_stop(dev->broadcast_rcv_context); |
1609 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, | 1611 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, |
1610 | dev->card); | 1612 | dev->card); |
1611 | fw_iso_context_destroy(dev->broadcast_rcv_context); | 1613 | fw_iso_context_destroy(dev->broadcast_rcv_context); |
1612 | } | 1614 | } |
1613 | for (i = 0; dev->queued_datagrams && i < 5; i++) | 1615 | for (i = 0; dev->queued_datagrams && i < 5; i++) |
1614 | ssleep(1); | 1616 | ssleep(1); |
1615 | WARN_ON(dev->queued_datagrams); | 1617 | WARN_ON(dev->queued_datagrams); |
1616 | list_del(&dev->dev_link); | 1618 | list_del(&dev->dev_link); |
1617 | 1619 | ||
1618 | free_netdev(net); | 1620 | free_netdev(net); |
1619 | } | 1621 | } |
1620 | 1622 | ||
1621 | mutex_unlock(&fwnet_device_mutex); | 1623 | mutex_unlock(&fwnet_device_mutex); |
1622 | 1624 | ||
1623 | return 0; | 1625 | return 0; |
1624 | } | 1626 | } |
1625 | 1627 | ||
1626 | /* | 1628 | /* |
1627 | * FIXME abort partially sent fragmented datagrams, | 1629 | * FIXME abort partially sent fragmented datagrams, |
1628 | * discard partially received fragmented datagrams | 1630 | * discard partially received fragmented datagrams |
1629 | */ | 1631 | */ |
1630 | static void fwnet_update(struct fw_unit *unit) | 1632 | static void fwnet_update(struct fw_unit *unit) |
1631 | { | 1633 | { |
1632 | struct fw_device *device = fw_parent_device(unit); | 1634 | struct fw_device *device = fw_parent_device(unit); |
1633 | struct fwnet_peer *peer = dev_get_drvdata(&unit->device); | 1635 | struct fwnet_peer *peer = dev_get_drvdata(&unit->device); |
1634 | int generation; | 1636 | int generation; |
1635 | 1637 | ||
1636 | generation = device->generation; | 1638 | generation = device->generation; |
1637 | 1639 | ||
1638 | spin_lock_irq(&peer->dev->lock); | 1640 | spin_lock_irq(&peer->dev->lock); |
1639 | peer->node_id = device->node_id; | 1641 | peer->node_id = device->node_id; |
1640 | peer->generation = generation; | 1642 | peer->generation = generation; |
1641 | spin_unlock_irq(&peer->dev->lock); | 1643 | spin_unlock_irq(&peer->dev->lock); |
1642 | } | 1644 | } |
1643 | 1645 | ||
1644 | static const struct ieee1394_device_id fwnet_id_table[] = { | 1646 | static const struct ieee1394_device_id fwnet_id_table[] = { |
1645 | { | 1647 | { |
1646 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | 1648 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | |
1647 | IEEE1394_MATCH_VERSION, | 1649 | IEEE1394_MATCH_VERSION, |
1648 | .specifier_id = IANA_SPECIFIER_ID, | 1650 | .specifier_id = IANA_SPECIFIER_ID, |
1649 | .version = RFC2734_SW_VERSION, | 1651 | .version = RFC2734_SW_VERSION, |
1650 | }, | 1652 | }, |
1651 | { } | 1653 | { } |
1652 | }; | 1654 | }; |
1653 | 1655 | ||
1654 | static struct fw_driver fwnet_driver = { | 1656 | static struct fw_driver fwnet_driver = { |
1655 | .driver = { | 1657 | .driver = { |
1656 | .owner = THIS_MODULE, | 1658 | .owner = THIS_MODULE, |
1657 | .name = "net", | 1659 | .name = "net", |
1658 | .bus = &fw_bus_type, | 1660 | .bus = &fw_bus_type, |
1659 | .probe = fwnet_probe, | 1661 | .probe = fwnet_probe, |
1660 | .remove = fwnet_remove, | 1662 | .remove = fwnet_remove, |
1661 | }, | 1663 | }, |
1662 | .update = fwnet_update, | 1664 | .update = fwnet_update, |
1663 | .id_table = fwnet_id_table, | 1665 | .id_table = fwnet_id_table, |
1664 | }; | 1666 | }; |
1665 | 1667 | ||
1666 | static const u32 rfc2374_unit_directory_data[] = { | 1668 | static const u32 rfc2374_unit_directory_data[] = { |
1667 | 0x00040000, /* directory_length */ | 1669 | 0x00040000, /* directory_length */ |
1668 | 0x1200005e, /* unit_specifier_id: IANA */ | 1670 | 0x1200005e, /* unit_specifier_id: IANA */ |
1669 | 0x81000003, /* textual descriptor offset */ | 1671 | 0x81000003, /* textual descriptor offset */ |
1670 | 0x13000001, /* unit_sw_version: RFC 2734 */ | 1672 | 0x13000001, /* unit_sw_version: RFC 2734 */ |
1671 | 0x81000005, /* textual descriptor offset */ | 1673 | 0x81000005, /* textual descriptor offset */ |
1672 | 0x00030000, /* descriptor_length */ | 1674 | 0x00030000, /* descriptor_length */ |
1673 | 0x00000000, /* text */ | 1675 | 0x00000000, /* text */ |
1674 | 0x00000000, /* minimal ASCII, en */ | 1676 | 0x00000000, /* minimal ASCII, en */ |
1675 | 0x49414e41, /* I A N A */ | 1677 | 0x49414e41, /* I A N A */ |
1676 | 0x00030000, /* descriptor_length */ | 1678 | 0x00030000, /* descriptor_length */ |
1677 | 0x00000000, /* text */ | 1679 | 0x00000000, /* text */ |
1678 | 0x00000000, /* minimal ASCII, en */ | 1680 | 0x00000000, /* minimal ASCII, en */ |
1679 | 0x49507634, /* I P v 4 */ | 1681 | 0x49507634, /* I P v 4 */ |
1680 | }; | 1682 | }; |
1681 | 1683 | ||
1682 | static struct fw_descriptor rfc2374_unit_directory = { | 1684 | static struct fw_descriptor rfc2374_unit_directory = { |
1683 | .length = ARRAY_SIZE(rfc2374_unit_directory_data), | 1685 | .length = ARRAY_SIZE(rfc2374_unit_directory_data), |
1684 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, | 1686 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, |
1685 | .data = rfc2374_unit_directory_data | 1687 | .data = rfc2374_unit_directory_data |
1686 | }; | 1688 | }; |
1687 | 1689 | ||
1688 | static int __init fwnet_init(void) | 1690 | static int __init fwnet_init(void) |
1689 | { | 1691 | { |
1690 | int err; | 1692 | int err; |
1691 | 1693 | ||
1692 | err = fw_core_add_descriptor(&rfc2374_unit_directory); | 1694 | err = fw_core_add_descriptor(&rfc2374_unit_directory); |
1693 | if (err) | 1695 | if (err) |
1694 | return err; | 1696 | return err; |
1695 | 1697 | ||
1696 | fwnet_packet_task_cache = kmem_cache_create("packet_task", | 1698 | fwnet_packet_task_cache = kmem_cache_create("packet_task", |
1697 | sizeof(struct fwnet_packet_task), 0, 0, NULL); | 1699 | sizeof(struct fwnet_packet_task), 0, 0, NULL); |
1698 | if (!fwnet_packet_task_cache) { | 1700 | if (!fwnet_packet_task_cache) { |
1699 | err = -ENOMEM; | 1701 | err = -ENOMEM; |
1700 | goto out; | 1702 | goto out; |
1701 | } | 1703 | } |
1702 | 1704 | ||
1703 | err = driver_register(&fwnet_driver.driver); | 1705 | err = driver_register(&fwnet_driver.driver); |
1704 | if (!err) | 1706 | if (!err) |
1705 | return 0; | 1707 | return 0; |
1706 | 1708 | ||
1707 | kmem_cache_destroy(fwnet_packet_task_cache); | 1709 | kmem_cache_destroy(fwnet_packet_task_cache); |
1708 | out: | 1710 | out: |
1709 | fw_core_remove_descriptor(&rfc2374_unit_directory); | 1711 | fw_core_remove_descriptor(&rfc2374_unit_directory); |
1710 | 1712 | ||
1711 | return err; | 1713 | return err; |
1712 | } | 1714 | } |
1713 | module_init(fwnet_init); | 1715 | module_init(fwnet_init); |
1714 | 1716 | ||
1715 | static void __exit fwnet_cleanup(void) | 1717 | static void __exit fwnet_cleanup(void) |
1716 | { | 1718 | { |
1717 | driver_unregister(&fwnet_driver.driver); | 1719 | driver_unregister(&fwnet_driver.driver); |
1718 | kmem_cache_destroy(fwnet_packet_task_cache); | 1720 | kmem_cache_destroy(fwnet_packet_task_cache); |
1719 | fw_core_remove_descriptor(&rfc2374_unit_directory); | 1721 | fw_core_remove_descriptor(&rfc2374_unit_directory); |
1720 | } | 1722 | } |
1721 | module_exit(fwnet_cleanup); | 1723 | module_exit(fwnet_cleanup); |
1722 | 1724 | ||
1723 | MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); | 1725 | MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); |
1724 | MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); | 1726 | MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); |
1725 | MODULE_LICENSE("GPL"); | 1727 | MODULE_LICENSE("GPL"); |
1726 | MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); | 1728 | MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); |
1727 | 1729 |
drivers/firewire/ohci.c
1 | /* | 1 | /* |
2 | * Driver for OHCI 1394 controllers | 2 | * Driver for OHCI 1394 controllers |
3 | * | 3 | * |
4 | * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> | 4 | * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software Foundation, | 17 | * along with this program; if not, write to the Free Software Foundation, |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
22 | #include <linux/bug.h> | 22 | #include <linux/bug.h> |
23 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | #include <linux/firewire.h> | 27 | #include <linux/firewire.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/moduleparam.h> | 36 | #include <linux/moduleparam.h> |
37 | #include <linux/mutex.h> | 37 | #include <linux/mutex.h> |
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/pci_ids.h> | 39 | #include <linux/pci_ids.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/spinlock.h> | 41 | #include <linux/spinlock.h> |
42 | #include <linux/string.h> | 42 | #include <linux/string.h> |
43 | #include <linux/time.h> | 43 | #include <linux/time.h> |
44 | #include <linux/vmalloc.h> | 44 | #include <linux/vmalloc.h> |
45 | 45 | ||
46 | #include <asm/byteorder.h> | 46 | #include <asm/byteorder.h> |
47 | #include <asm/page.h> | 47 | #include <asm/page.h> |
48 | #include <asm/system.h> | 48 | #include <asm/system.h> |
49 | 49 | ||
50 | #ifdef CONFIG_PPC_PMAC | 50 | #ifdef CONFIG_PPC_PMAC |
51 | #include <asm/pmac_feature.h> | 51 | #include <asm/pmac_feature.h> |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #include "core.h" | 54 | #include "core.h" |
55 | #include "ohci.h" | 55 | #include "ohci.h" |
56 | 56 | ||
57 | #define DESCRIPTOR_OUTPUT_MORE 0 | 57 | #define DESCRIPTOR_OUTPUT_MORE 0 |
58 | #define DESCRIPTOR_OUTPUT_LAST (1 << 12) | 58 | #define DESCRIPTOR_OUTPUT_LAST (1 << 12) |
59 | #define DESCRIPTOR_INPUT_MORE (2 << 12) | 59 | #define DESCRIPTOR_INPUT_MORE (2 << 12) |
60 | #define DESCRIPTOR_INPUT_LAST (3 << 12) | 60 | #define DESCRIPTOR_INPUT_LAST (3 << 12) |
61 | #define DESCRIPTOR_STATUS (1 << 11) | 61 | #define DESCRIPTOR_STATUS (1 << 11) |
62 | #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) | 62 | #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8) |
63 | #define DESCRIPTOR_PING (1 << 7) | 63 | #define DESCRIPTOR_PING (1 << 7) |
64 | #define DESCRIPTOR_YY (1 << 6) | 64 | #define DESCRIPTOR_YY (1 << 6) |
65 | #define DESCRIPTOR_NO_IRQ (0 << 4) | 65 | #define DESCRIPTOR_NO_IRQ (0 << 4) |
66 | #define DESCRIPTOR_IRQ_ERROR (1 << 4) | 66 | #define DESCRIPTOR_IRQ_ERROR (1 << 4) |
67 | #define DESCRIPTOR_IRQ_ALWAYS (3 << 4) | 67 | #define DESCRIPTOR_IRQ_ALWAYS (3 << 4) |
68 | #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) | 68 | #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) |
69 | #define DESCRIPTOR_WAIT (3 << 0) | 69 | #define DESCRIPTOR_WAIT (3 << 0) |
70 | 70 | ||
71 | struct descriptor { | 71 | struct descriptor { |
72 | __le16 req_count; | 72 | __le16 req_count; |
73 | __le16 control; | 73 | __le16 control; |
74 | __le32 data_address; | 74 | __le32 data_address; |
75 | __le32 branch_address; | 75 | __le32 branch_address; |
76 | __le16 res_count; | 76 | __le16 res_count; |
77 | __le16 transfer_status; | 77 | __le16 transfer_status; |
78 | } __attribute__((aligned(16))); | 78 | } __attribute__((aligned(16))); |
79 | 79 | ||
80 | #define CONTROL_SET(regs) (regs) | 80 | #define CONTROL_SET(regs) (regs) |
81 | #define CONTROL_CLEAR(regs) ((regs) + 4) | 81 | #define CONTROL_CLEAR(regs) ((regs) + 4) |
82 | #define COMMAND_PTR(regs) ((regs) + 12) | 82 | #define COMMAND_PTR(regs) ((regs) + 12) |
83 | #define CONTEXT_MATCH(regs) ((regs) + 16) | 83 | #define CONTEXT_MATCH(regs) ((regs) + 16) |
84 | 84 | ||
85 | #define AR_BUFFER_SIZE (32*1024) | 85 | #define AR_BUFFER_SIZE (32*1024) |
86 | #define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE) | 86 | #define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE) |
87 | /* we need at least two pages for proper list management */ | 87 | /* we need at least two pages for proper list management */ |
88 | #define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2) | 88 | #define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2) |
89 | 89 | ||
90 | #define MAX_ASYNC_PAYLOAD 4096 | 90 | #define MAX_ASYNC_PAYLOAD 4096 |
91 | #define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4) | 91 | #define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4) |
92 | #define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE) | 92 | #define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE) |
93 | 93 | ||
94 | struct ar_context { | 94 | struct ar_context { |
95 | struct fw_ohci *ohci; | 95 | struct fw_ohci *ohci; |
96 | struct page *pages[AR_BUFFERS]; | 96 | struct page *pages[AR_BUFFERS]; |
97 | void *buffer; | 97 | void *buffer; |
98 | struct descriptor *descriptors; | 98 | struct descriptor *descriptors; |
99 | dma_addr_t descriptors_bus; | 99 | dma_addr_t descriptors_bus; |
100 | void *pointer; | 100 | void *pointer; |
101 | unsigned int last_buffer_index; | 101 | unsigned int last_buffer_index; |
102 | u32 regs; | 102 | u32 regs; |
103 | struct tasklet_struct tasklet; | 103 | struct tasklet_struct tasklet; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | struct context; | 106 | struct context; |
107 | 107 | ||
108 | typedef int (*descriptor_callback_t)(struct context *ctx, | 108 | typedef int (*descriptor_callback_t)(struct context *ctx, |
109 | struct descriptor *d, | 109 | struct descriptor *d, |
110 | struct descriptor *last); | 110 | struct descriptor *last); |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * A buffer that contains a block of DMA-able coherent memory used for | 113 | * A buffer that contains a block of DMA-able coherent memory used for |
114 | * storing a portion of a DMA descriptor program. | 114 | * storing a portion of a DMA descriptor program. |
115 | */ | 115 | */ |
116 | struct descriptor_buffer { | 116 | struct descriptor_buffer { |
117 | struct list_head list; | 117 | struct list_head list; |
118 | dma_addr_t buffer_bus; | 118 | dma_addr_t buffer_bus; |
119 | size_t buffer_size; | 119 | size_t buffer_size; |
120 | size_t used; | 120 | size_t used; |
121 | struct descriptor buffer[0]; | 121 | struct descriptor buffer[0]; |
122 | }; | 122 | }; |
123 | 123 | ||
124 | struct context { | 124 | struct context { |
125 | struct fw_ohci *ohci; | 125 | struct fw_ohci *ohci; |
126 | u32 regs; | 126 | u32 regs; |
127 | int total_allocation; | 127 | int total_allocation; |
128 | bool running; | 128 | bool running; |
129 | bool flushing; | 129 | bool flushing; |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * List of page-sized buffers for storing DMA descriptors. | 132 | * List of page-sized buffers for storing DMA descriptors. |
133 | * Head of list contains buffers in use and tail of list contains | 133 | * Head of list contains buffers in use and tail of list contains |
134 | * free buffers. | 134 | * free buffers. |
135 | */ | 135 | */ |
136 | struct list_head buffer_list; | 136 | struct list_head buffer_list; |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Pointer to a buffer inside buffer_list that contains the tail | 139 | * Pointer to a buffer inside buffer_list that contains the tail |
140 | * end of the current DMA program. | 140 | * end of the current DMA program. |
141 | */ | 141 | */ |
142 | struct descriptor_buffer *buffer_tail; | 142 | struct descriptor_buffer *buffer_tail; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * The descriptor containing the branch address of the first | 145 | * The descriptor containing the branch address of the first |
146 | * descriptor that has not yet been filled by the device. | 146 | * descriptor that has not yet been filled by the device. |
147 | */ | 147 | */ |
148 | struct descriptor *last; | 148 | struct descriptor *last; |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * The last descriptor in the DMA program. It contains the branch | 151 | * The last descriptor in the DMA program. It contains the branch |
152 | * address that must be updated upon appending a new descriptor. | 152 | * address that must be updated upon appending a new descriptor. |
153 | */ | 153 | */ |
154 | struct descriptor *prev; | 154 | struct descriptor *prev; |
155 | 155 | ||
156 | descriptor_callback_t callback; | 156 | descriptor_callback_t callback; |
157 | 157 | ||
158 | struct tasklet_struct tasklet; | 158 | struct tasklet_struct tasklet; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | #define IT_HEADER_SY(v) ((v) << 0) | 161 | #define IT_HEADER_SY(v) ((v) << 0) |
162 | #define IT_HEADER_TCODE(v) ((v) << 4) | 162 | #define IT_HEADER_TCODE(v) ((v) << 4) |
163 | #define IT_HEADER_CHANNEL(v) ((v) << 8) | 163 | #define IT_HEADER_CHANNEL(v) ((v) << 8) |
164 | #define IT_HEADER_TAG(v) ((v) << 14) | 164 | #define IT_HEADER_TAG(v) ((v) << 14) |
165 | #define IT_HEADER_SPEED(v) ((v) << 16) | 165 | #define IT_HEADER_SPEED(v) ((v) << 16) |
166 | #define IT_HEADER_DATA_LENGTH(v) ((v) << 16) | 166 | #define IT_HEADER_DATA_LENGTH(v) ((v) << 16) |
167 | 167 | ||
168 | struct iso_context { | 168 | struct iso_context { |
169 | struct fw_iso_context base; | 169 | struct fw_iso_context base; |
170 | struct context context; | 170 | struct context context; |
171 | int excess_bytes; | 171 | int excess_bytes; |
172 | void *header; | 172 | void *header; |
173 | size_t header_length; | 173 | size_t header_length; |
174 | 174 | ||
175 | u8 sync; | 175 | u8 sync; |
176 | u8 tags; | 176 | u8 tags; |
177 | }; | 177 | }; |
178 | 178 | ||
179 | #define CONFIG_ROM_SIZE 1024 | 179 | #define CONFIG_ROM_SIZE 1024 |
180 | 180 | ||
181 | struct fw_ohci { | 181 | struct fw_ohci { |
182 | struct fw_card card; | 182 | struct fw_card card; |
183 | 183 | ||
184 | __iomem char *registers; | 184 | __iomem char *registers; |
185 | int node_id; | 185 | int node_id; |
186 | int generation; | 186 | int generation; |
187 | int request_generation; /* for timestamping incoming requests */ | 187 | int request_generation; /* for timestamping incoming requests */ |
188 | unsigned quirks; | 188 | unsigned quirks; |
189 | unsigned int pri_req_max; | 189 | unsigned int pri_req_max; |
190 | u32 bus_time; | 190 | u32 bus_time; |
191 | bool is_root; | 191 | bool is_root; |
192 | bool csr_state_setclear_abdicate; | 192 | bool csr_state_setclear_abdicate; |
193 | int n_ir; | 193 | int n_ir; |
194 | int n_it; | 194 | int n_it; |
195 | /* | 195 | /* |
196 | * Spinlock for accessing fw_ohci data. Never call out of | 196 | * Spinlock for accessing fw_ohci data. Never call out of |
197 | * this driver with this lock held. | 197 | * this driver with this lock held. |
198 | */ | 198 | */ |
199 | spinlock_t lock; | 199 | spinlock_t lock; |
200 | 200 | ||
201 | struct mutex phy_reg_mutex; | 201 | struct mutex phy_reg_mutex; |
202 | 202 | ||
203 | void *misc_buffer; | 203 | void *misc_buffer; |
204 | dma_addr_t misc_buffer_bus; | 204 | dma_addr_t misc_buffer_bus; |
205 | 205 | ||
206 | struct ar_context ar_request_ctx; | 206 | struct ar_context ar_request_ctx; |
207 | struct ar_context ar_response_ctx; | 207 | struct ar_context ar_response_ctx; |
208 | struct context at_request_ctx; | 208 | struct context at_request_ctx; |
209 | struct context at_response_ctx; | 209 | struct context at_response_ctx; |
210 | 210 | ||
211 | u32 it_context_support; | 211 | u32 it_context_support; |
212 | u32 it_context_mask; /* unoccupied IT contexts */ | 212 | u32 it_context_mask; /* unoccupied IT contexts */ |
213 | struct iso_context *it_context_list; | 213 | struct iso_context *it_context_list; |
214 | u64 ir_context_channels; /* unoccupied channels */ | 214 | u64 ir_context_channels; /* unoccupied channels */ |
215 | u32 ir_context_support; | 215 | u32 ir_context_support; |
216 | u32 ir_context_mask; /* unoccupied IR contexts */ | 216 | u32 ir_context_mask; /* unoccupied IR contexts */ |
217 | struct iso_context *ir_context_list; | 217 | struct iso_context *ir_context_list; |
218 | u64 mc_channels; /* channels in use by the multichannel IR context */ | 218 | u64 mc_channels; /* channels in use by the multichannel IR context */ |
219 | bool mc_allocated; | 219 | bool mc_allocated; |
220 | 220 | ||
221 | __be32 *config_rom; | 221 | __be32 *config_rom; |
222 | dma_addr_t config_rom_bus; | 222 | dma_addr_t config_rom_bus; |
223 | __be32 *next_config_rom; | 223 | __be32 *next_config_rom; |
224 | dma_addr_t next_config_rom_bus; | 224 | dma_addr_t next_config_rom_bus; |
225 | __be32 next_header; | 225 | __be32 next_header; |
226 | 226 | ||
227 | __le32 *self_id_cpu; | 227 | __le32 *self_id_cpu; |
228 | dma_addr_t self_id_bus; | 228 | dma_addr_t self_id_bus; |
229 | struct tasklet_struct bus_reset_tasklet; | 229 | struct tasklet_struct bus_reset_tasklet; |
230 | 230 | ||
231 | u32 self_id_buffer[512]; | 231 | u32 self_id_buffer[512]; |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static inline struct fw_ohci *fw_ohci(struct fw_card *card) | 234 | static inline struct fw_ohci *fw_ohci(struct fw_card *card) |
235 | { | 235 | { |
236 | return container_of(card, struct fw_ohci, card); | 236 | return container_of(card, struct fw_ohci, card); |
237 | } | 237 | } |
238 | 238 | ||
239 | #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 | 239 | #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 |
240 | #define IR_CONTEXT_BUFFER_FILL 0x80000000 | 240 | #define IR_CONTEXT_BUFFER_FILL 0x80000000 |
241 | #define IR_CONTEXT_ISOCH_HEADER 0x40000000 | 241 | #define IR_CONTEXT_ISOCH_HEADER 0x40000000 |
242 | #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 | 242 | #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000 |
243 | #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 | 243 | #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000 |
244 | #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 | 244 | #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000 |
245 | 245 | ||
246 | #define CONTEXT_RUN 0x8000 | 246 | #define CONTEXT_RUN 0x8000 |
247 | #define CONTEXT_WAKE 0x1000 | 247 | #define CONTEXT_WAKE 0x1000 |
248 | #define CONTEXT_DEAD 0x0800 | 248 | #define CONTEXT_DEAD 0x0800 |
249 | #define CONTEXT_ACTIVE 0x0400 | 249 | #define CONTEXT_ACTIVE 0x0400 |
250 | 250 | ||
251 | #define OHCI1394_MAX_AT_REQ_RETRIES 0xf | 251 | #define OHCI1394_MAX_AT_REQ_RETRIES 0xf |
252 | #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 | 252 | #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 |
253 | #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 | 253 | #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 |
254 | 254 | ||
255 | #define OHCI1394_REGISTER_SIZE 0x800 | 255 | #define OHCI1394_REGISTER_SIZE 0x800 |
256 | #define OHCI_LOOP_COUNT 500 | 256 | #define OHCI_LOOP_COUNT 500 |
257 | #define OHCI1394_PCI_HCI_Control 0x40 | 257 | #define OHCI1394_PCI_HCI_Control 0x40 |
258 | #define SELF_ID_BUF_SIZE 0x800 | 258 | #define SELF_ID_BUF_SIZE 0x800 |
259 | #define OHCI_TCODE_PHY_PACKET 0x0e | 259 | #define OHCI_TCODE_PHY_PACKET 0x0e |
260 | #define OHCI_VERSION_1_1 0x010010 | 260 | #define OHCI_VERSION_1_1 0x010010 |
261 | 261 | ||
262 | static char ohci_driver_name[] = KBUILD_MODNAME; | 262 | static char ohci_driver_name[] = KBUILD_MODNAME; |
263 | 263 | ||
264 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | 264 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 |
265 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 | 265 | #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 |
266 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 | 266 | #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 |
267 | 267 | ||
268 | #define QUIRK_CYCLE_TIMER 1 | 268 | #define QUIRK_CYCLE_TIMER 1 |
269 | #define QUIRK_RESET_PACKET 2 | 269 | #define QUIRK_RESET_PACKET 2 |
270 | #define QUIRK_BE_HEADERS 4 | 270 | #define QUIRK_BE_HEADERS 4 |
271 | #define QUIRK_NO_1394A 8 | 271 | #define QUIRK_NO_1394A 8 |
272 | #define QUIRK_NO_MSI 16 | 272 | #define QUIRK_NO_MSI 16 |
273 | 273 | ||
274 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ | 274 | /* In case of multiple matches in ohci_quirks[], only the first one is used. */ |
275 | static const struct { | 275 | static const struct { |
276 | unsigned short vendor, device, revision, flags; | 276 | unsigned short vendor, device, revision, flags; |
277 | } ohci_quirks[] = { | 277 | } ohci_quirks[] = { |
278 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, | 278 | {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, |
279 | QUIRK_CYCLE_TIMER}, | 279 | QUIRK_CYCLE_TIMER}, |
280 | 280 | ||
281 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, | 281 | {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, |
282 | QUIRK_BE_HEADERS}, | 282 | QUIRK_BE_HEADERS}, |
283 | 283 | ||
284 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, | 284 | {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, |
285 | QUIRK_NO_MSI}, | 285 | QUIRK_NO_MSI}, |
286 | 286 | ||
287 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, | 287 | {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, |
288 | QUIRK_NO_MSI}, | 288 | QUIRK_NO_MSI}, |
289 | 289 | ||
290 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, | 290 | {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, |
291 | QUIRK_CYCLE_TIMER}, | 291 | QUIRK_CYCLE_TIMER}, |
292 | 292 | ||
293 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, | 293 | {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, |
294 | QUIRK_CYCLE_TIMER}, | 294 | QUIRK_CYCLE_TIMER}, |
295 | 295 | ||
296 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, | 296 | {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, |
297 | QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, | 297 | QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, |
298 | 298 | ||
299 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, | 299 | {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, |
300 | QUIRK_RESET_PACKET}, | 300 | QUIRK_RESET_PACKET}, |
301 | 301 | ||
302 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, | 302 | {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, |
303 | QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, | 303 | QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, |
304 | }; | 304 | }; |
305 | 305 | ||
306 | /* This overrides anything that was found in ohci_quirks[]. */ | 306 | /* This overrides anything that was found in ohci_quirks[]. */ |
307 | static int param_quirks; | 307 | static int param_quirks; |
308 | module_param_named(quirks, param_quirks, int, 0644); | 308 | module_param_named(quirks, param_quirks, int, 0644); |
309 | MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" | 309 | MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" |
310 | ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) | 310 | ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) |
311 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) | 311 | ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) |
312 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) | 312 | ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) |
313 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) | 313 | ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) |
314 | ", disable MSI = " __stringify(QUIRK_NO_MSI) | 314 | ", disable MSI = " __stringify(QUIRK_NO_MSI) |
315 | ")"); | 315 | ")"); |
316 | 316 | ||
317 | #define OHCI_PARAM_DEBUG_AT_AR 1 | 317 | #define OHCI_PARAM_DEBUG_AT_AR 1 |
318 | #define OHCI_PARAM_DEBUG_SELFIDS 2 | 318 | #define OHCI_PARAM_DEBUG_SELFIDS 2 |
319 | #define OHCI_PARAM_DEBUG_IRQS 4 | 319 | #define OHCI_PARAM_DEBUG_IRQS 4 |
320 | #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ | 320 | #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ |
321 | 321 | ||
322 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG | 322 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG |
323 | 323 | ||
324 | static int param_debug; | 324 | static int param_debug; |
325 | module_param_named(debug, param_debug, int, 0644); | 325 | module_param_named(debug, param_debug, int, 0644); |
326 | MODULE_PARM_DESC(debug, "Verbose logging (default = 0" | 326 | MODULE_PARM_DESC(debug, "Verbose logging (default = 0" |
327 | ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) | 327 | ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) |
328 | ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) | 328 | ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) |
329 | ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) | 329 | ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) |
330 | ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) | 330 | ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) |
331 | ", or a combination, or all = -1)"); | 331 | ", or a combination, or all = -1)"); |
332 | 332 | ||
333 | static void log_irqs(u32 evt) | 333 | static void log_irqs(u32 evt) |
334 | { | 334 | { |
335 | if (likely(!(param_debug & | 335 | if (likely(!(param_debug & |
336 | (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) | 336 | (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) |
337 | return; | 337 | return; |
338 | 338 | ||
339 | if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && | 339 | if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && |
340 | !(evt & OHCI1394_busReset)) | 340 | !(evt & OHCI1394_busReset)) |
341 | return; | 341 | return; |
342 | 342 | ||
343 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, | 343 | fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, |
344 | evt & OHCI1394_selfIDComplete ? " selfID" : "", | 344 | evt & OHCI1394_selfIDComplete ? " selfID" : "", |
345 | evt & OHCI1394_RQPkt ? " AR_req" : "", | 345 | evt & OHCI1394_RQPkt ? " AR_req" : "", |
346 | evt & OHCI1394_RSPkt ? " AR_resp" : "", | 346 | evt & OHCI1394_RSPkt ? " AR_resp" : "", |
347 | evt & OHCI1394_reqTxComplete ? " AT_req" : "", | 347 | evt & OHCI1394_reqTxComplete ? " AT_req" : "", |
348 | evt & OHCI1394_respTxComplete ? " AT_resp" : "", | 348 | evt & OHCI1394_respTxComplete ? " AT_resp" : "", |
349 | evt & OHCI1394_isochRx ? " IR" : "", | 349 | evt & OHCI1394_isochRx ? " IR" : "", |
350 | evt & OHCI1394_isochTx ? " IT" : "", | 350 | evt & OHCI1394_isochTx ? " IT" : "", |
351 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", | 351 | evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", |
352 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", | 352 | evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", |
353 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", | 353 | evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", |
354 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", | 354 | evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", |
355 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", | 355 | evt & OHCI1394_regAccessFail ? " regAccessFail" : "", |
356 | evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", | 356 | evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", |
357 | evt & OHCI1394_busReset ? " busReset" : "", | 357 | evt & OHCI1394_busReset ? " busReset" : "", |
358 | evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | | 358 | evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | |
359 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | | 359 | OHCI1394_RSPkt | OHCI1394_reqTxComplete | |
360 | OHCI1394_respTxComplete | OHCI1394_isochRx | | 360 | OHCI1394_respTxComplete | OHCI1394_isochRx | |
361 | OHCI1394_isochTx | OHCI1394_postedWriteErr | | 361 | OHCI1394_isochTx | OHCI1394_postedWriteErr | |
362 | OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | | 362 | OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | |
363 | OHCI1394_cycleInconsistent | | 363 | OHCI1394_cycleInconsistent | |
364 | OHCI1394_regAccessFail | OHCI1394_busReset) | 364 | OHCI1394_regAccessFail | OHCI1394_busReset) |
365 | ? " ?" : ""); | 365 | ? " ?" : ""); |
366 | } | 366 | } |
367 | 367 | ||
368 | static const char *speed[] = { | 368 | static const char *speed[] = { |
369 | [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", | 369 | [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", |
370 | }; | 370 | }; |
371 | static const char *power[] = { | 371 | static const char *power[] = { |
372 | [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", | 372 | [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", |
373 | [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", | 373 | [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", |
374 | }; | 374 | }; |
375 | static const char port[] = { '.', '-', 'p', 'c', }; | 375 | static const char port[] = { '.', '-', 'p', 'c', }; |
376 | 376 | ||
377 | static char _p(u32 *s, int shift) | 377 | static char _p(u32 *s, int shift) |
378 | { | 378 | { |
379 | return port[*s >> shift & 3]; | 379 | return port[*s >> shift & 3]; |
380 | } | 380 | } |
381 | 381 | ||
382 | static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) | 382 | static void log_selfids(int node_id, int generation, int self_id_count, u32 *s) |
383 | { | 383 | { |
384 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) | 384 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) |
385 | return; | 385 | return; |
386 | 386 | ||
387 | fw_notify("%d selfIDs, generation %d, local node ID %04x\n", | 387 | fw_notify("%d selfIDs, generation %d, local node ID %04x\n", |
388 | self_id_count, generation, node_id); | 388 | self_id_count, generation, node_id); |
389 | 389 | ||
390 | for (; self_id_count--; ++s) | 390 | for (; self_id_count--; ++s) |
391 | if ((*s & 1 << 23) == 0) | 391 | if ((*s & 1 << 23) == 0) |
392 | fw_notify("selfID 0: %08x, phy %d [%c%c%c] " | 392 | fw_notify("selfID 0: %08x, phy %d [%c%c%c] " |
393 | "%s gc=%d %s %s%s%s\n", | 393 | "%s gc=%d %s %s%s%s\n", |
394 | *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), | 394 | *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), |
395 | speed[*s >> 14 & 3], *s >> 16 & 63, | 395 | speed[*s >> 14 & 3], *s >> 16 & 63, |
396 | power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", | 396 | power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", |
397 | *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); | 397 | *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : ""); |
398 | else | 398 | else |
399 | fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", | 399 | fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", |
400 | *s, *s >> 24 & 63, | 400 | *s, *s >> 24 & 63, |
401 | _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), | 401 | _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), |
402 | _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); | 402 | _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); |
403 | } | 403 | } |
404 | 404 | ||
405 | static const char *evts[] = { | 405 | static const char *evts[] = { |
406 | [0x00] = "evt_no_status", [0x01] = "-reserved-", | 406 | [0x00] = "evt_no_status", [0x01] = "-reserved-", |
407 | [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", | 407 | [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", |
408 | [0x04] = "evt_underrun", [0x05] = "evt_overrun", | 408 | [0x04] = "evt_underrun", [0x05] = "evt_overrun", |
409 | [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", | 409 | [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", |
410 | [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", | 410 | [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", |
411 | [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", | 411 | [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", |
412 | [0x0c] = "-reserved-", [0x0d] = "-reserved-", | 412 | [0x0c] = "-reserved-", [0x0d] = "-reserved-", |
413 | [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", | 413 | [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", |
414 | [0x10] = "-reserved-", [0x11] = "ack_complete", | 414 | [0x10] = "-reserved-", [0x11] = "ack_complete", |
415 | [0x12] = "ack_pending ", [0x13] = "-reserved-", | 415 | [0x12] = "ack_pending ", [0x13] = "-reserved-", |
416 | [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", | 416 | [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", |
417 | [0x16] = "ack_busy_B", [0x17] = "-reserved-", | 417 | [0x16] = "ack_busy_B", [0x17] = "-reserved-", |
418 | [0x18] = "-reserved-", [0x19] = "-reserved-", | 418 | [0x18] = "-reserved-", [0x19] = "-reserved-", |
419 | [0x1a] = "-reserved-", [0x1b] = "ack_tardy", | 419 | [0x1a] = "-reserved-", [0x1b] = "ack_tardy", |
420 | [0x1c] = "-reserved-", [0x1d] = "ack_data_error", | 420 | [0x1c] = "-reserved-", [0x1d] = "ack_data_error", |
421 | [0x1e] = "ack_type_error", [0x1f] = "-reserved-", | 421 | [0x1e] = "ack_type_error", [0x1f] = "-reserved-", |
422 | [0x20] = "pending/cancelled", | 422 | [0x20] = "pending/cancelled", |
423 | }; | 423 | }; |
424 | static const char *tcodes[] = { | 424 | static const char *tcodes[] = { |
425 | [0x0] = "QW req", [0x1] = "BW req", | 425 | [0x0] = "QW req", [0x1] = "BW req", |
426 | [0x2] = "W resp", [0x3] = "-reserved-", | 426 | [0x2] = "W resp", [0x3] = "-reserved-", |
427 | [0x4] = "QR req", [0x5] = "BR req", | 427 | [0x4] = "QR req", [0x5] = "BR req", |
428 | [0x6] = "QR resp", [0x7] = "BR resp", | 428 | [0x6] = "QR resp", [0x7] = "BR resp", |
429 | [0x8] = "cycle start", [0x9] = "Lk req", | 429 | [0x8] = "cycle start", [0x9] = "Lk req", |
430 | [0xa] = "async stream packet", [0xb] = "Lk resp", | 430 | [0xa] = "async stream packet", [0xb] = "Lk resp", |
431 | [0xc] = "-reserved-", [0xd] = "-reserved-", | 431 | [0xc] = "-reserved-", [0xd] = "-reserved-", |
432 | [0xe] = "link internal", [0xf] = "-reserved-", | 432 | [0xe] = "link internal", [0xf] = "-reserved-", |
433 | }; | 433 | }; |
434 | 434 | ||
435 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) | 435 | static void log_ar_at_event(char dir, int speed, u32 *header, int evt) |
436 | { | 436 | { |
437 | int tcode = header[0] >> 4 & 0xf; | 437 | int tcode = header[0] >> 4 & 0xf; |
438 | char specific[12]; | 438 | char specific[12]; |
439 | 439 | ||
440 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) | 440 | if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) |
441 | return; | 441 | return; |
442 | 442 | ||
443 | if (unlikely(evt >= ARRAY_SIZE(evts))) | 443 | if (unlikely(evt >= ARRAY_SIZE(evts))) |
444 | evt = 0x1f; | 444 | evt = 0x1f; |
445 | 445 | ||
446 | if (evt == OHCI1394_evt_bus_reset) { | 446 | if (evt == OHCI1394_evt_bus_reset) { |
447 | fw_notify("A%c evt_bus_reset, generation %d\n", | 447 | fw_notify("A%c evt_bus_reset, generation %d\n", |
448 | dir, (header[2] >> 16) & 0xff); | 448 | dir, (header[2] >> 16) & 0xff); |
449 | return; | 449 | return; |
450 | } | 450 | } |
451 | 451 | ||
452 | switch (tcode) { | 452 | switch (tcode) { |
453 | case 0x0: case 0x6: case 0x8: | 453 | case 0x0: case 0x6: case 0x8: |
454 | snprintf(specific, sizeof(specific), " = %08x", | 454 | snprintf(specific, sizeof(specific), " = %08x", |
455 | be32_to_cpu((__force __be32)header[3])); | 455 | be32_to_cpu((__force __be32)header[3])); |
456 | break; | 456 | break; |
457 | case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: | 457 | case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: |
458 | snprintf(specific, sizeof(specific), " %x,%x", | 458 | snprintf(specific, sizeof(specific), " %x,%x", |
459 | header[3] >> 16, header[3] & 0xffff); | 459 | header[3] >> 16, header[3] & 0xffff); |
460 | break; | 460 | break; |
461 | default: | 461 | default: |
462 | specific[0] = '\0'; | 462 | specific[0] = '\0'; |
463 | } | 463 | } |
464 | 464 | ||
465 | switch (tcode) { | 465 | switch (tcode) { |
466 | case 0xa: | 466 | case 0xa: |
467 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); | 467 | fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); |
468 | break; | 468 | break; |
469 | case 0xe: | 469 | case 0xe: |
470 | fw_notify("A%c %s, PHY %08x %08x\n", | 470 | fw_notify("A%c %s, PHY %08x %08x\n", |
471 | dir, evts[evt], header[1], header[2]); | 471 | dir, evts[evt], header[1], header[2]); |
472 | break; | 472 | break; |
473 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: | 473 | case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: |
474 | fw_notify("A%c spd %x tl %02x, " | 474 | fw_notify("A%c spd %x tl %02x, " |
475 | "%04x -> %04x, %s, " | 475 | "%04x -> %04x, %s, " |
476 | "%s, %04x%08x%s\n", | 476 | "%s, %04x%08x%s\n", |
477 | dir, speed, header[0] >> 10 & 0x3f, | 477 | dir, speed, header[0] >> 10 & 0x3f, |
478 | header[1] >> 16, header[0] >> 16, evts[evt], | 478 | header[1] >> 16, header[0] >> 16, evts[evt], |
479 | tcodes[tcode], header[1] & 0xffff, header[2], specific); | 479 | tcodes[tcode], header[1] & 0xffff, header[2], specific); |
480 | break; | 480 | break; |
481 | default: | 481 | default: |
482 | fw_notify("A%c spd %x tl %02x, " | 482 | fw_notify("A%c spd %x tl %02x, " |
483 | "%04x -> %04x, %s, " | 483 | "%04x -> %04x, %s, " |
484 | "%s%s\n", | 484 | "%s%s\n", |
485 | dir, speed, header[0] >> 10 & 0x3f, | 485 | dir, speed, header[0] >> 10 & 0x3f, |
486 | header[1] >> 16, header[0] >> 16, evts[evt], | 486 | header[1] >> 16, header[0] >> 16, evts[evt], |
487 | tcodes[tcode], specific); | 487 | tcodes[tcode], specific); |
488 | } | 488 | } |
489 | } | 489 | } |
490 | 490 | ||
#else

/*
 * Debug logging compiled out (CONFIG_FIREWIRE_OHCI_DEBUG unset):
 * the debug module parameter reads as 0 and all log helpers become
 * empty inlines so call sites need no #ifdefs.
 */
#define param_debug 0
static inline void log_irqs(u32 evt) {}
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
499 | 499 | ||
/* Write a 32-bit value to the OHCI register at the given byte offset. */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}
504 | 504 | ||
/* Read the 32-bit OHCI register at the given byte offset. */
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}
509 | 509 | ||
/*
 * Force posted MMIO writes out to the controller.  A read from any
 * register does the job; the Version register is side-effect free.
 */
static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
515 | 515 | ||
516 | static int read_phy_reg(struct fw_ohci *ohci, int addr) | 516 | static int read_phy_reg(struct fw_ohci *ohci, int addr) |
517 | { | 517 | { |
518 | u32 val; | 518 | u32 val; |
519 | int i; | 519 | int i; |
520 | 520 | ||
521 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); | 521 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); |
522 | for (i = 0; i < 3 + 100; i++) { | 522 | for (i = 0; i < 3 + 100; i++) { |
523 | val = reg_read(ohci, OHCI1394_PhyControl); | 523 | val = reg_read(ohci, OHCI1394_PhyControl); |
524 | if (val & OHCI1394_PhyControl_ReadDone) | 524 | if (val & OHCI1394_PhyControl_ReadDone) |
525 | return OHCI1394_PhyControl_ReadData(val); | 525 | return OHCI1394_PhyControl_ReadData(val); |
526 | 526 | ||
527 | /* | 527 | /* |
528 | * Try a few times without waiting. Sleeping is necessary | 528 | * Try a few times without waiting. Sleeping is necessary |
529 | * only when the link/PHY interface is busy. | 529 | * only when the link/PHY interface is busy. |
530 | */ | 530 | */ |
531 | if (i >= 3) | 531 | if (i >= 3) |
532 | msleep(1); | 532 | msleep(1); |
533 | } | 533 | } |
534 | fw_error("failed to read phy reg\n"); | 534 | fw_error("failed to read phy reg\n"); |
535 | 535 | ||
536 | return -EBUSY; | 536 | return -EBUSY; |
537 | } | 537 | } |
538 | 538 | ||
539 | static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) | 539 | static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) |
540 | { | 540 | { |
541 | int i; | 541 | int i; |
542 | 542 | ||
543 | reg_write(ohci, OHCI1394_PhyControl, | 543 | reg_write(ohci, OHCI1394_PhyControl, |
544 | OHCI1394_PhyControl_Write(addr, val)); | 544 | OHCI1394_PhyControl_Write(addr, val)); |
545 | for (i = 0; i < 3 + 100; i++) { | 545 | for (i = 0; i < 3 + 100; i++) { |
546 | val = reg_read(ohci, OHCI1394_PhyControl); | 546 | val = reg_read(ohci, OHCI1394_PhyControl); |
547 | if (!(val & OHCI1394_PhyControl_WritePending)) | 547 | if (!(val & OHCI1394_PhyControl_WritePending)) |
548 | return 0; | 548 | return 0; |
549 | 549 | ||
550 | if (i >= 3) | 550 | if (i >= 3) |
551 | msleep(1); | 551 | msleep(1); |
552 | } | 552 | } |
553 | fw_error("failed to write phy reg\n"); | 553 | fw_error("failed to write phy reg\n"); |
554 | 554 | ||
555 | return -EBUSY; | 555 | return -EBUSY; |
556 | } | 556 | } |
557 | 557 | ||
558 | static int update_phy_reg(struct fw_ohci *ohci, int addr, | 558 | static int update_phy_reg(struct fw_ohci *ohci, int addr, |
559 | int clear_bits, int set_bits) | 559 | int clear_bits, int set_bits) |
560 | { | 560 | { |
561 | int ret = read_phy_reg(ohci, addr); | 561 | int ret = read_phy_reg(ohci, addr); |
562 | if (ret < 0) | 562 | if (ret < 0) |
563 | return ret; | 563 | return ret; |
564 | 564 | ||
565 | /* | 565 | /* |
566 | * The interrupt status bits are cleared by writing a one bit. | 566 | * The interrupt status bits are cleared by writing a one bit. |
567 | * Avoid clearing them unless explicitly requested in set_bits. | 567 | * Avoid clearing them unless explicitly requested in set_bits. |
568 | */ | 568 | */ |
569 | if (addr == 5) | 569 | if (addr == 5) |
570 | clear_bits |= PHY_INT_STATUS_BITS; | 570 | clear_bits |= PHY_INT_STATUS_BITS; |
571 | 571 | ||
572 | return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); | 572 | return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); |
573 | } | 573 | } |
574 | 574 | ||
575 | static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) | 575 | static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) |
576 | { | 576 | { |
577 | int ret; | 577 | int ret; |
578 | 578 | ||
579 | ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); | 579 | ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5); |
580 | if (ret < 0) | 580 | if (ret < 0) |
581 | return ret; | 581 | return ret; |
582 | 582 | ||
583 | return read_phy_reg(ohci, addr); | 583 | return read_phy_reg(ohci, addr); |
584 | } | 584 | } |
585 | 585 | ||
586 | static int ohci_read_phy_reg(struct fw_card *card, int addr) | 586 | static int ohci_read_phy_reg(struct fw_card *card, int addr) |
587 | { | 587 | { |
588 | struct fw_ohci *ohci = fw_ohci(card); | 588 | struct fw_ohci *ohci = fw_ohci(card); |
589 | int ret; | 589 | int ret; |
590 | 590 | ||
591 | mutex_lock(&ohci->phy_reg_mutex); | 591 | mutex_lock(&ohci->phy_reg_mutex); |
592 | ret = read_phy_reg(ohci, addr); | 592 | ret = read_phy_reg(ohci, addr); |
593 | mutex_unlock(&ohci->phy_reg_mutex); | 593 | mutex_unlock(&ohci->phy_reg_mutex); |
594 | 594 | ||
595 | return ret; | 595 | return ret; |
596 | } | 596 | } |
597 | 597 | ||
598 | static int ohci_update_phy_reg(struct fw_card *card, int addr, | 598 | static int ohci_update_phy_reg(struct fw_card *card, int addr, |
599 | int clear_bits, int set_bits) | 599 | int clear_bits, int set_bits) |
600 | { | 600 | { |
601 | struct fw_ohci *ohci = fw_ohci(card); | 601 | struct fw_ohci *ohci = fw_ohci(card); |
602 | int ret; | 602 | int ret; |
603 | 603 | ||
604 | mutex_lock(&ohci->phy_reg_mutex); | 604 | mutex_lock(&ohci->phy_reg_mutex); |
605 | ret = update_phy_reg(ohci, addr, clear_bits, set_bits); | 605 | ret = update_phy_reg(ohci, addr, clear_bits, set_bits); |
606 | mutex_unlock(&ohci->phy_reg_mutex); | 606 | mutex_unlock(&ohci->phy_reg_mutex); |
607 | 607 | ||
608 | return ret; | 608 | return ret; |
609 | } | 609 | } |
610 | 610 | ||
/*
 * DMA bus address of AR buffer page @i; it was stashed in the page's
 * private field when the page was mapped (see ar_context_init()).
 */
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}
615 | 615 | ||
/*
 * Hand buffer @index back to the controller: reinitialize its
 * descriptor, append it to the tail of the descriptor ring, and wake
 * the DMA context so the controller notices the new buffer.
 */
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	/* Clear the low branch bits: this descriptor is the new list end. */
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	/* Link the previous tail to this descriptor (set its Z/valid bit). */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	/* NOTE(review): one MMIO wake + flush per page; callers that link
	 * many pages in a row pay this cost for each page. */
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
634 | 634 | ||
635 | static void ar_context_release(struct ar_context *ctx) | 635 | static void ar_context_release(struct ar_context *ctx) |
636 | { | 636 | { |
637 | unsigned int i; | 637 | unsigned int i; |
638 | 638 | ||
639 | if (ctx->buffer) | 639 | if (ctx->buffer) |
640 | vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES); | 640 | vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES); |
641 | 641 | ||
642 | for (i = 0; i < AR_BUFFERS; i++) | 642 | for (i = 0; i < AR_BUFFERS; i++) |
643 | if (ctx->pages[i]) { | 643 | if (ctx->pages[i]) { |
644 | dma_unmap_page(ctx->ohci->card.device, | 644 | dma_unmap_page(ctx->ohci->card.device, |
645 | ar_buffer_bus(ctx, i), | 645 | ar_buffer_bus(ctx, i), |
646 | PAGE_SIZE, DMA_FROM_DEVICE); | 646 | PAGE_SIZE, DMA_FROM_DEVICE); |
647 | __free_page(ctx->pages[i]); | 647 | __free_page(ctx->pages[i]); |
648 | } | 648 | } |
649 | } | 649 | } |
650 | 650 | ||
651 | static void ar_context_abort(struct ar_context *ctx, const char *error_msg) | 651 | static void ar_context_abort(struct ar_context *ctx, const char *error_msg) |
652 | { | 652 | { |
653 | if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { | 653 | if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { |
654 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | 654 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); |
655 | flush_writes(ctx->ohci); | 655 | flush_writes(ctx->ohci); |
656 | 656 | ||
657 | fw_error("AR error: %s; DMA stopped\n", error_msg); | 657 | fw_error("AR error: %s; DMA stopped\n", error_msg); |
658 | } | 658 | } |
659 | /* FIXME: restart? */ | 659 | /* FIXME: restart? */ |
660 | } | 660 | } |
661 | 661 | ||
/* Index of the buffer following @index in the circular AR buffer ring. */
static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}
666 | 666 | ||
/* Index of the buffer preceding @index in the circular AR buffer ring. */
static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}
671 | 671 | ||
/*
 * Index of the oldest buffer in the ring, i.e. the one right after the
 * most recently (re)linked buffer.
 */
static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}
676 | 676 | ||
/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 *
 * Walks the ring from the oldest buffer towards ctx->last_buffer_index
 * until a buffer that is not yet completely filled is found.
 * *buffer_offset receives how many bytes the controller has written
 * into the returned buffer (0..PAGE_SIZE); a bogus res_count aborts
 * the context and yields offset 0.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	/* res_count is concurrently updated by the controller: read once. */
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}
737 | 737 | ||
738 | static void ar_sync_buffers_for_cpu(struct ar_context *ctx, | 738 | static void ar_sync_buffers_for_cpu(struct ar_context *ctx, |
739 | unsigned int end_buffer_index, | 739 | unsigned int end_buffer_index, |
740 | unsigned int end_buffer_offset) | 740 | unsigned int end_buffer_offset) |
741 | { | 741 | { |
742 | unsigned int i; | 742 | unsigned int i; |
743 | 743 | ||
744 | i = ar_first_buffer_index(ctx); | 744 | i = ar_first_buffer_index(ctx); |
745 | while (i != end_buffer_index) { | 745 | while (i != end_buffer_index) { |
746 | dma_sync_single_for_cpu(ctx->ohci->card.device, | 746 | dma_sync_single_for_cpu(ctx->ohci->card.device, |
747 | ar_buffer_bus(ctx, i), | 747 | ar_buffer_bus(ctx, i), |
748 | PAGE_SIZE, DMA_FROM_DEVICE); | 748 | PAGE_SIZE, DMA_FROM_DEVICE); |
749 | i = ar_next_buffer_index(i); | 749 | i = ar_next_buffer_index(i); |
750 | } | 750 | } |
751 | if (end_buffer_offset > 0) | 751 | if (end_buffer_offset > 0) |
752 | dma_sync_single_for_cpu(ctx->ohci->card.device, | 752 | dma_sync_single_for_cpu(ctx->ohci->card.device, |
753 | ar_buffer_bus(ctx, i), | 753 | ar_buffer_bus(ctx, i), |
754 | end_buffer_offset, DMA_FROM_DEVICE); | 754 | end_buffer_offset, DMA_FROM_DEVICE); |
755 | } | 755 | } |
756 | 756 | ||
/*
 * Convert a received header quadlet to CPU byte order.  On 32-bit
 * PowerMacs, controllers with QUIRK_BE_HEADERS deliver headers already
 * in big-endian order, so no swap must be done there.  The macro
 * expects a local variable named 'ohci' to be in scope at the call site.
 */
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
763 | 763 | ||
/*
 * Parse one received async packet at @buffer and hand it to the core
 * as a request or response.
 *
 * Returns a pointer just past the packet's trailing status quadlet
 * (i.e. the start of the next packet), or NULL if the packet was
 * corrupt and the context had to be aborted.
 */
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	/* Header/payload layout depends on the transaction code. */
	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		/* The quadlet data is kept in bus byte order, unswapped. */
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		/* data_length field; bound it before trusting it */
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	/* The controller appends one status quadlet after the packet. */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	/* OHCI ack event codes appear to be ack code + 16 — see the
	 * ACK_COMPLETE fixup below. */
	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
861 | 861 | ||
862 | static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) | 862 | static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) |
863 | { | 863 | { |
864 | void *next; | 864 | void *next; |
865 | 865 | ||
866 | while (p < end) { | 866 | while (p < end) { |
867 | next = handle_ar_packet(ctx, p); | 867 | next = handle_ar_packet(ctx, p); |
868 | if (!next) | 868 | if (!next) |
869 | return p; | 869 | return p; |
870 | p = next; | 870 | p = next; |
871 | } | 871 | } |
872 | 872 | ||
873 | return p; | 873 | return p; |
874 | } | 874 | } |
875 | 875 | ||
876 | static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) | 876 | static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) |
877 | { | 877 | { |
878 | unsigned int i; | 878 | unsigned int i; |
879 | 879 | ||
880 | i = ar_first_buffer_index(ctx); | 880 | i = ar_first_buffer_index(ctx); |
881 | while (i != end_buffer) { | 881 | while (i != end_buffer) { |
882 | dma_sync_single_for_device(ctx->ohci->card.device, | 882 | dma_sync_single_for_device(ctx->ohci->card.device, |
883 | ar_buffer_bus(ctx, i), | 883 | ar_buffer_bus(ctx, i), |
884 | PAGE_SIZE, DMA_FROM_DEVICE); | 884 | PAGE_SIZE, DMA_FROM_DEVICE); |
885 | ar_context_link_page(ctx, i); | 885 | ar_context_link_page(ctx, i); |
886 | i = ar_next_buffer_index(i); | 886 | i = ar_next_buffer_index(i); |
887 | } | 887 | } |
888 | } | 888 | } |
889 | 889 | ||
/*
 * AR bottom half: determine how far the controller has filled the
 * buffer ring, parse and dispatch all completely received packets,
 * then recycle the processed buffers back to the controller.
 *
 * On an inconsistency, ctx->pointer is set to NULL, which makes all
 * future invocations bail out immediately (the DMA context itself has
 * already been stopped via ar_context_abort()).
 */
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

 error:
	ctx->pointer = NULL;
}
935 | 935 | ||
/*
 * Set up an asynchronous receive context: allocate and DMA-map the
 * AR_BUFFERS receive pages, create the virtual mapping (with the first
 * AR_WRAPAROUND_PAGES mapped a second time after the buffer end so a
 * packet that wraps around the ring can be read contiguously), and
 * initialize the circular descriptor list at @descriptors_offset
 * within the card's misc_buffer.
 *
 * Returns 0 on success or -ENOMEM; on failure, all partial
 * allocations are released via ar_context_release().
 */
static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs        = regs;
	ctx->ohci        = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		/* GFP_DMA32: the OHCI descriptor holds a 32-bit address. */
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		/* Stash the bus address for ar_buffer_bus(). */
		set_page_private(ctx->pages[i], dma_addr);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i]              = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
				 -1, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}
992 | 992 | ||
/*
 * Start an asynchronous-receive DMA context: link every AR buffer page
 * into the descriptor ring, reset the software read pointer, then point
 * the controller at the first descriptor block and set the run bit.
 * The "| 1" on the command pointer encodes Z = 1 (one descriptor per
 * block), as required by the OHCI command-pointer format.
 */
static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	/* post the MMIO writes before returning */
	flush_writes(ctx->ohci);
}
1006 | 1006 | ||
1007 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) | 1007 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) |
1008 | { | 1008 | { |
1009 | __le16 branch; | 1009 | __le16 branch; |
1010 | 1010 | ||
1011 | branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); | 1011 | branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); |
1012 | 1012 | ||
1013 | /* figure out which descriptor the branch address goes in */ | 1013 | /* figure out which descriptor the branch address goes in */ |
1014 | if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) | 1014 | if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) |
1015 | return d; | 1015 | return d; |
1016 | else | 1016 | else |
1017 | return d + z - 1; | 1017 | return d + z - 1; |
1018 | } | 1018 | } |
1019 | 1019 | ||
/*
 * Bottom half for a DMA context: walk the chain of descriptor blocks,
 * starting after the last block handled previously (ctx->last), and pass
 * each block to the context's completion callback.  Descriptor buffers
 * that have been fully consumed are recycled onto the context's free list.
 */
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		/* The low four bits of a branch address encode Z, the
		 * number of descriptors in the next block. */
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			/* Block not completed yet; resume here next time. */
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
1061 | 1061 | ||
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 *
 * Returns 0 on success or -ENOMEM if the allocation failed or the
 * per-context allocation cap was reached.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	/* GFP_ATOMIC because we may be called under ohci->lock */
	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	/* The descriptor_buffer header lives at the start of the page;
	 * only the remainder is usable descriptor space. */
	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
1094 | 1094 | ||
/*
 * Initialize a DMA context: allocate the first descriptor buffer, set up
 * the completion tasklet, and seed the buffer with a dummy descriptor so
 * that later context_append() calls always have a predecessor to branch
 * from.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	/* status value that marks the dummy as already completed */
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}
1126 | 1126 | ||
/*
 * Free all descriptor buffers of a context.  Each buffer was allocated as
 * one coherent page; the page's bus address is reconstructed from the
 * stored buffer_bus by subtracting the offset of the embedded buffer
 * (mirroring the computation in context_add_buffer()).
 */
static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}
1137 | 1137 | ||
/*
 * Reserve room for a block of z contiguous descriptors in the context's
 * current descriptor buffer, advancing to (or allocating) the next buffer
 * when the current one is full.  The block is zeroed; its bus address is
 * returned through *d_bus.  Returns NULL if no room could be made.
 *
 * Must be called with ohci->lock held.
 */
static struct descriptor *context_get_descriptors(struct context *ctx,
		int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	/* A block larger than a whole buffer can never fit. */
	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
1169 | 1169 | ||
/*
 * Start a DMA context.  The command pointer is loaded from the branch
 * address of the last completed descriptor, which already carries the
 * Z count in its low bits (branch addresses are stored as d_bus | z),
 * so the controller resumes with the most recently appended program.
 */
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		le32_to_cpu(ctx->last->branch_address));
	/* clear stale control bits before setting run (plus any extras) */
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}
1181 | 1181 | ||
/*
 * Append a block of z descriptors (plus `extra` trailing descriptors that
 * are reserved but not part of the program, e.g. driver_data storage) to
 * the context's DMA program by patching the previous block's branch
 * address.
 *
 * NOTE: this function intentionally does NOT set the context's WAKE bit.
 * The caller is responsible for waking the controller after appending, so
 * that a batch of appends can share a single (expensive) MMIO wakeup.
 */
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);
}
1199 | 1196 | ||
1200 | static void context_stop(struct context *ctx) | 1197 | static void context_stop(struct context *ctx) |
1201 | { | 1198 | { |
1202 | u32 reg; | 1199 | u32 reg; |
1203 | int i; | 1200 | int i; |
1204 | 1201 | ||
1205 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); | 1202 | reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); |
1206 | ctx->running = false; | 1203 | ctx->running = false; |
1207 | flush_writes(ctx->ohci); | 1204 | flush_writes(ctx->ohci); |
1208 | 1205 | ||
1209 | for (i = 0; i < 10; i++) { | 1206 | for (i = 0; i < 10; i++) { |
1210 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); | 1207 | reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs)); |
1211 | if ((reg & CONTEXT_ACTIVE) == 0) | 1208 | if ((reg & CONTEXT_ACTIVE) == 0) |
1212 | return; | 1209 | return; |
1213 | 1210 | ||
1214 | mdelay(1); | 1211 | mdelay(1); |
1215 | } | 1212 | } |
1216 | fw_error("Error: DMA context still active (0x%08x)\n", reg); | 1213 | fw_error("Error: DMA context still active (0x%08x)\n", reg); |
1217 | } | 1214 | } |
1218 | 1215 | ||
/*
 * Per-packet bookkeeping stored in the otherwise unused fourth descriptor
 * of an AT DMA program (see at_context_queue_packet()).  Payloads of up
 * to 8 bytes are copied into inline_data so no separate DMA mapping of
 * the payload buffer is needed.
 */
struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};
1223 | 1220 | ||
1224 | /* | 1221 | /* |
1225 | * This function apppends a packet to the DMA queue for transmission. | 1222 | * This function apppends a packet to the DMA queue for transmission. |
1226 | * Must always be called with the ochi->lock held to ensure proper | 1223 | * Must always be called with the ochi->lock held to ensure proper |
1227 | * generation handling and locking around packet queue manipulation. | 1224 | * generation handling and locking around packet queue manipulation. |
1228 | */ | 1225 | */ |
1229 | static int at_context_queue_packet(struct context *ctx, | 1226 | static int at_context_queue_packet(struct context *ctx, |
1230 | struct fw_packet *packet) | 1227 | struct fw_packet *packet) |
1231 | { | 1228 | { |
1232 | struct fw_ohci *ohci = ctx->ohci; | 1229 | struct fw_ohci *ohci = ctx->ohci; |
1233 | dma_addr_t d_bus, uninitialized_var(payload_bus); | 1230 | dma_addr_t d_bus, uninitialized_var(payload_bus); |
1234 | struct driver_data *driver_data; | 1231 | struct driver_data *driver_data; |
1235 | struct descriptor *d, *last; | 1232 | struct descriptor *d, *last; |
1236 | __le32 *header; | 1233 | __le32 *header; |
1237 | int z, tcode; | 1234 | int z, tcode; |
1238 | 1235 | ||
1239 | d = context_get_descriptors(ctx, 4, &d_bus); | 1236 | d = context_get_descriptors(ctx, 4, &d_bus); |
1240 | if (d == NULL) { | 1237 | if (d == NULL) { |
1241 | packet->ack = RCODE_SEND_ERROR; | 1238 | packet->ack = RCODE_SEND_ERROR; |
1242 | return -1; | 1239 | return -1; |
1243 | } | 1240 | } |
1244 | 1241 | ||
1245 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); | 1242 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); |
1246 | d[0].res_count = cpu_to_le16(packet->timestamp); | 1243 | d[0].res_count = cpu_to_le16(packet->timestamp); |
1247 | 1244 | ||
1248 | /* | 1245 | /* |
1249 | * The DMA format for asyncronous link packets is different | 1246 | * The DMA format for asyncronous link packets is different |
1250 | * from the IEEE1394 layout, so shift the fields around | 1247 | * from the IEEE1394 layout, so shift the fields around |
1251 | * accordingly. | 1248 | * accordingly. |
1252 | */ | 1249 | */ |
1253 | 1250 | ||
1254 | tcode = (packet->header[0] >> 4) & 0x0f; | 1251 | tcode = (packet->header[0] >> 4) & 0x0f; |
1255 | header = (__le32 *) &d[1]; | 1252 | header = (__le32 *) &d[1]; |
1256 | switch (tcode) { | 1253 | switch (tcode) { |
1257 | case TCODE_WRITE_QUADLET_REQUEST: | 1254 | case TCODE_WRITE_QUADLET_REQUEST: |
1258 | case TCODE_WRITE_BLOCK_REQUEST: | 1255 | case TCODE_WRITE_BLOCK_REQUEST: |
1259 | case TCODE_WRITE_RESPONSE: | 1256 | case TCODE_WRITE_RESPONSE: |
1260 | case TCODE_READ_QUADLET_REQUEST: | 1257 | case TCODE_READ_QUADLET_REQUEST: |
1261 | case TCODE_READ_BLOCK_REQUEST: | 1258 | case TCODE_READ_BLOCK_REQUEST: |
1262 | case TCODE_READ_QUADLET_RESPONSE: | 1259 | case TCODE_READ_QUADLET_RESPONSE: |
1263 | case TCODE_READ_BLOCK_RESPONSE: | 1260 | case TCODE_READ_BLOCK_RESPONSE: |
1264 | case TCODE_LOCK_REQUEST: | 1261 | case TCODE_LOCK_REQUEST: |
1265 | case TCODE_LOCK_RESPONSE: | 1262 | case TCODE_LOCK_RESPONSE: |
1266 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | 1263 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1267 | (packet->speed << 16)); | 1264 | (packet->speed << 16)); |
1268 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | | 1265 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | |
1269 | (packet->header[0] & 0xffff0000)); | 1266 | (packet->header[0] & 0xffff0000)); |
1270 | header[2] = cpu_to_le32(packet->header[2]); | 1267 | header[2] = cpu_to_le32(packet->header[2]); |
1271 | 1268 | ||
1272 | if (TCODE_IS_BLOCK_PACKET(tcode)) | 1269 | if (TCODE_IS_BLOCK_PACKET(tcode)) |
1273 | header[3] = cpu_to_le32(packet->header[3]); | 1270 | header[3] = cpu_to_le32(packet->header[3]); |
1274 | else | 1271 | else |
1275 | header[3] = (__force __le32) packet->header[3]; | 1272 | header[3] = (__force __le32) packet->header[3]; |
1276 | 1273 | ||
1277 | d[0].req_count = cpu_to_le16(packet->header_length); | 1274 | d[0].req_count = cpu_to_le16(packet->header_length); |
1278 | break; | 1275 | break; |
1279 | 1276 | ||
1280 | case TCODE_LINK_INTERNAL: | 1277 | case TCODE_LINK_INTERNAL: |
1281 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | | 1278 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | |
1282 | (packet->speed << 16)); | 1279 | (packet->speed << 16)); |
1283 | header[1] = cpu_to_le32(packet->header[1]); | 1280 | header[1] = cpu_to_le32(packet->header[1]); |
1284 | header[2] = cpu_to_le32(packet->header[2]); | 1281 | header[2] = cpu_to_le32(packet->header[2]); |
1285 | d[0].req_count = cpu_to_le16(12); | 1282 | d[0].req_count = cpu_to_le16(12); |
1286 | 1283 | ||
1287 | if (is_ping_packet(&packet->header[1])) | 1284 | if (is_ping_packet(&packet->header[1])) |
1288 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); | 1285 | d[0].control |= cpu_to_le16(DESCRIPTOR_PING); |
1289 | break; | 1286 | break; |
1290 | 1287 | ||
1291 | case TCODE_STREAM_DATA: | 1288 | case TCODE_STREAM_DATA: |
1292 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | 1289 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
1293 | (packet->speed << 16)); | 1290 | (packet->speed << 16)); |
1294 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); | 1291 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); |
1295 | d[0].req_count = cpu_to_le16(8); | 1292 | d[0].req_count = cpu_to_le16(8); |
1296 | break; | 1293 | break; |
1297 | 1294 | ||
1298 | default: | 1295 | default: |
1299 | /* BUG(); */ | 1296 | /* BUG(); */ |
1300 | packet->ack = RCODE_SEND_ERROR; | 1297 | packet->ack = RCODE_SEND_ERROR; |
1301 | return -1; | 1298 | return -1; |
1302 | } | 1299 | } |
1303 | 1300 | ||
1304 | BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); | 1301 | BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); |
1305 | driver_data = (struct driver_data *) &d[3]; | 1302 | driver_data = (struct driver_data *) &d[3]; |
1306 | driver_data->packet = packet; | 1303 | driver_data->packet = packet; |
1307 | packet->driver_data = driver_data; | 1304 | packet->driver_data = driver_data; |
1308 | 1305 | ||
1309 | if (packet->payload_length > 0) { | 1306 | if (packet->payload_length > 0) { |
1310 | if (packet->payload_length > sizeof(driver_data->inline_data)) { | 1307 | if (packet->payload_length > sizeof(driver_data->inline_data)) { |
1311 | payload_bus = dma_map_single(ohci->card.device, | 1308 | payload_bus = dma_map_single(ohci->card.device, |
1312 | packet->payload, | 1309 | packet->payload, |
1313 | packet->payload_length, | 1310 | packet->payload_length, |
1314 | DMA_TO_DEVICE); | 1311 | DMA_TO_DEVICE); |
1315 | if (dma_mapping_error(ohci->card.device, payload_bus)) { | 1312 | if (dma_mapping_error(ohci->card.device, payload_bus)) { |
1316 | packet->ack = RCODE_SEND_ERROR; | 1313 | packet->ack = RCODE_SEND_ERROR; |
1317 | return -1; | 1314 | return -1; |
1318 | } | 1315 | } |
1319 | packet->payload_bus = payload_bus; | 1316 | packet->payload_bus = payload_bus; |
1320 | packet->payload_mapped = true; | 1317 | packet->payload_mapped = true; |
1321 | } else { | 1318 | } else { |
1322 | memcpy(driver_data->inline_data, packet->payload, | 1319 | memcpy(driver_data->inline_data, packet->payload, |
1323 | packet->payload_length); | 1320 | packet->payload_length); |
1324 | payload_bus = d_bus + 3 * sizeof(*d); | 1321 | payload_bus = d_bus + 3 * sizeof(*d); |
1325 | } | 1322 | } |
1326 | 1323 | ||
1327 | d[2].req_count = cpu_to_le16(packet->payload_length); | 1324 | d[2].req_count = cpu_to_le16(packet->payload_length); |
1328 | d[2].data_address = cpu_to_le32(payload_bus); | 1325 | d[2].data_address = cpu_to_le32(payload_bus); |
1329 | last = &d[2]; | 1326 | last = &d[2]; |
1330 | z = 3; | 1327 | z = 3; |
1331 | } else { | 1328 | } else { |
1332 | last = &d[0]; | 1329 | last = &d[0]; |
1333 | z = 2; | 1330 | z = 2; |
1334 | } | 1331 | } |
1335 | 1332 | ||
1336 | last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | | 1333 | last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | |
1337 | DESCRIPTOR_IRQ_ALWAYS | | 1334 | DESCRIPTOR_IRQ_ALWAYS | |
1338 | DESCRIPTOR_BRANCH_ALWAYS); | 1335 | DESCRIPTOR_BRANCH_ALWAYS); |
1339 | 1336 | ||
1340 | /* FIXME: Document how the locking works. */ | 1337 | /* FIXME: Document how the locking works. */ |
1341 | if (ohci->generation != packet->generation) { | 1338 | if (ohci->generation != packet->generation) { |
1342 | if (packet->payload_mapped) | 1339 | if (packet->payload_mapped) |
1343 | dma_unmap_single(ohci->card.device, payload_bus, | 1340 | dma_unmap_single(ohci->card.device, payload_bus, |
1344 | packet->payload_length, DMA_TO_DEVICE); | 1341 | packet->payload_length, DMA_TO_DEVICE); |
1345 | packet->ack = RCODE_GENERATION; | 1342 | packet->ack = RCODE_GENERATION; |
1346 | return -1; | 1343 | return -1; |
1347 | } | 1344 | } |
1348 | 1345 | ||
1349 | context_append(ctx, d, z, 4 - z); | 1346 | context_append(ctx, d, z, 4 - z); |
1350 | 1347 | ||
1351 | if (!ctx->running) | 1348 | if (ctx->running) { |
1349 | reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | ||
1350 | flush_writes(ohci); | ||
1351 | } else { | ||
1352 | context_run(ctx, 0); | 1352 | context_run(ctx, 0); |
1353 | } | ||
1353 | 1354 | ||
1354 | return 0; | 1355 | return 0; |
1355 | } | 1356 | } |
1356 | 1357 | ||
/*
 * Synchronously complete all packets queued on an AT context, including
 * packets whose status the hardware has not written back yet: setting
 * ctx->flushing makes handle_at_packet() report those as flushed.
 * tasklet_disable() keeps the regular completion tasklet from running
 * concurrently while we call it by hand.
 */
static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}
1367 | 1368 | ||
/*
 * Completion handler for one AT DMA program: translate the OHCI event
 * code from the last descriptor's transfer_status into an ack/rcode,
 * unmap the payload if needed, and invoke the packet's callback.
 * Returns 0 to stop the descriptor walk (packet not done yet) or 1 to
 * continue with the next packet.
 */
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as when we
		 * try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	/* Hardware ack codes appear in the event field offset by 0x10. */
	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		/* fall through */

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
1448 | 1449 | ||
/* Field accessors for the quadlets of an IEEE 1394 async packet header. */
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
1454 | 1455 | ||
1455 | static void handle_local_rom(struct fw_ohci *ohci, | 1456 | static void handle_local_rom(struct fw_ohci *ohci, |
1456 | struct fw_packet *packet, u32 csr) | 1457 | struct fw_packet *packet, u32 csr) |
1457 | { | 1458 | { |
1458 | struct fw_packet response; | 1459 | struct fw_packet response; |
1459 | int tcode, length, i; | 1460 | int tcode, length, i; |
1460 | 1461 | ||
1461 | tcode = HEADER_GET_TCODE(packet->header[0]); | 1462 | tcode = HEADER_GET_TCODE(packet->header[0]); |
1462 | if (TCODE_IS_BLOCK_PACKET(tcode)) | 1463 | if (TCODE_IS_BLOCK_PACKET(tcode)) |
1463 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); | 1464 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); |
1464 | else | 1465 | else |
1465 | length = 4; | 1466 | length = 4; |
1466 | 1467 | ||
1467 | i = csr - CSR_CONFIG_ROM; | 1468 | i = csr - CSR_CONFIG_ROM; |
1468 | if (i + length > CONFIG_ROM_SIZE) { | 1469 | if (i + length > CONFIG_ROM_SIZE) { |
1469 | fw_fill_response(&response, packet->header, | 1470 | fw_fill_response(&response, packet->header, |
1470 | RCODE_ADDRESS_ERROR, NULL, 0); | 1471 | RCODE_ADDRESS_ERROR, NULL, 0); |
1471 | } else if (!TCODE_IS_READ_REQUEST(tcode)) { | 1472 | } else if (!TCODE_IS_READ_REQUEST(tcode)) { |
1472 | fw_fill_response(&response, packet->header, | 1473 | fw_fill_response(&response, packet->header, |
1473 | RCODE_TYPE_ERROR, NULL, 0); | 1474 | RCODE_TYPE_ERROR, NULL, 0); |
1474 | } else { | 1475 | } else { |
1475 | fw_fill_response(&response, packet->header, RCODE_COMPLETE, | 1476 | fw_fill_response(&response, packet->header, RCODE_COMPLETE, |
1476 | (void *) ohci->config_rom + i, length); | 1477 | (void *) ohci->config_rom + i, length); |
1477 | } | 1478 | } |
1478 | 1479 | ||
1479 | fw_core_handle_response(&ohci->card, &response); | 1480 | fw_core_handle_response(&ohci->card, &response); |
1480 | } | 1481 | } |
1481 | 1482 | ||
1482 | static void handle_local_lock(struct fw_ohci *ohci, | 1483 | static void handle_local_lock(struct fw_ohci *ohci, |
1483 | struct fw_packet *packet, u32 csr) | 1484 | struct fw_packet *packet, u32 csr) |
1484 | { | 1485 | { |
1485 | struct fw_packet response; | 1486 | struct fw_packet response; |
1486 | int tcode, length, ext_tcode, sel, try; | 1487 | int tcode, length, ext_tcode, sel, try; |
1487 | __be32 *payload, lock_old; | 1488 | __be32 *payload, lock_old; |
1488 | u32 lock_arg, lock_data; | 1489 | u32 lock_arg, lock_data; |
1489 | 1490 | ||
1490 | tcode = HEADER_GET_TCODE(packet->header[0]); | 1491 | tcode = HEADER_GET_TCODE(packet->header[0]); |
1491 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); | 1492 | length = HEADER_GET_DATA_LENGTH(packet->header[3]); |
1492 | payload = packet->payload; | 1493 | payload = packet->payload; |
1493 | ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); | 1494 | ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); |
1494 | 1495 | ||
1495 | if (tcode == TCODE_LOCK_REQUEST && | 1496 | if (tcode == TCODE_LOCK_REQUEST && |
1496 | ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { | 1497 | ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { |
1497 | lock_arg = be32_to_cpu(payload[0]); | 1498 | lock_arg = be32_to_cpu(payload[0]); |
1498 | lock_data = be32_to_cpu(payload[1]); | 1499 | lock_data = be32_to_cpu(payload[1]); |
1499 | } else if (tcode == TCODE_READ_QUADLET_REQUEST) { | 1500 | } else if (tcode == TCODE_READ_QUADLET_REQUEST) { |
1500 | lock_arg = 0; | 1501 | lock_arg = 0; |
1501 | lock_data = 0; | 1502 | lock_data = 0; |
1502 | } else { | 1503 | } else { |
1503 | fw_fill_response(&response, packet->header, | 1504 | fw_fill_response(&response, packet->header, |
1504 | RCODE_TYPE_ERROR, NULL, 0); | 1505 | RCODE_TYPE_ERROR, NULL, 0); |
1505 | goto out; | 1506 | goto out; |
1506 | } | 1507 | } |
1507 | 1508 | ||
1508 | sel = (csr - CSR_BUS_MANAGER_ID) / 4; | 1509 | sel = (csr - CSR_BUS_MANAGER_ID) / 4; |
1509 | reg_write(ohci, OHCI1394_CSRData, lock_data); | 1510 | reg_write(ohci, OHCI1394_CSRData, lock_data); |
1510 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); | 1511 | reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); |
1511 | reg_write(ohci, OHCI1394_CSRControl, sel); | 1512 | reg_write(ohci, OHCI1394_CSRControl, sel); |
1512 | 1513 | ||
1513 | for (try = 0; try < 20; try++) | 1514 | for (try = 0; try < 20; try++) |
1514 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { | 1515 | if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { |
1515 | lock_old = cpu_to_be32(reg_read(ohci, | 1516 | lock_old = cpu_to_be32(reg_read(ohci, |
1516 | OHCI1394_CSRData)); | 1517 | OHCI1394_CSRData)); |
1517 | fw_fill_response(&response, packet->header, | 1518 | fw_fill_response(&response, packet->header, |
1518 | RCODE_COMPLETE, | 1519 | RCODE_COMPLETE, |
1519 | &lock_old, sizeof(lock_old)); | 1520 | &lock_old, sizeof(lock_old)); |
1520 | goto out; | 1521 | goto out; |
1521 | } | 1522 | } |
1522 | 1523 | ||
1523 | fw_error("swap not done (CSR lock timeout)\n"); | 1524 | fw_error("swap not done (CSR lock timeout)\n"); |
1524 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); | 1525 | fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); |
1525 | 1526 | ||
1526 | out: | 1527 | out: |
1527 | fw_core_handle_response(&ohci->card, &response); | 1528 | fw_core_handle_response(&ohci->card, &response); |
1528 | } | 1529 | } |
1529 | 1530 | ||
1530 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) | 1531 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) |
1531 | { | 1532 | { |
1532 | u64 offset, csr; | 1533 | u64 offset, csr; |
1533 | 1534 | ||
1534 | if (ctx == &ctx->ohci->at_request_ctx) { | 1535 | if (ctx == &ctx->ohci->at_request_ctx) { |
1535 | packet->ack = ACK_PENDING; | 1536 | packet->ack = ACK_PENDING; |
1536 | packet->callback(packet, &ctx->ohci->card, packet->ack); | 1537 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1537 | } | 1538 | } |
1538 | 1539 | ||
1539 | offset = | 1540 | offset = |
1540 | ((unsigned long long) | 1541 | ((unsigned long long) |
1541 | HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | | 1542 | HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | |
1542 | packet->header[2]; | 1543 | packet->header[2]; |
1543 | csr = offset - CSR_REGISTER_BASE; | 1544 | csr = offset - CSR_REGISTER_BASE; |
1544 | 1545 | ||
1545 | /* Handle config rom reads. */ | 1546 | /* Handle config rom reads. */ |
1546 | if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) | 1547 | if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END) |
1547 | handle_local_rom(ctx->ohci, packet, csr); | 1548 | handle_local_rom(ctx->ohci, packet, csr); |
1548 | else switch (csr) { | 1549 | else switch (csr) { |
1549 | case CSR_BUS_MANAGER_ID: | 1550 | case CSR_BUS_MANAGER_ID: |
1550 | case CSR_BANDWIDTH_AVAILABLE: | 1551 | case CSR_BANDWIDTH_AVAILABLE: |
1551 | case CSR_CHANNELS_AVAILABLE_HI: | 1552 | case CSR_CHANNELS_AVAILABLE_HI: |
1552 | case CSR_CHANNELS_AVAILABLE_LO: | 1553 | case CSR_CHANNELS_AVAILABLE_LO: |
1553 | handle_local_lock(ctx->ohci, packet, csr); | 1554 | handle_local_lock(ctx->ohci, packet, csr); |
1554 | break; | 1555 | break; |
1555 | default: | 1556 | default: |
1556 | if (ctx == &ctx->ohci->at_request_ctx) | 1557 | if (ctx == &ctx->ohci->at_request_ctx) |
1557 | fw_core_handle_request(&ctx->ohci->card, packet); | 1558 | fw_core_handle_request(&ctx->ohci->card, packet); |
1558 | else | 1559 | else |
1559 | fw_core_handle_response(&ctx->ohci->card, packet); | 1560 | fw_core_handle_response(&ctx->ohci->card, packet); |
1560 | break; | 1561 | break; |
1561 | } | 1562 | } |
1562 | 1563 | ||
1563 | if (ctx == &ctx->ohci->at_response_ctx) { | 1564 | if (ctx == &ctx->ohci->at_response_ctx) { |
1564 | packet->ack = ACK_COMPLETE; | 1565 | packet->ack = ACK_COMPLETE; |
1565 | packet->callback(packet, &ctx->ohci->card, packet->ack); | 1566 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1566 | } | 1567 | } |
1567 | } | 1568 | } |
1568 | 1569 | ||
1569 | static void at_context_transmit(struct context *ctx, struct fw_packet *packet) | 1570 | static void at_context_transmit(struct context *ctx, struct fw_packet *packet) |
1570 | { | 1571 | { |
1571 | unsigned long flags; | 1572 | unsigned long flags; |
1572 | int ret; | 1573 | int ret; |
1573 | 1574 | ||
1574 | spin_lock_irqsave(&ctx->ohci->lock, flags); | 1575 | spin_lock_irqsave(&ctx->ohci->lock, flags); |
1575 | 1576 | ||
1576 | if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && | 1577 | if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && |
1577 | ctx->ohci->generation == packet->generation) { | 1578 | ctx->ohci->generation == packet->generation) { |
1578 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); | 1579 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
1579 | handle_local_request(ctx, packet); | 1580 | handle_local_request(ctx, packet); |
1580 | return; | 1581 | return; |
1581 | } | 1582 | } |
1582 | 1583 | ||
1583 | ret = at_context_queue_packet(ctx, packet); | 1584 | ret = at_context_queue_packet(ctx, packet); |
1584 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); | 1585 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
1585 | 1586 | ||
1586 | if (ret < 0) | 1587 | if (ret < 0) |
1587 | packet->callback(packet, &ctx->ohci->card, packet->ack); | 1588 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1588 | 1589 | ||
1589 | } | 1590 | } |
1590 | 1591 | ||
1591 | static void detect_dead_context(struct fw_ohci *ohci, | 1592 | static void detect_dead_context(struct fw_ohci *ohci, |
1592 | const char *name, unsigned int regs) | 1593 | const char *name, unsigned int regs) |
1593 | { | 1594 | { |
1594 | u32 ctl; | 1595 | u32 ctl; |
1595 | 1596 | ||
1596 | ctl = reg_read(ohci, CONTROL_SET(regs)); | 1597 | ctl = reg_read(ohci, CONTROL_SET(regs)); |
1597 | if (ctl & CONTEXT_DEAD) { | 1598 | if (ctl & CONTEXT_DEAD) { |
1598 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG | 1599 | #ifdef CONFIG_FIREWIRE_OHCI_DEBUG |
1599 | fw_error("DMA context %s has stopped, error code: %s\n", | 1600 | fw_error("DMA context %s has stopped, error code: %s\n", |
1600 | name, evts[ctl & 0x1f]); | 1601 | name, evts[ctl & 0x1f]); |
1601 | #else | 1602 | #else |
1602 | fw_error("DMA context %s has stopped, error code: %#x\n", | 1603 | fw_error("DMA context %s has stopped, error code: %#x\n", |
1603 | name, ctl & 0x1f); | 1604 | name, ctl & 0x1f); |
1604 | #endif | 1605 | #endif |
1605 | } | 1606 | } |
1606 | } | 1607 | } |
1607 | 1608 | ||
1608 | static void handle_dead_contexts(struct fw_ohci *ohci) | 1609 | static void handle_dead_contexts(struct fw_ohci *ohci) |
1609 | { | 1610 | { |
1610 | unsigned int i; | 1611 | unsigned int i; |
1611 | char name[8]; | 1612 | char name[8]; |
1612 | 1613 | ||
1613 | detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase); | 1614 | detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase); |
1614 | detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase); | 1615 | detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase); |
1615 | detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase); | 1616 | detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase); |
1616 | detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase); | 1617 | detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase); |
1617 | for (i = 0; i < 32; ++i) { | 1618 | for (i = 0; i < 32; ++i) { |
1618 | if (!(ohci->it_context_support & (1 << i))) | 1619 | if (!(ohci->it_context_support & (1 << i))) |
1619 | continue; | 1620 | continue; |
1620 | sprintf(name, "IT%u", i); | 1621 | sprintf(name, "IT%u", i); |
1621 | detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i)); | 1622 | detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i)); |
1622 | } | 1623 | } |
1623 | for (i = 0; i < 32; ++i) { | 1624 | for (i = 0; i < 32; ++i) { |
1624 | if (!(ohci->ir_context_support & (1 << i))) | 1625 | if (!(ohci->ir_context_support & (1 << i))) |
1625 | continue; | 1626 | continue; |
1626 | sprintf(name, "IR%u", i); | 1627 | sprintf(name, "IR%u", i); |
1627 | detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i)); | 1628 | detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i)); |
1628 | } | 1629 | } |
1629 | /* TODO: maybe try to flush and restart the dead contexts */ | 1630 | /* TODO: maybe try to flush and restart the dead contexts */ |
1630 | } | 1631 | } |
1631 | 1632 | ||
1632 | static u32 cycle_timer_ticks(u32 cycle_timer) | 1633 | static u32 cycle_timer_ticks(u32 cycle_timer) |
1633 | { | 1634 | { |
1634 | u32 ticks; | 1635 | u32 ticks; |
1635 | 1636 | ||
1636 | ticks = cycle_timer & 0xfff; | 1637 | ticks = cycle_timer & 0xfff; |
1637 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); | 1638 | ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); |
1638 | ticks += (3072 * 8000) * (cycle_timer >> 25); | 1639 | ticks += (3072 * 8000) * (cycle_timer >> 25); |
1639 | 1640 | ||
1640 | return ticks; | 1641 | return ticks; |
1641 | } | 1642 | } |
1642 | 1643 | ||
1643 | /* | 1644 | /* |
1644 | * Some controllers exhibit one or more of the following bugs when updating the | 1645 | * Some controllers exhibit one or more of the following bugs when updating the |
1645 | * iso cycle timer register: | 1646 | * iso cycle timer register: |
1646 | * - When the lowest six bits are wrapping around to zero, a read that happens | 1647 | * - When the lowest six bits are wrapping around to zero, a read that happens |
1647 | * at the same time will return garbage in the lowest ten bits. | 1648 | * at the same time will return garbage in the lowest ten bits. |
1648 | * - When the cycleOffset field wraps around to zero, the cycleCount field is | 1649 | * - When the cycleOffset field wraps around to zero, the cycleCount field is |
1649 | * not incremented for about 60 ns. | 1650 | * not incremented for about 60 ns. |
1650 | * - Occasionally, the entire register reads zero. | 1651 | * - Occasionally, the entire register reads zero. |
1651 | * | 1652 | * |
1652 | * To catch these, we read the register three times and ensure that the | 1653 | * To catch these, we read the register three times and ensure that the |
1653 | * difference between each two consecutive reads is approximately the same, i.e. | 1654 | * difference between each two consecutive reads is approximately the same, i.e. |
1654 | * less than twice the other. Furthermore, any negative difference indicates an | 1655 | * less than twice the other. Furthermore, any negative difference indicates an |
1655 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to | 1656 | * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to |
1656 | * execute, so we have enough precision to compute the ratio of the differences.) | 1657 | * execute, so we have enough precision to compute the ratio of the differences.) |
1657 | */ | 1658 | */ |
1658 | static u32 get_cycle_time(struct fw_ohci *ohci) | 1659 | static u32 get_cycle_time(struct fw_ohci *ohci) |
1659 | { | 1660 | { |
1660 | u32 c0, c1, c2; | 1661 | u32 c0, c1, c2; |
1661 | u32 t0, t1, t2; | 1662 | u32 t0, t1, t2; |
1662 | s32 diff01, diff12; | 1663 | s32 diff01, diff12; |
1663 | int i; | 1664 | int i; |
1664 | 1665 | ||
1665 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 1666 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1666 | 1667 | ||
1667 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { | 1668 | if (ohci->quirks & QUIRK_CYCLE_TIMER) { |
1668 | i = 0; | 1669 | i = 0; |
1669 | c1 = c2; | 1670 | c1 = c2; |
1670 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 1671 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1671 | do { | 1672 | do { |
1672 | c0 = c1; | 1673 | c0 = c1; |
1673 | c1 = c2; | 1674 | c1 = c2; |
1674 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); | 1675 | c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); |
1675 | t0 = cycle_timer_ticks(c0); | 1676 | t0 = cycle_timer_ticks(c0); |
1676 | t1 = cycle_timer_ticks(c1); | 1677 | t1 = cycle_timer_ticks(c1); |
1677 | t2 = cycle_timer_ticks(c2); | 1678 | t2 = cycle_timer_ticks(c2); |
1678 | diff01 = t1 - t0; | 1679 | diff01 = t1 - t0; |
1679 | diff12 = t2 - t1; | 1680 | diff12 = t2 - t1; |
1680 | } while ((diff01 <= 0 || diff12 <= 0 || | 1681 | } while ((diff01 <= 0 || diff12 <= 0 || |
1681 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) | 1682 | diff01 / diff12 >= 2 || diff12 / diff01 >= 2) |
1682 | && i++ < 20); | 1683 | && i++ < 20); |
1683 | } | 1684 | } |
1684 | 1685 | ||
1685 | return c2; | 1686 | return c2; |
1686 | } | 1687 | } |
1687 | 1688 | ||
1688 | /* | 1689 | /* |
1689 | * This function has to be called at least every 64 seconds. The bus_time | 1690 | * This function has to be called at least every 64 seconds. The bus_time |
1690 | * field stores not only the upper 25 bits of the BUS_TIME register but also | 1691 | * field stores not only the upper 25 bits of the BUS_TIME register but also |
1691 | * the most significant bit of the cycle timer in bit 6 so that we can detect | 1692 | * the most significant bit of the cycle timer in bit 6 so that we can detect |
1692 | * changes in this bit. | 1693 | * changes in this bit. |
1693 | */ | 1694 | */ |
1694 | static u32 update_bus_time(struct fw_ohci *ohci) | 1695 | static u32 update_bus_time(struct fw_ohci *ohci) |
1695 | { | 1696 | { |
1696 | u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; | 1697 | u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; |
1697 | 1698 | ||
1698 | if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) | 1699 | if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) |
1699 | ohci->bus_time += 0x40; | 1700 | ohci->bus_time += 0x40; |
1700 | 1701 | ||
1701 | return ohci->bus_time | cycle_time_seconds; | 1702 | return ohci->bus_time | cycle_time_seconds; |
1702 | } | 1703 | } |
1703 | 1704 | ||
1704 | static void bus_reset_tasklet(unsigned long data) | 1705 | static void bus_reset_tasklet(unsigned long data) |
1705 | { | 1706 | { |
1706 | struct fw_ohci *ohci = (struct fw_ohci *)data; | 1707 | struct fw_ohci *ohci = (struct fw_ohci *)data; |
1707 | int self_id_count, i, j, reg; | 1708 | int self_id_count, i, j, reg; |
1708 | int generation, new_generation; | 1709 | int generation, new_generation; |
1709 | unsigned long flags; | 1710 | unsigned long flags; |
1710 | void *free_rom = NULL; | 1711 | void *free_rom = NULL; |
1711 | dma_addr_t free_rom_bus = 0; | 1712 | dma_addr_t free_rom_bus = 0; |
1712 | bool is_new_root; | 1713 | bool is_new_root; |
1713 | 1714 | ||
1714 | reg = reg_read(ohci, OHCI1394_NodeID); | 1715 | reg = reg_read(ohci, OHCI1394_NodeID); |
1715 | if (!(reg & OHCI1394_NodeID_idValid)) { | 1716 | if (!(reg & OHCI1394_NodeID_idValid)) { |
1716 | fw_notify("node ID not valid, new bus reset in progress\n"); | 1717 | fw_notify("node ID not valid, new bus reset in progress\n"); |
1717 | return; | 1718 | return; |
1718 | } | 1719 | } |
1719 | if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { | 1720 | if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { |
1720 | fw_notify("malconfigured bus\n"); | 1721 | fw_notify("malconfigured bus\n"); |
1721 | return; | 1722 | return; |
1722 | } | 1723 | } |
1723 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | | 1724 | ohci->node_id = reg & (OHCI1394_NodeID_busNumber | |
1724 | OHCI1394_NodeID_nodeNumber); | 1725 | OHCI1394_NodeID_nodeNumber); |
1725 | 1726 | ||
1726 | is_new_root = (reg & OHCI1394_NodeID_root) != 0; | 1727 | is_new_root = (reg & OHCI1394_NodeID_root) != 0; |
1727 | if (!(ohci->is_root && is_new_root)) | 1728 | if (!(ohci->is_root && is_new_root)) |
1728 | reg_write(ohci, OHCI1394_LinkControlSet, | 1729 | reg_write(ohci, OHCI1394_LinkControlSet, |
1729 | OHCI1394_LinkControl_cycleMaster); | 1730 | OHCI1394_LinkControl_cycleMaster); |
1730 | ohci->is_root = is_new_root; | 1731 | ohci->is_root = is_new_root; |
1731 | 1732 | ||
1732 | reg = reg_read(ohci, OHCI1394_SelfIDCount); | 1733 | reg = reg_read(ohci, OHCI1394_SelfIDCount); |
1733 | if (reg & OHCI1394_SelfIDCount_selfIDError) { | 1734 | if (reg & OHCI1394_SelfIDCount_selfIDError) { |
1734 | fw_notify("inconsistent self IDs\n"); | 1735 | fw_notify("inconsistent self IDs\n"); |
1735 | return; | 1736 | return; |
1736 | } | 1737 | } |
1737 | /* | 1738 | /* |
1738 | * The count in the SelfIDCount register is the number of | 1739 | * The count in the SelfIDCount register is the number of |
1739 | * bytes in the self ID receive buffer. Since we also receive | 1740 | * bytes in the self ID receive buffer. Since we also receive |
1740 | * the inverted quadlets and a header quadlet, we shift one | 1741 | * the inverted quadlets and a header quadlet, we shift one |
1741 | * bit extra to get the actual number of self IDs. | 1742 | * bit extra to get the actual number of self IDs. |
1742 | */ | 1743 | */ |
1743 | self_id_count = (reg >> 3) & 0xff; | 1744 | self_id_count = (reg >> 3) & 0xff; |
1744 | if (self_id_count == 0 || self_id_count > 252) { | 1745 | if (self_id_count == 0 || self_id_count > 252) { |
1745 | fw_notify("inconsistent self IDs\n"); | 1746 | fw_notify("inconsistent self IDs\n"); |
1746 | return; | 1747 | return; |
1747 | } | 1748 | } |
1748 | generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; | 1749 | generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; |
1749 | rmb(); | 1750 | rmb(); |
1750 | 1751 | ||
1751 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { | 1752 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { |
1752 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { | 1753 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { |
1753 | fw_notify("inconsistent self IDs\n"); | 1754 | fw_notify("inconsistent self IDs\n"); |
1754 | return; | 1755 | return; |
1755 | } | 1756 | } |
1756 | ohci->self_id_buffer[j] = | 1757 | ohci->self_id_buffer[j] = |
1757 | cond_le32_to_cpu(ohci->self_id_cpu[i]); | 1758 | cond_le32_to_cpu(ohci->self_id_cpu[i]); |
1758 | } | 1759 | } |
1759 | rmb(); | 1760 | rmb(); |
1760 | 1761 | ||
1761 | /* | 1762 | /* |
1762 | * Check the consistency of the self IDs we just read. The | 1763 | * Check the consistency of the self IDs we just read. The |
1763 | * problem we face is that a new bus reset can start while we | 1764 | * problem we face is that a new bus reset can start while we |
1764 | * read out the self IDs from the DMA buffer. If this happens, | 1765 | * read out the self IDs from the DMA buffer. If this happens, |
1765 | * the DMA buffer will be overwritten with new self IDs and we | 1766 | * the DMA buffer will be overwritten with new self IDs and we |
1766 | * will read out inconsistent data. The OHCI specification | 1767 | * will read out inconsistent data. The OHCI specification |
1767 | * (section 11.2) recommends a technique similar to | 1768 | * (section 11.2) recommends a technique similar to |
1768 | * linux/seqlock.h, where we remember the generation of the | 1769 | * linux/seqlock.h, where we remember the generation of the |
1769 | * self IDs in the buffer before reading them out and compare | 1770 | * self IDs in the buffer before reading them out and compare |
1770 | * it to the current generation after reading them out. If | 1771 | * it to the current generation after reading them out. If |
1771 | * the two generations match we know we have a consistent set | 1772 | * the two generations match we know we have a consistent set |
1772 | * of self IDs. | 1773 | * of self IDs. |
1773 | */ | 1774 | */ |
1774 | 1775 | ||
1775 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; | 1776 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; |
1776 | if (new_generation != generation) { | 1777 | if (new_generation != generation) { |
1777 | fw_notify("recursive bus reset detected, " | 1778 | fw_notify("recursive bus reset detected, " |
1778 | "discarding self ids\n"); | 1779 | "discarding self ids\n"); |
1779 | return; | 1780 | return; |
1780 | } | 1781 | } |
1781 | 1782 | ||
1782 | /* FIXME: Document how the locking works. */ | 1783 | /* FIXME: Document how the locking works. */ |
1783 | spin_lock_irqsave(&ohci->lock, flags); | 1784 | spin_lock_irqsave(&ohci->lock, flags); |
1784 | 1785 | ||
1785 | ohci->generation = -1; /* prevent AT packet queueing */ | 1786 | ohci->generation = -1; /* prevent AT packet queueing */ |
1786 | context_stop(&ohci->at_request_ctx); | 1787 | context_stop(&ohci->at_request_ctx); |
1787 | context_stop(&ohci->at_response_ctx); | 1788 | context_stop(&ohci->at_response_ctx); |
1788 | 1789 | ||
1789 | spin_unlock_irqrestore(&ohci->lock, flags); | 1790 | spin_unlock_irqrestore(&ohci->lock, flags); |
1790 | 1791 | ||
1791 | /* | 1792 | /* |
1792 | * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent | 1793 | * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent |
1793 | * packets in the AT queues and software needs to drain them. | 1794 | * packets in the AT queues and software needs to drain them. |
1794 | * Some OHCI 1.1 controllers (JMicron) apparently require this too. | 1795 | * Some OHCI 1.1 controllers (JMicron) apparently require this too. |
1795 | */ | 1796 | */ |
1796 | at_context_flush(&ohci->at_request_ctx); | 1797 | at_context_flush(&ohci->at_request_ctx); |
1797 | at_context_flush(&ohci->at_response_ctx); | 1798 | at_context_flush(&ohci->at_response_ctx); |
1798 | 1799 | ||
1799 | spin_lock_irqsave(&ohci->lock, flags); | 1800 | spin_lock_irqsave(&ohci->lock, flags); |
1800 | 1801 | ||
1801 | ohci->generation = generation; | 1802 | ohci->generation = generation; |
1802 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); | 1803 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); |
1803 | 1804 | ||
1804 | if (ohci->quirks & QUIRK_RESET_PACKET) | 1805 | if (ohci->quirks & QUIRK_RESET_PACKET) |
1805 | ohci->request_generation = generation; | 1806 | ohci->request_generation = generation; |
1806 | 1807 | ||
1807 | /* | 1808 | /* |
1808 | * This next bit is unrelated to the AT context stuff but we | 1809 | * This next bit is unrelated to the AT context stuff but we |
1809 | * have to do it under the spinlock also. If a new config rom | 1810 | * have to do it under the spinlock also. If a new config rom |
1810 | * was set up before this reset, the old one is now no longer | 1811 | * was set up before this reset, the old one is now no longer |
1811 | * in use and we can free it. Update the config rom pointers | 1812 | * in use and we can free it. Update the config rom pointers |
1812 | * to point to the current config rom and clear the | 1813 | * to point to the current config rom and clear the |
1813 | * next_config_rom pointer so a new update can take place. | 1814 | * next_config_rom pointer so a new update can take place. |
1814 | */ | 1815 | */ |
1815 | 1816 | ||
1816 | if (ohci->next_config_rom != NULL) { | 1817 | if (ohci->next_config_rom != NULL) { |
1817 | if (ohci->next_config_rom != ohci->config_rom) { | 1818 | if (ohci->next_config_rom != ohci->config_rom) { |
1818 | free_rom = ohci->config_rom; | 1819 | free_rom = ohci->config_rom; |
1819 | free_rom_bus = ohci->config_rom_bus; | 1820 | free_rom_bus = ohci->config_rom_bus; |
1820 | } | 1821 | } |
1821 | ohci->config_rom = ohci->next_config_rom; | 1822 | ohci->config_rom = ohci->next_config_rom; |
1822 | ohci->config_rom_bus = ohci->next_config_rom_bus; | 1823 | ohci->config_rom_bus = ohci->next_config_rom_bus; |
1823 | ohci->next_config_rom = NULL; | 1824 | ohci->next_config_rom = NULL; |
1824 | 1825 | ||
1825 | /* | 1826 | /* |
1826 | * Restore config_rom image and manually update | 1827 | * Restore config_rom image and manually update |
1827 | * config_rom registers. Writing the header quadlet | 1828 | * config_rom registers. Writing the header quadlet |
1828 | * will indicate that the config rom is ready, so we | 1829 | * will indicate that the config rom is ready, so we |
1829 | * do that last. | 1830 | * do that last. |
1830 | */ | 1831 | */ |
1831 | reg_write(ohci, OHCI1394_BusOptions, | 1832 | reg_write(ohci, OHCI1394_BusOptions, |
1832 | be32_to_cpu(ohci->config_rom[2])); | 1833 | be32_to_cpu(ohci->config_rom[2])); |
1833 | ohci->config_rom[0] = ohci->next_header; | 1834 | ohci->config_rom[0] = ohci->next_header; |
1834 | reg_write(ohci, OHCI1394_ConfigROMhdr, | 1835 | reg_write(ohci, OHCI1394_ConfigROMhdr, |
1835 | be32_to_cpu(ohci->next_header)); | 1836 | be32_to_cpu(ohci->next_header)); |
1836 | } | 1837 | } |
1837 | 1838 | ||
1838 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA | 1839 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA |
1839 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); | 1840 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); |
1840 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); | 1841 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); |
1841 | #endif | 1842 | #endif |
1842 | 1843 | ||
1843 | spin_unlock_irqrestore(&ohci->lock, flags); | 1844 | spin_unlock_irqrestore(&ohci->lock, flags); |
1844 | 1845 | ||
1845 | if (free_rom) | 1846 | if (free_rom) |
1846 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1847 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1847 | free_rom, free_rom_bus); | 1848 | free_rom, free_rom_bus); |
1848 | 1849 | ||
1849 | log_selfids(ohci->node_id, generation, | 1850 | log_selfids(ohci->node_id, generation, |
1850 | self_id_count, ohci->self_id_buffer); | 1851 | self_id_count, ohci->self_id_buffer); |
1851 | 1852 | ||
1852 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, | 1853 | fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, |
1853 | self_id_count, ohci->self_id_buffer, | 1854 | self_id_count, ohci->self_id_buffer, |
1854 | ohci->csr_state_setclear_abdicate); | 1855 | ohci->csr_state_setclear_abdicate); |
1855 | ohci->csr_state_setclear_abdicate = false; | 1856 | ohci->csr_state_setclear_abdicate = false; |
1856 | } | 1857 | } |
1857 | 1858 | ||
1858 | static irqreturn_t irq_handler(int irq, void *data) | 1859 | static irqreturn_t irq_handler(int irq, void *data) |
1859 | { | 1860 | { |
1860 | struct fw_ohci *ohci = data; | 1861 | struct fw_ohci *ohci = data; |
1861 | u32 event, iso_event; | 1862 | u32 event, iso_event; |
1862 | int i; | 1863 | int i; |
1863 | 1864 | ||
1864 | event = reg_read(ohci, OHCI1394_IntEventClear); | 1865 | event = reg_read(ohci, OHCI1394_IntEventClear); |
1865 | 1866 | ||
1866 | if (!event || !~event) | 1867 | if (!event || !~event) |
1867 | return IRQ_NONE; | 1868 | return IRQ_NONE; |
1868 | 1869 | ||
1869 | /* | 1870 | /* |
1870 | * busReset and postedWriteErr must not be cleared yet | 1871 | * busReset and postedWriteErr must not be cleared yet |
1871 | * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) | 1872 | * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) |
1872 | */ | 1873 | */ |
1873 | reg_write(ohci, OHCI1394_IntEventClear, | 1874 | reg_write(ohci, OHCI1394_IntEventClear, |
1874 | event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); | 1875 | event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); |
1875 | log_irqs(event); | 1876 | log_irqs(event); |
1876 | 1877 | ||
1877 | if (event & OHCI1394_selfIDComplete) | 1878 | if (event & OHCI1394_selfIDComplete) |
1878 | tasklet_schedule(&ohci->bus_reset_tasklet); | 1879 | tasklet_schedule(&ohci->bus_reset_tasklet); |
1879 | 1880 | ||
1880 | if (event & OHCI1394_RQPkt) | 1881 | if (event & OHCI1394_RQPkt) |
1881 | tasklet_schedule(&ohci->ar_request_ctx.tasklet); | 1882 | tasklet_schedule(&ohci->ar_request_ctx.tasklet); |
1882 | 1883 | ||
1883 | if (event & OHCI1394_RSPkt) | 1884 | if (event & OHCI1394_RSPkt) |
1884 | tasklet_schedule(&ohci->ar_response_ctx.tasklet); | 1885 | tasklet_schedule(&ohci->ar_response_ctx.tasklet); |
1885 | 1886 | ||
1886 | if (event & OHCI1394_reqTxComplete) | 1887 | if (event & OHCI1394_reqTxComplete) |
1887 | tasklet_schedule(&ohci->at_request_ctx.tasklet); | 1888 | tasklet_schedule(&ohci->at_request_ctx.tasklet); |
1888 | 1889 | ||
1889 | if (event & OHCI1394_respTxComplete) | 1890 | if (event & OHCI1394_respTxComplete) |
1890 | tasklet_schedule(&ohci->at_response_ctx.tasklet); | 1891 | tasklet_schedule(&ohci->at_response_ctx.tasklet); |
1891 | 1892 | ||
1892 | if (event & OHCI1394_isochRx) { | 1893 | if (event & OHCI1394_isochRx) { |
1893 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); | 1894 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); |
1894 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); | 1895 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); |
1895 | 1896 | ||
1896 | while (iso_event) { | 1897 | while (iso_event) { |
1897 | i = ffs(iso_event) - 1; | 1898 | i = ffs(iso_event) - 1; |
1898 | tasklet_schedule( | 1899 | tasklet_schedule( |
1899 | &ohci->ir_context_list[i].context.tasklet); | 1900 | &ohci->ir_context_list[i].context.tasklet); |
1900 | iso_event &= ~(1 << i); | 1901 | iso_event &= ~(1 << i); |
1901 | } | 1902 | } |
1902 | } | 1903 | } |
1903 | 1904 | ||
1904 | if (event & OHCI1394_isochTx) { | 1905 | if (event & OHCI1394_isochTx) { |
1905 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); | 1906 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); |
1906 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); | 1907 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); |
1907 | 1908 | ||
1908 | while (iso_event) { | 1909 | while (iso_event) { |
1909 | i = ffs(iso_event) - 1; | 1910 | i = ffs(iso_event) - 1; |
1910 | tasklet_schedule( | 1911 | tasklet_schedule( |
1911 | &ohci->it_context_list[i].context.tasklet); | 1912 | &ohci->it_context_list[i].context.tasklet); |
1912 | iso_event &= ~(1 << i); | 1913 | iso_event &= ~(1 << i); |
1913 | } | 1914 | } |
1914 | } | 1915 | } |
1915 | 1916 | ||
1916 | if (unlikely(event & OHCI1394_regAccessFail)) | 1917 | if (unlikely(event & OHCI1394_regAccessFail)) |
1917 | fw_error("Register access failure - " | 1918 | fw_error("Register access failure - " |
1918 | "please notify linux1394-devel@lists.sf.net\n"); | 1919 | "please notify linux1394-devel@lists.sf.net\n"); |
1919 | 1920 | ||
1920 | if (unlikely(event & OHCI1394_postedWriteErr)) { | 1921 | if (unlikely(event & OHCI1394_postedWriteErr)) { |
1921 | reg_read(ohci, OHCI1394_PostedWriteAddressHi); | 1922 | reg_read(ohci, OHCI1394_PostedWriteAddressHi); |
1922 | reg_read(ohci, OHCI1394_PostedWriteAddressLo); | 1923 | reg_read(ohci, OHCI1394_PostedWriteAddressLo); |
1923 | reg_write(ohci, OHCI1394_IntEventClear, | 1924 | reg_write(ohci, OHCI1394_IntEventClear, |
1924 | OHCI1394_postedWriteErr); | 1925 | OHCI1394_postedWriteErr); |
1925 | fw_error("PCI posted write error\n"); | 1926 | fw_error("PCI posted write error\n"); |
1926 | } | 1927 | } |
1927 | 1928 | ||
1928 | if (unlikely(event & OHCI1394_cycleTooLong)) { | 1929 | if (unlikely(event & OHCI1394_cycleTooLong)) { |
1929 | if (printk_ratelimit()) | 1930 | if (printk_ratelimit()) |
1930 | fw_notify("isochronous cycle too long\n"); | 1931 | fw_notify("isochronous cycle too long\n"); |
1931 | reg_write(ohci, OHCI1394_LinkControlSet, | 1932 | reg_write(ohci, OHCI1394_LinkControlSet, |
1932 | OHCI1394_LinkControl_cycleMaster); | 1933 | OHCI1394_LinkControl_cycleMaster); |
1933 | } | 1934 | } |
1934 | 1935 | ||
1935 | if (unlikely(event & OHCI1394_cycleInconsistent)) { | 1936 | if (unlikely(event & OHCI1394_cycleInconsistent)) { |
1936 | /* | 1937 | /* |
1937 | * We need to clear this event bit in order to make | 1938 | * We need to clear this event bit in order to make |
1938 | * cycleMatch isochronous I/O work. In theory we should | 1939 | * cycleMatch isochronous I/O work. In theory we should |
1939 | * stop active cycleMatch iso contexts now and restart | 1940 | * stop active cycleMatch iso contexts now and restart |
1940 | * them at least two cycles later. (FIXME?) | 1941 | * them at least two cycles later. (FIXME?) |
1941 | */ | 1942 | */ |
1942 | if (printk_ratelimit()) | 1943 | if (printk_ratelimit()) |
1943 | fw_notify("isochronous cycle inconsistent\n"); | 1944 | fw_notify("isochronous cycle inconsistent\n"); |
1944 | } | 1945 | } |
1945 | 1946 | ||
1946 | if (unlikely(event & OHCI1394_unrecoverableError)) | 1947 | if (unlikely(event & OHCI1394_unrecoverableError)) |
1947 | handle_dead_contexts(ohci); | 1948 | handle_dead_contexts(ohci); |
1948 | 1949 | ||
1949 | if (event & OHCI1394_cycle64Seconds) { | 1950 | if (event & OHCI1394_cycle64Seconds) { |
1950 | spin_lock(&ohci->lock); | 1951 | spin_lock(&ohci->lock); |
1951 | update_bus_time(ohci); | 1952 | update_bus_time(ohci); |
1952 | spin_unlock(&ohci->lock); | 1953 | spin_unlock(&ohci->lock); |
1953 | } else | 1954 | } else |
1954 | flush_writes(ohci); | 1955 | flush_writes(ohci); |
1955 | 1956 | ||
1956 | return IRQ_HANDLED; | 1957 | return IRQ_HANDLED; |
1957 | } | 1958 | } |
1958 | 1959 | ||
1959 | static int software_reset(struct fw_ohci *ohci) | 1960 | static int software_reset(struct fw_ohci *ohci) |
1960 | { | 1961 | { |
1961 | int i; | 1962 | int i; |
1962 | 1963 | ||
1963 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); | 1964 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); |
1964 | 1965 | ||
1965 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { | 1966 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { |
1966 | if ((reg_read(ohci, OHCI1394_HCControlSet) & | 1967 | if ((reg_read(ohci, OHCI1394_HCControlSet) & |
1967 | OHCI1394_HCControl_softReset) == 0) | 1968 | OHCI1394_HCControl_softReset) == 0) |
1968 | return 0; | 1969 | return 0; |
1969 | msleep(1); | 1970 | msleep(1); |
1970 | } | 1971 | } |
1971 | 1972 | ||
1972 | return -EBUSY; | 1973 | return -EBUSY; |
1973 | } | 1974 | } |
1974 | 1975 | ||
1975 | static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) | 1976 | static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) |
1976 | { | 1977 | { |
1977 | size_t size = length * 4; | 1978 | size_t size = length * 4; |
1978 | 1979 | ||
1979 | memcpy(dest, src, size); | 1980 | memcpy(dest, src, size); |
1980 | if (size < CONFIG_ROM_SIZE) | 1981 | if (size < CONFIG_ROM_SIZE) |
1981 | memset(&dest[length], 0, CONFIG_ROM_SIZE - size); | 1982 | memset(&dest[length], 0, CONFIG_ROM_SIZE - size); |
1982 | } | 1983 | } |
1983 | 1984 | ||
1984 | static int configure_1394a_enhancements(struct fw_ohci *ohci) | 1985 | static int configure_1394a_enhancements(struct fw_ohci *ohci) |
1985 | { | 1986 | { |
1986 | bool enable_1394a; | 1987 | bool enable_1394a; |
1987 | int ret, clear, set, offset; | 1988 | int ret, clear, set, offset; |
1988 | 1989 | ||
1989 | /* Check if the driver should configure link and PHY. */ | 1990 | /* Check if the driver should configure link and PHY. */ |
1990 | if (!(reg_read(ohci, OHCI1394_HCControlSet) & | 1991 | if (!(reg_read(ohci, OHCI1394_HCControlSet) & |
1991 | OHCI1394_HCControl_programPhyEnable)) | 1992 | OHCI1394_HCControl_programPhyEnable)) |
1992 | return 0; | 1993 | return 0; |
1993 | 1994 | ||
1994 | /* Paranoia: check whether the PHY supports 1394a, too. */ | 1995 | /* Paranoia: check whether the PHY supports 1394a, too. */ |
1995 | enable_1394a = false; | 1996 | enable_1394a = false; |
1996 | ret = read_phy_reg(ohci, 2); | 1997 | ret = read_phy_reg(ohci, 2); |
1997 | if (ret < 0) | 1998 | if (ret < 0) |
1998 | return ret; | 1999 | return ret; |
1999 | if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { | 2000 | if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { |
2000 | ret = read_paged_phy_reg(ohci, 1, 8); | 2001 | ret = read_paged_phy_reg(ohci, 1, 8); |
2001 | if (ret < 0) | 2002 | if (ret < 0) |
2002 | return ret; | 2003 | return ret; |
2003 | if (ret >= 1) | 2004 | if (ret >= 1) |
2004 | enable_1394a = true; | 2005 | enable_1394a = true; |
2005 | } | 2006 | } |
2006 | 2007 | ||
2007 | if (ohci->quirks & QUIRK_NO_1394A) | 2008 | if (ohci->quirks & QUIRK_NO_1394A) |
2008 | enable_1394a = false; | 2009 | enable_1394a = false; |
2009 | 2010 | ||
2010 | /* Configure PHY and link consistently. */ | 2011 | /* Configure PHY and link consistently. */ |
2011 | if (enable_1394a) { | 2012 | if (enable_1394a) { |
2012 | clear = 0; | 2013 | clear = 0; |
2013 | set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; | 2014 | set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
2014 | } else { | 2015 | } else { |
2015 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; | 2016 | clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; |
2016 | set = 0; | 2017 | set = 0; |
2017 | } | 2018 | } |
2018 | ret = update_phy_reg(ohci, 5, clear, set); | 2019 | ret = update_phy_reg(ohci, 5, clear, set); |
2019 | if (ret < 0) | 2020 | if (ret < 0) |
2020 | return ret; | 2021 | return ret; |
2021 | 2022 | ||
2022 | if (enable_1394a) | 2023 | if (enable_1394a) |
2023 | offset = OHCI1394_HCControlSet; | 2024 | offset = OHCI1394_HCControlSet; |
2024 | else | 2025 | else |
2025 | offset = OHCI1394_HCControlClear; | 2026 | offset = OHCI1394_HCControlClear; |
2026 | reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); | 2027 | reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); |
2027 | 2028 | ||
2028 | /* Clean up: configuration has been taken care of. */ | 2029 | /* Clean up: configuration has been taken care of. */ |
2029 | reg_write(ohci, OHCI1394_HCControlClear, | 2030 | reg_write(ohci, OHCI1394_HCControlClear, |
2030 | OHCI1394_HCControl_programPhyEnable); | 2031 | OHCI1394_HCControl_programPhyEnable); |
2031 | 2032 | ||
2032 | return 0; | 2033 | return 0; |
2033 | } | 2034 | } |
2034 | 2035 | ||
2035 | static int ohci_enable(struct fw_card *card, | 2036 | static int ohci_enable(struct fw_card *card, |
2036 | const __be32 *config_rom, size_t length) | 2037 | const __be32 *config_rom, size_t length) |
2037 | { | 2038 | { |
2038 | struct fw_ohci *ohci = fw_ohci(card); | 2039 | struct fw_ohci *ohci = fw_ohci(card); |
2039 | struct pci_dev *dev = to_pci_dev(card->device); | 2040 | struct pci_dev *dev = to_pci_dev(card->device); |
2040 | u32 lps, seconds, version, irqs; | 2041 | u32 lps, seconds, version, irqs; |
2041 | int i, ret; | 2042 | int i, ret; |
2042 | 2043 | ||
2043 | if (software_reset(ohci)) { | 2044 | if (software_reset(ohci)) { |
2044 | fw_error("Failed to reset ohci card.\n"); | 2045 | fw_error("Failed to reset ohci card.\n"); |
2045 | return -EBUSY; | 2046 | return -EBUSY; |
2046 | } | 2047 | } |
2047 | 2048 | ||
2048 | /* | 2049 | /* |
2049 | * Now enable LPS, which we need in order to start accessing | 2050 | * Now enable LPS, which we need in order to start accessing |
2050 | * most of the registers. In fact, on some cards (ALI M5251), | 2051 | * most of the registers. In fact, on some cards (ALI M5251), |
2051 | * accessing registers in the SClk domain without LPS enabled | 2052 | * accessing registers in the SClk domain without LPS enabled |
2052 | * will lock up the machine. Wait 50msec to make sure we have | 2053 | * will lock up the machine. Wait 50msec to make sure we have |
2053 | * full link enabled. However, with some cards (well, at least | 2054 | * full link enabled. However, with some cards (well, at least |
2054 | * a JMicron PCIe card), we have to try again sometimes. | 2055 | * a JMicron PCIe card), we have to try again sometimes. |
2055 | */ | 2056 | */ |
2056 | reg_write(ohci, OHCI1394_HCControlSet, | 2057 | reg_write(ohci, OHCI1394_HCControlSet, |
2057 | OHCI1394_HCControl_LPS | | 2058 | OHCI1394_HCControl_LPS | |
2058 | OHCI1394_HCControl_postedWriteEnable); | 2059 | OHCI1394_HCControl_postedWriteEnable); |
2059 | flush_writes(ohci); | 2060 | flush_writes(ohci); |
2060 | 2061 | ||
2061 | for (lps = 0, i = 0; !lps && i < 3; i++) { | 2062 | for (lps = 0, i = 0; !lps && i < 3; i++) { |
2062 | msleep(50); | 2063 | msleep(50); |
2063 | lps = reg_read(ohci, OHCI1394_HCControlSet) & | 2064 | lps = reg_read(ohci, OHCI1394_HCControlSet) & |
2064 | OHCI1394_HCControl_LPS; | 2065 | OHCI1394_HCControl_LPS; |
2065 | } | 2066 | } |
2066 | 2067 | ||
2067 | if (!lps) { | 2068 | if (!lps) { |
2068 | fw_error("Failed to set Link Power Status\n"); | 2069 | fw_error("Failed to set Link Power Status\n"); |
2069 | return -EIO; | 2070 | return -EIO; |
2070 | } | 2071 | } |
2071 | 2072 | ||
2072 | reg_write(ohci, OHCI1394_HCControlClear, | 2073 | reg_write(ohci, OHCI1394_HCControlClear, |
2073 | OHCI1394_HCControl_noByteSwapData); | 2074 | OHCI1394_HCControl_noByteSwapData); |
2074 | 2075 | ||
2075 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); | 2076 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); |
2076 | reg_write(ohci, OHCI1394_LinkControlSet, | 2077 | reg_write(ohci, OHCI1394_LinkControlSet, |
2077 | OHCI1394_LinkControl_cycleTimerEnable | | 2078 | OHCI1394_LinkControl_cycleTimerEnable | |
2078 | OHCI1394_LinkControl_cycleMaster); | 2079 | OHCI1394_LinkControl_cycleMaster); |
2079 | 2080 | ||
2080 | reg_write(ohci, OHCI1394_ATRetries, | 2081 | reg_write(ohci, OHCI1394_ATRetries, |
2081 | OHCI1394_MAX_AT_REQ_RETRIES | | 2082 | OHCI1394_MAX_AT_REQ_RETRIES | |
2082 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | | 2083 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | |
2083 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | | 2084 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | |
2084 | (200 << 16)); | 2085 | (200 << 16)); |
2085 | 2086 | ||
2086 | seconds = lower_32_bits(get_seconds()); | 2087 | seconds = lower_32_bits(get_seconds()); |
2087 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); | 2088 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); |
2088 | ohci->bus_time = seconds & ~0x3f; | 2089 | ohci->bus_time = seconds & ~0x3f; |
2089 | 2090 | ||
2090 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 2091 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
2091 | if (version >= OHCI_VERSION_1_1) { | 2092 | if (version >= OHCI_VERSION_1_1) { |
2092 | reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, | 2093 | reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, |
2093 | 0xfffffffe); | 2094 | 0xfffffffe); |
2094 | card->broadcast_channel_auto_allocated = true; | 2095 | card->broadcast_channel_auto_allocated = true; |
2095 | } | 2096 | } |
2096 | 2097 | ||
2097 | /* Get implemented bits of the priority arbitration request counter. */ | 2098 | /* Get implemented bits of the priority arbitration request counter. */ |
2098 | reg_write(ohci, OHCI1394_FairnessControl, 0x3f); | 2099 | reg_write(ohci, OHCI1394_FairnessControl, 0x3f); |
2099 | ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; | 2100 | ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; |
2100 | reg_write(ohci, OHCI1394_FairnessControl, 0); | 2101 | reg_write(ohci, OHCI1394_FairnessControl, 0); |
2101 | card->priority_budget_implemented = ohci->pri_req_max != 0; | 2102 | card->priority_budget_implemented = ohci->pri_req_max != 0; |
2102 | 2103 | ||
2103 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); | 2104 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); |
2104 | reg_write(ohci, OHCI1394_IntEventClear, ~0); | 2105 | reg_write(ohci, OHCI1394_IntEventClear, ~0); |
2105 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | 2106 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
2106 | 2107 | ||
2107 | ret = configure_1394a_enhancements(ohci); | 2108 | ret = configure_1394a_enhancements(ohci); |
2108 | if (ret < 0) | 2109 | if (ret < 0) |
2109 | return ret; | 2110 | return ret; |
2110 | 2111 | ||
2111 | /* Activate link_on bit and contender bit in our self ID packets.*/ | 2112 | /* Activate link_on bit and contender bit in our self ID packets.*/ |
2112 | ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); | 2113 | ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); |
2113 | if (ret < 0) | 2114 | if (ret < 0) |
2114 | return ret; | 2115 | return ret; |
2115 | 2116 | ||
2116 | /* | 2117 | /* |
2117 | * When the link is not yet enabled, the atomic config rom | 2118 | * When the link is not yet enabled, the atomic config rom |
2118 | * update mechanism described below in ohci_set_config_rom() | 2119 | * update mechanism described below in ohci_set_config_rom() |
2119 | * is not active. We have to update ConfigRomHeader and | 2120 | * is not active. We have to update ConfigRomHeader and |
2120 | * BusOptions manually, and the write to ConfigROMmap takes | 2121 | * BusOptions manually, and the write to ConfigROMmap takes |
2121 | * effect immediately. We tie this to the enabling of the | 2122 | * effect immediately. We tie this to the enabling of the |
2122 | * link, so we have a valid config rom before enabling - the | 2123 | * link, so we have a valid config rom before enabling - the |
2123 | * OHCI requires that ConfigROMhdr and BusOptions have valid | 2124 | * OHCI requires that ConfigROMhdr and BusOptions have valid |
2124 | * values before enabling. | 2125 | * values before enabling. |
2125 | * | 2126 | * |
2126 | * However, when the ConfigROMmap is written, some controllers | 2127 | * However, when the ConfigROMmap is written, some controllers |
2127 | * always read back quadlets 0 and 2 from the config rom to | 2128 | * always read back quadlets 0 and 2 from the config rom to |
2128 | * the ConfigRomHeader and BusOptions registers on bus reset. | 2129 | * the ConfigRomHeader and BusOptions registers on bus reset. |
2129 | * They shouldn't do that in this initial case where the link | 2130 | * They shouldn't do that in this initial case where the link |
2130 | * isn't enabled. This means we have to use the same | 2131 | * isn't enabled. This means we have to use the same |
2131 | * workaround here, setting the bus header to 0 and then write | 2132 | * workaround here, setting the bus header to 0 and then write |
2132 | * the right values in the bus reset tasklet. | 2133 | * the right values in the bus reset tasklet. |
2133 | */ | 2134 | */ |
2134 | 2135 | ||
2135 | if (config_rom) { | 2136 | if (config_rom) { |
2136 | ohci->next_config_rom = | 2137 | ohci->next_config_rom = |
2137 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2138 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2138 | &ohci->next_config_rom_bus, | 2139 | &ohci->next_config_rom_bus, |
2139 | GFP_KERNEL); | 2140 | GFP_KERNEL); |
2140 | if (ohci->next_config_rom == NULL) | 2141 | if (ohci->next_config_rom == NULL) |
2141 | return -ENOMEM; | 2142 | return -ENOMEM; |
2142 | 2143 | ||
2143 | copy_config_rom(ohci->next_config_rom, config_rom, length); | 2144 | copy_config_rom(ohci->next_config_rom, config_rom, length); |
2144 | } else { | 2145 | } else { |
2145 | /* | 2146 | /* |
2146 | * In the suspend case, config_rom is NULL, which | 2147 | * In the suspend case, config_rom is NULL, which |
2147 | * means that we just reuse the old config rom. | 2148 | * means that we just reuse the old config rom. |
2148 | */ | 2149 | */ |
2149 | ohci->next_config_rom = ohci->config_rom; | 2150 | ohci->next_config_rom = ohci->config_rom; |
2150 | ohci->next_config_rom_bus = ohci->config_rom_bus; | 2151 | ohci->next_config_rom_bus = ohci->config_rom_bus; |
2151 | } | 2152 | } |
2152 | 2153 | ||
2153 | ohci->next_header = ohci->next_config_rom[0]; | 2154 | ohci->next_header = ohci->next_config_rom[0]; |
2154 | ohci->next_config_rom[0] = 0; | 2155 | ohci->next_config_rom[0] = 0; |
2155 | reg_write(ohci, OHCI1394_ConfigROMhdr, 0); | 2156 | reg_write(ohci, OHCI1394_ConfigROMhdr, 0); |
2156 | reg_write(ohci, OHCI1394_BusOptions, | 2157 | reg_write(ohci, OHCI1394_BusOptions, |
2157 | be32_to_cpu(ohci->next_config_rom[2])); | 2158 | be32_to_cpu(ohci->next_config_rom[2])); |
2158 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); | 2159 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); |
2159 | 2160 | ||
2160 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | 2161 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); |
2161 | 2162 | ||
2162 | if (!(ohci->quirks & QUIRK_NO_MSI)) | 2163 | if (!(ohci->quirks & QUIRK_NO_MSI)) |
2163 | pci_enable_msi(dev); | 2164 | pci_enable_msi(dev); |
2164 | if (request_irq(dev->irq, irq_handler, | 2165 | if (request_irq(dev->irq, irq_handler, |
2165 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, | 2166 | pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, |
2166 | ohci_driver_name, ohci)) { | 2167 | ohci_driver_name, ohci)) { |
2167 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); | 2168 | fw_error("Failed to allocate interrupt %d.\n", dev->irq); |
2168 | pci_disable_msi(dev); | 2169 | pci_disable_msi(dev); |
2169 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2170 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2170 | ohci->config_rom, ohci->config_rom_bus); | 2171 | ohci->config_rom, ohci->config_rom_bus); |
2171 | return -EIO; | 2172 | return -EIO; |
2172 | } | 2173 | } |
2173 | 2174 | ||
2174 | irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | 2175 | irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | |
2175 | OHCI1394_RQPkt | OHCI1394_RSPkt | | 2176 | OHCI1394_RQPkt | OHCI1394_RSPkt | |
2176 | OHCI1394_isochTx | OHCI1394_isochRx | | 2177 | OHCI1394_isochTx | OHCI1394_isochRx | |
2177 | OHCI1394_postedWriteErr | | 2178 | OHCI1394_postedWriteErr | |
2178 | OHCI1394_selfIDComplete | | 2179 | OHCI1394_selfIDComplete | |
2179 | OHCI1394_regAccessFail | | 2180 | OHCI1394_regAccessFail | |
2180 | OHCI1394_cycle64Seconds | | 2181 | OHCI1394_cycle64Seconds | |
2181 | OHCI1394_cycleInconsistent | | 2182 | OHCI1394_cycleInconsistent | |
2182 | OHCI1394_unrecoverableError | | 2183 | OHCI1394_unrecoverableError | |
2183 | OHCI1394_cycleTooLong | | 2184 | OHCI1394_cycleTooLong | |
2184 | OHCI1394_masterIntEnable; | 2185 | OHCI1394_masterIntEnable; |
2185 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) | 2186 | if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) |
2186 | irqs |= OHCI1394_busReset; | 2187 | irqs |= OHCI1394_busReset; |
2187 | reg_write(ohci, OHCI1394_IntMaskSet, irqs); | 2188 | reg_write(ohci, OHCI1394_IntMaskSet, irqs); |
2188 | 2189 | ||
2189 | reg_write(ohci, OHCI1394_HCControlSet, | 2190 | reg_write(ohci, OHCI1394_HCControlSet, |
2190 | OHCI1394_HCControl_linkEnable | | 2191 | OHCI1394_HCControl_linkEnable | |
2191 | OHCI1394_HCControl_BIBimageValid); | 2192 | OHCI1394_HCControl_BIBimageValid); |
2192 | 2193 | ||
2193 | reg_write(ohci, OHCI1394_LinkControlSet, | 2194 | reg_write(ohci, OHCI1394_LinkControlSet, |
2194 | OHCI1394_LinkControl_rcvSelfID | | 2195 | OHCI1394_LinkControl_rcvSelfID | |
2195 | OHCI1394_LinkControl_rcvPhyPkt); | 2196 | OHCI1394_LinkControl_rcvPhyPkt); |
2196 | 2197 | ||
2197 | ar_context_run(&ohci->ar_request_ctx); | 2198 | ar_context_run(&ohci->ar_request_ctx); |
2198 | ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */ | 2199 | ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */ |
2199 | 2200 | ||
2200 | /* We are ready to go, reset bus to finish initialization. */ | 2201 | /* We are ready to go, reset bus to finish initialization. */ |
2201 | fw_schedule_bus_reset(&ohci->card, false, true); | 2202 | fw_schedule_bus_reset(&ohci->card, false, true); |
2202 | 2203 | ||
2203 | return 0; | 2204 | return 0; |
2204 | } | 2205 | } |
2205 | 2206 | ||
2206 | static int ohci_set_config_rom(struct fw_card *card, | 2207 | static int ohci_set_config_rom(struct fw_card *card, |
2207 | const __be32 *config_rom, size_t length) | 2208 | const __be32 *config_rom, size_t length) |
2208 | { | 2209 | { |
2209 | struct fw_ohci *ohci; | 2210 | struct fw_ohci *ohci; |
2210 | unsigned long flags; | 2211 | unsigned long flags; |
2211 | __be32 *next_config_rom; | 2212 | __be32 *next_config_rom; |
2212 | dma_addr_t uninitialized_var(next_config_rom_bus); | 2213 | dma_addr_t uninitialized_var(next_config_rom_bus); |
2213 | 2214 | ||
2214 | ohci = fw_ohci(card); | 2215 | ohci = fw_ohci(card); |
2215 | 2216 | ||
2216 | /* | 2217 | /* |
2217 | * When the OHCI controller is enabled, the config rom update | 2218 | * When the OHCI controller is enabled, the config rom update |
2218 | * mechanism is a bit tricky, but easy enough to use. See | 2219 | * mechanism is a bit tricky, but easy enough to use. See |
2219 | * section 5.5.6 in the OHCI specification. | 2220 | * section 5.5.6 in the OHCI specification. |
2220 | * | 2221 | * |
2221 | * The OHCI controller caches the new config rom address in a | 2222 | * The OHCI controller caches the new config rom address in a |
2222 | * shadow register (ConfigROMmapNext) and needs a bus reset | 2223 | * shadow register (ConfigROMmapNext) and needs a bus reset |
2223 | * for the changes to take place. When the bus reset is | 2224 | * for the changes to take place. When the bus reset is |
2224 | * detected, the controller loads the new values for the | 2225 | * detected, the controller loads the new values for the |
2225 | * ConfigRomHeader and BusOptions registers from the specified | 2226 | * ConfigRomHeader and BusOptions registers from the specified |
2226 | * config rom and loads ConfigROMmap from the ConfigROMmapNext | 2227 | * config rom and loads ConfigROMmap from the ConfigROMmapNext |
2227 | * shadow register. All automatically and atomically. | 2228 | * shadow register. All automatically and atomically. |
2228 | * | 2229 | * |
2229 | * Now, there's a twist to this story. The automatic load of | 2230 | * Now, there's a twist to this story. The automatic load of |
2230 | * ConfigRomHeader and BusOptions doesn't honor the | 2231 | * ConfigRomHeader and BusOptions doesn't honor the |
2231 | * noByteSwapData bit, so with a be32 config rom, the | 2232 | * noByteSwapData bit, so with a be32 config rom, the |
2232 | * controller will load be32 values in to these registers | 2233 | * controller will load be32 values in to these registers |
2233 | * during the atomic update, even on litte endian | 2234 | * during the atomic update, even on litte endian |
2234 | * architectures. The workaround we use is to put a 0 in the | 2235 | * architectures. The workaround we use is to put a 0 in the |
2235 | * header quadlet; 0 is endian agnostic and means that the | 2236 | * header quadlet; 0 is endian agnostic and means that the |
2236 | * config rom isn't ready yet. In the bus reset tasklet we | 2237 | * config rom isn't ready yet. In the bus reset tasklet we |
2237 | * then set up the real values for the two registers. | 2238 | * then set up the real values for the two registers. |
2238 | * | 2239 | * |
2239 | * We use ohci->lock to avoid racing with the code that sets | 2240 | * We use ohci->lock to avoid racing with the code that sets |
2240 | * ohci->next_config_rom to NULL (see bus_reset_tasklet). | 2241 | * ohci->next_config_rom to NULL (see bus_reset_tasklet). |
2241 | */ | 2242 | */ |
2242 | 2243 | ||
2243 | next_config_rom = | 2244 | next_config_rom = |
2244 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2245 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2245 | &next_config_rom_bus, GFP_KERNEL); | 2246 | &next_config_rom_bus, GFP_KERNEL); |
2246 | if (next_config_rom == NULL) | 2247 | if (next_config_rom == NULL) |
2247 | return -ENOMEM; | 2248 | return -ENOMEM; |
2248 | 2249 | ||
2249 | spin_lock_irqsave(&ohci->lock, flags); | 2250 | spin_lock_irqsave(&ohci->lock, flags); |
2250 | 2251 | ||
2251 | /* | 2252 | /* |
2252 | * If there is not an already pending config_rom update, | 2253 | * If there is not an already pending config_rom update, |
2253 | * push our new allocation into the ohci->next_config_rom | 2254 | * push our new allocation into the ohci->next_config_rom |
2254 | * and then mark the local variable as null so that we | 2255 | * and then mark the local variable as null so that we |
2255 | * won't deallocate the new buffer. | 2256 | * won't deallocate the new buffer. |
2256 | * | 2257 | * |
2257 | * OTOH, if there is a pending config_rom update, just | 2258 | * OTOH, if there is a pending config_rom update, just |
2258 | * use that buffer with the new config_rom data, and | 2259 | * use that buffer with the new config_rom data, and |
2259 | * let this routine free the unused DMA allocation. | 2260 | * let this routine free the unused DMA allocation. |
2260 | */ | 2261 | */ |
2261 | 2262 | ||
2262 | if (ohci->next_config_rom == NULL) { | 2263 | if (ohci->next_config_rom == NULL) { |
2263 | ohci->next_config_rom = next_config_rom; | 2264 | ohci->next_config_rom = next_config_rom; |
2264 | ohci->next_config_rom_bus = next_config_rom_bus; | 2265 | ohci->next_config_rom_bus = next_config_rom_bus; |
2265 | next_config_rom = NULL; | 2266 | next_config_rom = NULL; |
2266 | } | 2267 | } |
2267 | 2268 | ||
2268 | copy_config_rom(ohci->next_config_rom, config_rom, length); | 2269 | copy_config_rom(ohci->next_config_rom, config_rom, length); |
2269 | 2270 | ||
2270 | ohci->next_header = config_rom[0]; | 2271 | ohci->next_header = config_rom[0]; |
2271 | ohci->next_config_rom[0] = 0; | 2272 | ohci->next_config_rom[0] = 0; |
2272 | 2273 | ||
2273 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); | 2274 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); |
2274 | 2275 | ||
2275 | spin_unlock_irqrestore(&ohci->lock, flags); | 2276 | spin_unlock_irqrestore(&ohci->lock, flags); |
2276 | 2277 | ||
2277 | /* If we didn't use the DMA allocation, delete it. */ | 2278 | /* If we didn't use the DMA allocation, delete it. */ |
2278 | if (next_config_rom != NULL) | 2279 | if (next_config_rom != NULL) |
2279 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 2280 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
2280 | next_config_rom, next_config_rom_bus); | 2281 | next_config_rom, next_config_rom_bus); |
2281 | 2282 | ||
2282 | /* | 2283 | /* |
2283 | * Now initiate a bus reset to have the changes take | 2284 | * Now initiate a bus reset to have the changes take |
2284 | * effect. We clean up the old config rom memory and DMA | 2285 | * effect. We clean up the old config rom memory and DMA |
2285 | * mappings in the bus reset tasklet, since the OHCI | 2286 | * mappings in the bus reset tasklet, since the OHCI |
2286 | * controller could need to access it before the bus reset | 2287 | * controller could need to access it before the bus reset |
2287 | * takes effect. | 2288 | * takes effect. |
2288 | */ | 2289 | */ |
2289 | 2290 | ||
2290 | fw_schedule_bus_reset(&ohci->card, true, true); | 2291 | fw_schedule_bus_reset(&ohci->card, true, true); |
2291 | 2292 | ||
2292 | return 0; | 2293 | return 0; |
2293 | } | 2294 | } |
2294 | 2295 | ||
2295 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) | 2296 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) |
2296 | { | 2297 | { |
2297 | struct fw_ohci *ohci = fw_ohci(card); | 2298 | struct fw_ohci *ohci = fw_ohci(card); |
2298 | 2299 | ||
2299 | at_context_transmit(&ohci->at_request_ctx, packet); | 2300 | at_context_transmit(&ohci->at_request_ctx, packet); |
2300 | } | 2301 | } |
2301 | 2302 | ||
2302 | static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) | 2303 | static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) |
2303 | { | 2304 | { |
2304 | struct fw_ohci *ohci = fw_ohci(card); | 2305 | struct fw_ohci *ohci = fw_ohci(card); |
2305 | 2306 | ||
2306 | at_context_transmit(&ohci->at_response_ctx, packet); | 2307 | at_context_transmit(&ohci->at_response_ctx, packet); |
2307 | } | 2308 | } |
2308 | 2309 | ||
2309 | static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) | 2310 | static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) |
2310 | { | 2311 | { |
2311 | struct fw_ohci *ohci = fw_ohci(card); | 2312 | struct fw_ohci *ohci = fw_ohci(card); |
2312 | struct context *ctx = &ohci->at_request_ctx; | 2313 | struct context *ctx = &ohci->at_request_ctx; |
2313 | struct driver_data *driver_data = packet->driver_data; | 2314 | struct driver_data *driver_data = packet->driver_data; |
2314 | int ret = -ENOENT; | 2315 | int ret = -ENOENT; |
2315 | 2316 | ||
2316 | tasklet_disable(&ctx->tasklet); | 2317 | tasklet_disable(&ctx->tasklet); |
2317 | 2318 | ||
2318 | if (packet->ack != 0) | 2319 | if (packet->ack != 0) |
2319 | goto out; | 2320 | goto out; |
2320 | 2321 | ||
2321 | if (packet->payload_mapped) | 2322 | if (packet->payload_mapped) |
2322 | dma_unmap_single(ohci->card.device, packet->payload_bus, | 2323 | dma_unmap_single(ohci->card.device, packet->payload_bus, |
2323 | packet->payload_length, DMA_TO_DEVICE); | 2324 | packet->payload_length, DMA_TO_DEVICE); |
2324 | 2325 | ||
2325 | log_ar_at_event('T', packet->speed, packet->header, 0x20); | 2326 | log_ar_at_event('T', packet->speed, packet->header, 0x20); |
2326 | driver_data->packet = NULL; | 2327 | driver_data->packet = NULL; |
2327 | packet->ack = RCODE_CANCELLED; | 2328 | packet->ack = RCODE_CANCELLED; |
2328 | packet->callback(packet, &ohci->card, packet->ack); | 2329 | packet->callback(packet, &ohci->card, packet->ack); |
2329 | ret = 0; | 2330 | ret = 0; |
2330 | out: | 2331 | out: |
2331 | tasklet_enable(&ctx->tasklet); | 2332 | tasklet_enable(&ctx->tasklet); |
2332 | 2333 | ||
2333 | return ret; | 2334 | return ret; |
2334 | } | 2335 | } |
2335 | 2336 | ||
2336 | static int ohci_enable_phys_dma(struct fw_card *card, | 2337 | static int ohci_enable_phys_dma(struct fw_card *card, |
2337 | int node_id, int generation) | 2338 | int node_id, int generation) |
2338 | { | 2339 | { |
2339 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA | 2340 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA |
2340 | return 0; | 2341 | return 0; |
2341 | #else | 2342 | #else |
2342 | struct fw_ohci *ohci = fw_ohci(card); | 2343 | struct fw_ohci *ohci = fw_ohci(card); |
2343 | unsigned long flags; | 2344 | unsigned long flags; |
2344 | int n, ret = 0; | 2345 | int n, ret = 0; |
2345 | 2346 | ||
2346 | /* | 2347 | /* |
2347 | * FIXME: Make sure this bitmask is cleared when we clear the busReset | 2348 | * FIXME: Make sure this bitmask is cleared when we clear the busReset |
2348 | * interrupt bit. Clear physReqResourceAllBuses on bus reset. | 2349 | * interrupt bit. Clear physReqResourceAllBuses on bus reset. |
2349 | */ | 2350 | */ |
2350 | 2351 | ||
2351 | spin_lock_irqsave(&ohci->lock, flags); | 2352 | spin_lock_irqsave(&ohci->lock, flags); |
2352 | 2353 | ||
2353 | if (ohci->generation != generation) { | 2354 | if (ohci->generation != generation) { |
2354 | ret = -ESTALE; | 2355 | ret = -ESTALE; |
2355 | goto out; | 2356 | goto out; |
2356 | } | 2357 | } |
2357 | 2358 | ||
2358 | /* | 2359 | /* |
2359 | * Note, if the node ID contains a non-local bus ID, physical DMA is | 2360 | * Note, if the node ID contains a non-local bus ID, physical DMA is |
2360 | * enabled for _all_ nodes on remote buses. | 2361 | * enabled for _all_ nodes on remote buses. |
2361 | */ | 2362 | */ |
2362 | 2363 | ||
2363 | n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; | 2364 | n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; |
2364 | if (n < 32) | 2365 | if (n < 32) |
2365 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); | 2366 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); |
2366 | else | 2367 | else |
2367 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); | 2368 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); |
2368 | 2369 | ||
2369 | flush_writes(ohci); | 2370 | flush_writes(ohci); |
2370 | out: | 2371 | out: |
2371 | spin_unlock_irqrestore(&ohci->lock, flags); | 2372 | spin_unlock_irqrestore(&ohci->lock, flags); |
2372 | 2373 | ||
2373 | return ret; | 2374 | return ret; |
2374 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ | 2375 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ |
2375 | } | 2376 | } |
2376 | 2377 | ||
2377 | static u32 ohci_read_csr(struct fw_card *card, int csr_offset) | 2378 | static u32 ohci_read_csr(struct fw_card *card, int csr_offset) |
2378 | { | 2379 | { |
2379 | struct fw_ohci *ohci = fw_ohci(card); | 2380 | struct fw_ohci *ohci = fw_ohci(card); |
2380 | unsigned long flags; | 2381 | unsigned long flags; |
2381 | u32 value; | 2382 | u32 value; |
2382 | 2383 | ||
2383 | switch (csr_offset) { | 2384 | switch (csr_offset) { |
2384 | case CSR_STATE_CLEAR: | 2385 | case CSR_STATE_CLEAR: |
2385 | case CSR_STATE_SET: | 2386 | case CSR_STATE_SET: |
2386 | if (ohci->is_root && | 2387 | if (ohci->is_root && |
2387 | (reg_read(ohci, OHCI1394_LinkControlSet) & | 2388 | (reg_read(ohci, OHCI1394_LinkControlSet) & |
2388 | OHCI1394_LinkControl_cycleMaster)) | 2389 | OHCI1394_LinkControl_cycleMaster)) |
2389 | value = CSR_STATE_BIT_CMSTR; | 2390 | value = CSR_STATE_BIT_CMSTR; |
2390 | else | 2391 | else |
2391 | value = 0; | 2392 | value = 0; |
2392 | if (ohci->csr_state_setclear_abdicate) | 2393 | if (ohci->csr_state_setclear_abdicate) |
2393 | value |= CSR_STATE_BIT_ABDICATE; | 2394 | value |= CSR_STATE_BIT_ABDICATE; |
2394 | 2395 | ||
2395 | return value; | 2396 | return value; |
2396 | 2397 | ||
2397 | case CSR_NODE_IDS: | 2398 | case CSR_NODE_IDS: |
2398 | return reg_read(ohci, OHCI1394_NodeID) << 16; | 2399 | return reg_read(ohci, OHCI1394_NodeID) << 16; |
2399 | 2400 | ||
2400 | case CSR_CYCLE_TIME: | 2401 | case CSR_CYCLE_TIME: |
2401 | return get_cycle_time(ohci); | 2402 | return get_cycle_time(ohci); |
2402 | 2403 | ||
2403 | case CSR_BUS_TIME: | 2404 | case CSR_BUS_TIME: |
2404 | /* | 2405 | /* |
2405 | * We might be called just after the cycle timer has wrapped | 2406 | * We might be called just after the cycle timer has wrapped |
2406 | * around but just before the cycle64Seconds handler, so we | 2407 | * around but just before the cycle64Seconds handler, so we |
2407 | * better check here, too, if the bus time needs to be updated. | 2408 | * better check here, too, if the bus time needs to be updated. |
2408 | */ | 2409 | */ |
2409 | spin_lock_irqsave(&ohci->lock, flags); | 2410 | spin_lock_irqsave(&ohci->lock, flags); |
2410 | value = update_bus_time(ohci); | 2411 | value = update_bus_time(ohci); |
2411 | spin_unlock_irqrestore(&ohci->lock, flags); | 2412 | spin_unlock_irqrestore(&ohci->lock, flags); |
2412 | return value; | 2413 | return value; |
2413 | 2414 | ||
2414 | case CSR_BUSY_TIMEOUT: | 2415 | case CSR_BUSY_TIMEOUT: |
2415 | value = reg_read(ohci, OHCI1394_ATRetries); | 2416 | value = reg_read(ohci, OHCI1394_ATRetries); |
2416 | return (value >> 4) & 0x0ffff00f; | 2417 | return (value >> 4) & 0x0ffff00f; |
2417 | 2418 | ||
2418 | case CSR_PRIORITY_BUDGET: | 2419 | case CSR_PRIORITY_BUDGET: |
2419 | return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | | 2420 | return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | |
2420 | (ohci->pri_req_max << 8); | 2421 | (ohci->pri_req_max << 8); |
2421 | 2422 | ||
2422 | default: | 2423 | default: |
2423 | WARN_ON(1); | 2424 | WARN_ON(1); |
2424 | return 0; | 2425 | return 0; |
2425 | } | 2426 | } |
2426 | } | 2427 | } |
2427 | 2428 | ||
2428 | static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) | 2429 | static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) |
2429 | { | 2430 | { |
2430 | struct fw_ohci *ohci = fw_ohci(card); | 2431 | struct fw_ohci *ohci = fw_ohci(card); |
2431 | unsigned long flags; | 2432 | unsigned long flags; |
2432 | 2433 | ||
2433 | switch (csr_offset) { | 2434 | switch (csr_offset) { |
2434 | case CSR_STATE_CLEAR: | 2435 | case CSR_STATE_CLEAR: |
2435 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { | 2436 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
2436 | reg_write(ohci, OHCI1394_LinkControlClear, | 2437 | reg_write(ohci, OHCI1394_LinkControlClear, |
2437 | OHCI1394_LinkControl_cycleMaster); | 2438 | OHCI1394_LinkControl_cycleMaster); |
2438 | flush_writes(ohci); | 2439 | flush_writes(ohci); |
2439 | } | 2440 | } |
2440 | if (value & CSR_STATE_BIT_ABDICATE) | 2441 | if (value & CSR_STATE_BIT_ABDICATE) |
2441 | ohci->csr_state_setclear_abdicate = false; | 2442 | ohci->csr_state_setclear_abdicate = false; |
2442 | break; | 2443 | break; |
2443 | 2444 | ||
2444 | case CSR_STATE_SET: | 2445 | case CSR_STATE_SET: |
2445 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { | 2446 | if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { |
2446 | reg_write(ohci, OHCI1394_LinkControlSet, | 2447 | reg_write(ohci, OHCI1394_LinkControlSet, |
2447 | OHCI1394_LinkControl_cycleMaster); | 2448 | OHCI1394_LinkControl_cycleMaster); |
2448 | flush_writes(ohci); | 2449 | flush_writes(ohci); |
2449 | } | 2450 | } |
2450 | if (value & CSR_STATE_BIT_ABDICATE) | 2451 | if (value & CSR_STATE_BIT_ABDICATE) |
2451 | ohci->csr_state_setclear_abdicate = true; | 2452 | ohci->csr_state_setclear_abdicate = true; |
2452 | break; | 2453 | break; |
2453 | 2454 | ||
2454 | case CSR_NODE_IDS: | 2455 | case CSR_NODE_IDS: |
2455 | reg_write(ohci, OHCI1394_NodeID, value >> 16); | 2456 | reg_write(ohci, OHCI1394_NodeID, value >> 16); |
2456 | flush_writes(ohci); | 2457 | flush_writes(ohci); |
2457 | break; | 2458 | break; |
2458 | 2459 | ||
2459 | case CSR_CYCLE_TIME: | 2460 | case CSR_CYCLE_TIME: |
2460 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); | 2461 | reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); |
2461 | reg_write(ohci, OHCI1394_IntEventSet, | 2462 | reg_write(ohci, OHCI1394_IntEventSet, |
2462 | OHCI1394_cycleInconsistent); | 2463 | OHCI1394_cycleInconsistent); |
2463 | flush_writes(ohci); | 2464 | flush_writes(ohci); |
2464 | break; | 2465 | break; |
2465 | 2466 | ||
2466 | case CSR_BUS_TIME: | 2467 | case CSR_BUS_TIME: |
2467 | spin_lock_irqsave(&ohci->lock, flags); | 2468 | spin_lock_irqsave(&ohci->lock, flags); |
2468 | ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); | 2469 | ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); |
2469 | spin_unlock_irqrestore(&ohci->lock, flags); | 2470 | spin_unlock_irqrestore(&ohci->lock, flags); |
2470 | break; | 2471 | break; |
2471 | 2472 | ||
2472 | case CSR_BUSY_TIMEOUT: | 2473 | case CSR_BUSY_TIMEOUT: |
2473 | value = (value & 0xf) | ((value & 0xf) << 4) | | 2474 | value = (value & 0xf) | ((value & 0xf) << 4) | |
2474 | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); | 2475 | ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); |
2475 | reg_write(ohci, OHCI1394_ATRetries, value); | 2476 | reg_write(ohci, OHCI1394_ATRetries, value); |
2476 | flush_writes(ohci); | 2477 | flush_writes(ohci); |
2477 | break; | 2478 | break; |
2478 | 2479 | ||
2479 | case CSR_PRIORITY_BUDGET: | 2480 | case CSR_PRIORITY_BUDGET: |
2480 | reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); | 2481 | reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); |
2481 | flush_writes(ohci); | 2482 | flush_writes(ohci); |
2482 | break; | 2483 | break; |
2483 | 2484 | ||
2484 | default: | 2485 | default: |
2485 | WARN_ON(1); | 2486 | WARN_ON(1); |
2486 | break; | 2487 | break; |
2487 | } | 2488 | } |
2488 | } | 2489 | } |
2489 | 2490 | ||
2490 | static void copy_iso_headers(struct iso_context *ctx, void *p) | 2491 | static void copy_iso_headers(struct iso_context *ctx, void *p) |
2491 | { | 2492 | { |
2492 | int i = ctx->header_length; | 2493 | int i = ctx->header_length; |
2493 | 2494 | ||
2494 | if (i + ctx->base.header_size > PAGE_SIZE) | 2495 | if (i + ctx->base.header_size > PAGE_SIZE) |
2495 | return; | 2496 | return; |
2496 | 2497 | ||
2497 | /* | 2498 | /* |
2498 | * The iso header is byteswapped to little endian by | 2499 | * The iso header is byteswapped to little endian by |
2499 | * the controller, but the remaining header quadlets | 2500 | * the controller, but the remaining header quadlets |
2500 | * are big endian. We want to present all the headers | 2501 | * are big endian. We want to present all the headers |
2501 | * as big endian, so we have to swap the first quadlet. | 2502 | * as big endian, so we have to swap the first quadlet. |
2502 | */ | 2503 | */ |
2503 | if (ctx->base.header_size > 0) | 2504 | if (ctx->base.header_size > 0) |
2504 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); | 2505 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); |
2505 | if (ctx->base.header_size > 4) | 2506 | if (ctx->base.header_size > 4) |
2506 | *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); | 2507 | *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); |
2507 | if (ctx->base.header_size > 8) | 2508 | if (ctx->base.header_size > 8) |
2508 | memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); | 2509 | memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); |
2509 | ctx->header_length += ctx->base.header_size; | 2510 | ctx->header_length += ctx->base.header_size; |
2510 | } | 2511 | } |
2511 | 2512 | ||
2512 | static int handle_ir_packet_per_buffer(struct context *context, | 2513 | static int handle_ir_packet_per_buffer(struct context *context, |
2513 | struct descriptor *d, | 2514 | struct descriptor *d, |
2514 | struct descriptor *last) | 2515 | struct descriptor *last) |
2515 | { | 2516 | { |
2516 | struct iso_context *ctx = | 2517 | struct iso_context *ctx = |
2517 | container_of(context, struct iso_context, context); | 2518 | container_of(context, struct iso_context, context); |
2518 | struct descriptor *pd; | 2519 | struct descriptor *pd; |
2519 | __le32 *ir_header; | 2520 | __le32 *ir_header; |
2520 | void *p; | 2521 | void *p; |
2521 | 2522 | ||
2522 | for (pd = d; pd <= last; pd++) | 2523 | for (pd = d; pd <= last; pd++) |
2523 | if (pd->transfer_status) | 2524 | if (pd->transfer_status) |
2524 | break; | 2525 | break; |
2525 | if (pd > last) | 2526 | if (pd > last) |
2526 | /* Descriptor(s) not done yet, stop iteration */ | 2527 | /* Descriptor(s) not done yet, stop iteration */ |
2527 | return 0; | 2528 | return 0; |
2528 | 2529 | ||
2529 | p = last + 1; | 2530 | p = last + 1; |
2530 | copy_iso_headers(ctx, p); | 2531 | copy_iso_headers(ctx, p); |
2531 | 2532 | ||
2532 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2533 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2533 | ir_header = (__le32 *) p; | 2534 | ir_header = (__le32 *) p; |
2534 | ctx->base.callback.sc(&ctx->base, | 2535 | ctx->base.callback.sc(&ctx->base, |
2535 | le32_to_cpu(ir_header[0]) & 0xffff, | 2536 | le32_to_cpu(ir_header[0]) & 0xffff, |
2536 | ctx->header_length, ctx->header, | 2537 | ctx->header_length, ctx->header, |
2537 | ctx->base.callback_data); | 2538 | ctx->base.callback_data); |
2538 | ctx->header_length = 0; | 2539 | ctx->header_length = 0; |
2539 | } | 2540 | } |
2540 | 2541 | ||
2541 | return 1; | 2542 | return 1; |
2542 | } | 2543 | } |
2543 | 2544 | ||
2544 | /* d == last because each descriptor block is only a single descriptor. */ | 2545 | /* d == last because each descriptor block is only a single descriptor. */ |
2545 | static int handle_ir_buffer_fill(struct context *context, | 2546 | static int handle_ir_buffer_fill(struct context *context, |
2546 | struct descriptor *d, | 2547 | struct descriptor *d, |
2547 | struct descriptor *last) | 2548 | struct descriptor *last) |
2548 | { | 2549 | { |
2549 | struct iso_context *ctx = | 2550 | struct iso_context *ctx = |
2550 | container_of(context, struct iso_context, context); | 2551 | container_of(context, struct iso_context, context); |
2551 | 2552 | ||
2552 | if (!last->transfer_status) | 2553 | if (!last->transfer_status) |
2553 | /* Descriptor(s) not done yet, stop iteration */ | 2554 | /* Descriptor(s) not done yet, stop iteration */ |
2554 | return 0; | 2555 | return 0; |
2555 | 2556 | ||
2556 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) | 2557 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) |
2557 | ctx->base.callback.mc(&ctx->base, | 2558 | ctx->base.callback.mc(&ctx->base, |
2558 | le32_to_cpu(last->data_address) + | 2559 | le32_to_cpu(last->data_address) + |
2559 | le16_to_cpu(last->req_count) - | 2560 | le16_to_cpu(last->req_count) - |
2560 | le16_to_cpu(last->res_count), | 2561 | le16_to_cpu(last->res_count), |
2561 | ctx->base.callback_data); | 2562 | ctx->base.callback_data); |
2562 | 2563 | ||
2563 | return 1; | 2564 | return 1; |
2564 | } | 2565 | } |
2565 | 2566 | ||
2566 | static int handle_it_packet(struct context *context, | 2567 | static int handle_it_packet(struct context *context, |
2567 | struct descriptor *d, | 2568 | struct descriptor *d, |
2568 | struct descriptor *last) | 2569 | struct descriptor *last) |
2569 | { | 2570 | { |
2570 | struct iso_context *ctx = | 2571 | struct iso_context *ctx = |
2571 | container_of(context, struct iso_context, context); | 2572 | container_of(context, struct iso_context, context); |
2572 | int i; | 2573 | int i; |
2573 | struct descriptor *pd; | 2574 | struct descriptor *pd; |
2574 | 2575 | ||
2575 | for (pd = d; pd <= last; pd++) | 2576 | for (pd = d; pd <= last; pd++) |
2576 | if (pd->transfer_status) | 2577 | if (pd->transfer_status) |
2577 | break; | 2578 | break; |
2578 | if (pd > last) | 2579 | if (pd > last) |
2579 | /* Descriptor(s) not done yet, stop iteration */ | 2580 | /* Descriptor(s) not done yet, stop iteration */ |
2580 | return 0; | 2581 | return 0; |
2581 | 2582 | ||
2582 | i = ctx->header_length; | 2583 | i = ctx->header_length; |
2583 | if (i + 4 < PAGE_SIZE) { | 2584 | if (i + 4 < PAGE_SIZE) { |
2584 | /* Present this value as big-endian to match the receive code */ | 2585 | /* Present this value as big-endian to match the receive code */ |
2585 | *(__be32 *)(ctx->header + i) = cpu_to_be32( | 2586 | *(__be32 *)(ctx->header + i) = cpu_to_be32( |
2586 | ((u32)le16_to_cpu(pd->transfer_status) << 16) | | 2587 | ((u32)le16_to_cpu(pd->transfer_status) << 16) | |
2587 | le16_to_cpu(pd->res_count)); | 2588 | le16_to_cpu(pd->res_count)); |
2588 | ctx->header_length += 4; | 2589 | ctx->header_length += 4; |
2589 | } | 2590 | } |
2590 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 2591 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
2591 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), | 2592 | ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count), |
2592 | ctx->header_length, ctx->header, | 2593 | ctx->header_length, ctx->header, |
2593 | ctx->base.callback_data); | 2594 | ctx->base.callback_data); |
2594 | ctx->header_length = 0; | 2595 | ctx->header_length = 0; |
2595 | } | 2596 | } |
2596 | return 1; | 2597 | return 1; |
2597 | } | 2598 | } |
2598 | 2599 | ||
2599 | static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) | 2600 | static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) |
2600 | { | 2601 | { |
2601 | u32 hi = channels >> 32, lo = channels; | 2602 | u32 hi = channels >> 32, lo = channels; |
2602 | 2603 | ||
2603 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); | 2604 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); |
2604 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); | 2605 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); |
2605 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); | 2606 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); |
2606 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); | 2607 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); |
2607 | mmiowb(); | 2608 | mmiowb(); |
2608 | ohci->mc_channels = channels; | 2609 | ohci->mc_channels = channels; |
2609 | } | 2610 | } |
2610 | 2611 | ||
2611 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, | 2612 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, |
2612 | int type, int channel, size_t header_size) | 2613 | int type, int channel, size_t header_size) |
2613 | { | 2614 | { |
2614 | struct fw_ohci *ohci = fw_ohci(card); | 2615 | struct fw_ohci *ohci = fw_ohci(card); |
2615 | struct iso_context *uninitialized_var(ctx); | 2616 | struct iso_context *uninitialized_var(ctx); |
2616 | descriptor_callback_t uninitialized_var(callback); | 2617 | descriptor_callback_t uninitialized_var(callback); |
2617 | u64 *uninitialized_var(channels); | 2618 | u64 *uninitialized_var(channels); |
2618 | u32 *uninitialized_var(mask), uninitialized_var(regs); | 2619 | u32 *uninitialized_var(mask), uninitialized_var(regs); |
2619 | unsigned long flags; | 2620 | unsigned long flags; |
2620 | int index, ret = -EBUSY; | 2621 | int index, ret = -EBUSY; |
2621 | 2622 | ||
2622 | spin_lock_irqsave(&ohci->lock, flags); | 2623 | spin_lock_irqsave(&ohci->lock, flags); |
2623 | 2624 | ||
2624 | switch (type) { | 2625 | switch (type) { |
2625 | case FW_ISO_CONTEXT_TRANSMIT: | 2626 | case FW_ISO_CONTEXT_TRANSMIT: |
2626 | mask = &ohci->it_context_mask; | 2627 | mask = &ohci->it_context_mask; |
2627 | callback = handle_it_packet; | 2628 | callback = handle_it_packet; |
2628 | index = ffs(*mask) - 1; | 2629 | index = ffs(*mask) - 1; |
2629 | if (index >= 0) { | 2630 | if (index >= 0) { |
2630 | *mask &= ~(1 << index); | 2631 | *mask &= ~(1 << index); |
2631 | regs = OHCI1394_IsoXmitContextBase(index); | 2632 | regs = OHCI1394_IsoXmitContextBase(index); |
2632 | ctx = &ohci->it_context_list[index]; | 2633 | ctx = &ohci->it_context_list[index]; |
2633 | } | 2634 | } |
2634 | break; | 2635 | break; |
2635 | 2636 | ||
2636 | case FW_ISO_CONTEXT_RECEIVE: | 2637 | case FW_ISO_CONTEXT_RECEIVE: |
2637 | channels = &ohci->ir_context_channels; | 2638 | channels = &ohci->ir_context_channels; |
2638 | mask = &ohci->ir_context_mask; | 2639 | mask = &ohci->ir_context_mask; |
2639 | callback = handle_ir_packet_per_buffer; | 2640 | callback = handle_ir_packet_per_buffer; |
2640 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; | 2641 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; |
2641 | if (index >= 0) { | 2642 | if (index >= 0) { |
2642 | *channels &= ~(1ULL << channel); | 2643 | *channels &= ~(1ULL << channel); |
2643 | *mask &= ~(1 << index); | 2644 | *mask &= ~(1 << index); |
2644 | regs = OHCI1394_IsoRcvContextBase(index); | 2645 | regs = OHCI1394_IsoRcvContextBase(index); |
2645 | ctx = &ohci->ir_context_list[index]; | 2646 | ctx = &ohci->ir_context_list[index]; |
2646 | } | 2647 | } |
2647 | break; | 2648 | break; |
2648 | 2649 | ||
2649 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2650 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2650 | mask = &ohci->ir_context_mask; | 2651 | mask = &ohci->ir_context_mask; |
2651 | callback = handle_ir_buffer_fill; | 2652 | callback = handle_ir_buffer_fill; |
2652 | index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; | 2653 | index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; |
2653 | if (index >= 0) { | 2654 | if (index >= 0) { |
2654 | ohci->mc_allocated = true; | 2655 | ohci->mc_allocated = true; |
2655 | *mask &= ~(1 << index); | 2656 | *mask &= ~(1 << index); |
2656 | regs = OHCI1394_IsoRcvContextBase(index); | 2657 | regs = OHCI1394_IsoRcvContextBase(index); |
2657 | ctx = &ohci->ir_context_list[index]; | 2658 | ctx = &ohci->ir_context_list[index]; |
2658 | } | 2659 | } |
2659 | break; | 2660 | break; |
2660 | 2661 | ||
2661 | default: | 2662 | default: |
2662 | index = -1; | 2663 | index = -1; |
2663 | ret = -ENOSYS; | 2664 | ret = -ENOSYS; |
2664 | } | 2665 | } |
2665 | 2666 | ||
2666 | spin_unlock_irqrestore(&ohci->lock, flags); | 2667 | spin_unlock_irqrestore(&ohci->lock, flags); |
2667 | 2668 | ||
2668 | if (index < 0) | 2669 | if (index < 0) |
2669 | return ERR_PTR(ret); | 2670 | return ERR_PTR(ret); |
2670 | 2671 | ||
2671 | memset(ctx, 0, sizeof(*ctx)); | 2672 | memset(ctx, 0, sizeof(*ctx)); |
2672 | ctx->header_length = 0; | 2673 | ctx->header_length = 0; |
2673 | ctx->header = (void *) __get_free_page(GFP_KERNEL); | 2674 | ctx->header = (void *) __get_free_page(GFP_KERNEL); |
2674 | if (ctx->header == NULL) { | 2675 | if (ctx->header == NULL) { |
2675 | ret = -ENOMEM; | 2676 | ret = -ENOMEM; |
2676 | goto out; | 2677 | goto out; |
2677 | } | 2678 | } |
2678 | ret = context_init(&ctx->context, ohci, regs, callback); | 2679 | ret = context_init(&ctx->context, ohci, regs, callback); |
2679 | if (ret < 0) | 2680 | if (ret < 0) |
2680 | goto out_with_header; | 2681 | goto out_with_header; |
2681 | 2682 | ||
2682 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) | 2683 | if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) |
2683 | set_multichannel_mask(ohci, 0); | 2684 | set_multichannel_mask(ohci, 0); |
2684 | 2685 | ||
2685 | return &ctx->base; | 2686 | return &ctx->base; |
2686 | 2687 | ||
2687 | out_with_header: | 2688 | out_with_header: |
2688 | free_page((unsigned long)ctx->header); | 2689 | free_page((unsigned long)ctx->header); |
2689 | out: | 2690 | out: |
2690 | spin_lock_irqsave(&ohci->lock, flags); | 2691 | spin_lock_irqsave(&ohci->lock, flags); |
2691 | 2692 | ||
2692 | switch (type) { | 2693 | switch (type) { |
2693 | case FW_ISO_CONTEXT_RECEIVE: | 2694 | case FW_ISO_CONTEXT_RECEIVE: |
2694 | *channels |= 1ULL << channel; | 2695 | *channels |= 1ULL << channel; |
2695 | break; | 2696 | break; |
2696 | 2697 | ||
2697 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2698 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2698 | ohci->mc_allocated = false; | 2699 | ohci->mc_allocated = false; |
2699 | break; | 2700 | break; |
2700 | } | 2701 | } |
2701 | *mask |= 1 << index; | 2702 | *mask |= 1 << index; |
2702 | 2703 | ||
2703 | spin_unlock_irqrestore(&ohci->lock, flags); | 2704 | spin_unlock_irqrestore(&ohci->lock, flags); |
2704 | 2705 | ||
2705 | return ERR_PTR(ret); | 2706 | return ERR_PTR(ret); |
2706 | } | 2707 | } |
2707 | 2708 | ||
2708 | static int ohci_start_iso(struct fw_iso_context *base, | 2709 | static int ohci_start_iso(struct fw_iso_context *base, |
2709 | s32 cycle, u32 sync, u32 tags) | 2710 | s32 cycle, u32 sync, u32 tags) |
2710 | { | 2711 | { |
2711 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2712 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2712 | struct fw_ohci *ohci = ctx->context.ohci; | 2713 | struct fw_ohci *ohci = ctx->context.ohci; |
2713 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; | 2714 | u32 control = IR_CONTEXT_ISOCH_HEADER, match; |
2714 | int index; | 2715 | int index; |
2715 | 2716 | ||
2716 | /* the controller cannot start without any queued packets */ | 2717 | /* the controller cannot start without any queued packets */ |
2717 | if (ctx->context.last->branch_address == 0) | 2718 | if (ctx->context.last->branch_address == 0) |
2718 | return -ENODATA; | 2719 | return -ENODATA; |
2719 | 2720 | ||
2720 | switch (ctx->base.type) { | 2721 | switch (ctx->base.type) { |
2721 | case FW_ISO_CONTEXT_TRANSMIT: | 2722 | case FW_ISO_CONTEXT_TRANSMIT: |
2722 | index = ctx - ohci->it_context_list; | 2723 | index = ctx - ohci->it_context_list; |
2723 | match = 0; | 2724 | match = 0; |
2724 | if (cycle >= 0) | 2725 | if (cycle >= 0) |
2725 | match = IT_CONTEXT_CYCLE_MATCH_ENABLE | | 2726 | match = IT_CONTEXT_CYCLE_MATCH_ENABLE | |
2726 | (cycle & 0x7fff) << 16; | 2727 | (cycle & 0x7fff) << 16; |
2727 | 2728 | ||
2728 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); | 2729 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); |
2729 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); | 2730 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); |
2730 | context_run(&ctx->context, match); | 2731 | context_run(&ctx->context, match); |
2731 | break; | 2732 | break; |
2732 | 2733 | ||
2733 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2734 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2734 | control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; | 2735 | control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; |
2735 | /* fall through */ | 2736 | /* fall through */ |
2736 | case FW_ISO_CONTEXT_RECEIVE: | 2737 | case FW_ISO_CONTEXT_RECEIVE: |
2737 | index = ctx - ohci->ir_context_list; | 2738 | index = ctx - ohci->ir_context_list; |
2738 | match = (tags << 28) | (sync << 8) | ctx->base.channel; | 2739 | match = (tags << 28) | (sync << 8) | ctx->base.channel; |
2739 | if (cycle >= 0) { | 2740 | if (cycle >= 0) { |
2740 | match |= (cycle & 0x07fff) << 12; | 2741 | match |= (cycle & 0x07fff) << 12; |
2741 | control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; | 2742 | control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; |
2742 | } | 2743 | } |
2743 | 2744 | ||
2744 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); | 2745 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); |
2745 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); | 2746 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); |
2746 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); | 2747 | reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); |
2747 | context_run(&ctx->context, control); | 2748 | context_run(&ctx->context, control); |
2748 | 2749 | ||
2749 | ctx->sync = sync; | 2750 | ctx->sync = sync; |
2750 | ctx->tags = tags; | 2751 | ctx->tags = tags; |
2751 | 2752 | ||
2752 | break; | 2753 | break; |
2753 | } | 2754 | } |
2754 | 2755 | ||
2755 | return 0; | 2756 | return 0; |
2756 | } | 2757 | } |
2757 | 2758 | ||
2758 | static int ohci_stop_iso(struct fw_iso_context *base) | 2759 | static int ohci_stop_iso(struct fw_iso_context *base) |
2759 | { | 2760 | { |
2760 | struct fw_ohci *ohci = fw_ohci(base->card); | 2761 | struct fw_ohci *ohci = fw_ohci(base->card); |
2761 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2762 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2762 | int index; | 2763 | int index; |
2763 | 2764 | ||
2764 | switch (ctx->base.type) { | 2765 | switch (ctx->base.type) { |
2765 | case FW_ISO_CONTEXT_TRANSMIT: | 2766 | case FW_ISO_CONTEXT_TRANSMIT: |
2766 | index = ctx - ohci->it_context_list; | 2767 | index = ctx - ohci->it_context_list; |
2767 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); | 2768 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); |
2768 | break; | 2769 | break; |
2769 | 2770 | ||
2770 | case FW_ISO_CONTEXT_RECEIVE: | 2771 | case FW_ISO_CONTEXT_RECEIVE: |
2771 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2772 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2772 | index = ctx - ohci->ir_context_list; | 2773 | index = ctx - ohci->ir_context_list; |
2773 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); | 2774 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); |
2774 | break; | 2775 | break; |
2775 | } | 2776 | } |
2776 | flush_writes(ohci); | 2777 | flush_writes(ohci); |
2777 | context_stop(&ctx->context); | 2778 | context_stop(&ctx->context); |
2778 | tasklet_kill(&ctx->context.tasklet); | 2779 | tasklet_kill(&ctx->context.tasklet); |
2779 | 2780 | ||
2780 | return 0; | 2781 | return 0; |
2781 | } | 2782 | } |
2782 | 2783 | ||
2783 | static void ohci_free_iso_context(struct fw_iso_context *base) | 2784 | static void ohci_free_iso_context(struct fw_iso_context *base) |
2784 | { | 2785 | { |
2785 | struct fw_ohci *ohci = fw_ohci(base->card); | 2786 | struct fw_ohci *ohci = fw_ohci(base->card); |
2786 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2787 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2787 | unsigned long flags; | 2788 | unsigned long flags; |
2788 | int index; | 2789 | int index; |
2789 | 2790 | ||
2790 | ohci_stop_iso(base); | 2791 | ohci_stop_iso(base); |
2791 | context_release(&ctx->context); | 2792 | context_release(&ctx->context); |
2792 | free_page((unsigned long)ctx->header); | 2793 | free_page((unsigned long)ctx->header); |
2793 | 2794 | ||
2794 | spin_lock_irqsave(&ohci->lock, flags); | 2795 | spin_lock_irqsave(&ohci->lock, flags); |
2795 | 2796 | ||
2796 | switch (base->type) { | 2797 | switch (base->type) { |
2797 | case FW_ISO_CONTEXT_TRANSMIT: | 2798 | case FW_ISO_CONTEXT_TRANSMIT: |
2798 | index = ctx - ohci->it_context_list; | 2799 | index = ctx - ohci->it_context_list; |
2799 | ohci->it_context_mask |= 1 << index; | 2800 | ohci->it_context_mask |= 1 << index; |
2800 | break; | 2801 | break; |
2801 | 2802 | ||
2802 | case FW_ISO_CONTEXT_RECEIVE: | 2803 | case FW_ISO_CONTEXT_RECEIVE: |
2803 | index = ctx - ohci->ir_context_list; | 2804 | index = ctx - ohci->ir_context_list; |
2804 | ohci->ir_context_mask |= 1 << index; | 2805 | ohci->ir_context_mask |= 1 << index; |
2805 | ohci->ir_context_channels |= 1ULL << base->channel; | 2806 | ohci->ir_context_channels |= 1ULL << base->channel; |
2806 | break; | 2807 | break; |
2807 | 2808 | ||
2808 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2809 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2809 | index = ctx - ohci->ir_context_list; | 2810 | index = ctx - ohci->ir_context_list; |
2810 | ohci->ir_context_mask |= 1 << index; | 2811 | ohci->ir_context_mask |= 1 << index; |
2811 | ohci->ir_context_channels |= ohci->mc_channels; | 2812 | ohci->ir_context_channels |= ohci->mc_channels; |
2812 | ohci->mc_channels = 0; | 2813 | ohci->mc_channels = 0; |
2813 | ohci->mc_allocated = false; | 2814 | ohci->mc_allocated = false; |
2814 | break; | 2815 | break; |
2815 | } | 2816 | } |
2816 | 2817 | ||
2817 | spin_unlock_irqrestore(&ohci->lock, flags); | 2818 | spin_unlock_irqrestore(&ohci->lock, flags); |
2818 | } | 2819 | } |
2819 | 2820 | ||
2820 | static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) | 2821 | static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) |
2821 | { | 2822 | { |
2822 | struct fw_ohci *ohci = fw_ohci(base->card); | 2823 | struct fw_ohci *ohci = fw_ohci(base->card); |
2823 | unsigned long flags; | 2824 | unsigned long flags; |
2824 | int ret; | 2825 | int ret; |
2825 | 2826 | ||
2826 | switch (base->type) { | 2827 | switch (base->type) { |
2827 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 2828 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
2828 | 2829 | ||
2829 | spin_lock_irqsave(&ohci->lock, flags); | 2830 | spin_lock_irqsave(&ohci->lock, flags); |
2830 | 2831 | ||
2831 | /* Don't allow multichannel to grab other contexts' channels. */ | 2832 | /* Don't allow multichannel to grab other contexts' channels. */ |
2832 | if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { | 2833 | if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { |
2833 | *channels = ohci->ir_context_channels; | 2834 | *channels = ohci->ir_context_channels; |
2834 | ret = -EBUSY; | 2835 | ret = -EBUSY; |
2835 | } else { | 2836 | } else { |
2836 | set_multichannel_mask(ohci, *channels); | 2837 | set_multichannel_mask(ohci, *channels); |
2837 | ret = 0; | 2838 | ret = 0; |
2838 | } | 2839 | } |
2839 | 2840 | ||
2840 | spin_unlock_irqrestore(&ohci->lock, flags); | 2841 | spin_unlock_irqrestore(&ohci->lock, flags); |
2841 | 2842 | ||
2842 | break; | 2843 | break; |
2843 | default: | 2844 | default: |
2844 | ret = -EINVAL; | 2845 | ret = -EINVAL; |
2845 | } | 2846 | } |
2846 | 2847 | ||
2847 | return ret; | 2848 | return ret; |
2848 | } | 2849 | } |
2849 | 2850 | ||
#ifdef CONFIG_PM
/* Restart, after resume, every iso context that was running at suspend. */
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	struct iso_context *ic;
	int n;

	for (n = 0; n < ohci->n_ir; n++) {
		ic = &ohci->ir_context_list[n];
		if (ic->context.running)
			ohci_start_iso(&ic->base, 0, ic->sync, ic->tags);
	}

	for (n = 0; n < ohci->n_it; n++) {
		ic = &ohci->it_context_list[n];
		if (ic->context.running)
			ohci_start_iso(&ic->base, 0, ic->sync, ic->tags);
	}
}
#endif
2869 | 2870 | ||
/*
 * Build the DMA descriptor program for one isochronous transmit packet and
 * append it to the IT context.  The program is: an optional immediate
 * descriptor pair carrying the on-the-wire iso header (omitted for skip
 * packets), an optional descriptor pointing at a driver-supplied header
 * block, and one OUTPUT descriptor per payload page crossed.
 * Returns 0, or -ENOMEM if the context's descriptor ring is full.
 */
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	/*
	 * z counts program descriptors: 1 for a skip packet, 2 for the
	 * immediate header pair otherwise, +1 if an extra header block
	 * follows, +1 per payload page below.
	 */
	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME: Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		/* The 8 immediate bytes of d[1] hold the iso packet header. */
		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		/* header bytes live right behind the z program descriptors */
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	/* One descriptor per payload page crossed. */
	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	/* z == 2: header-only packet, the immediate pair is also the last. */
	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
2969 | 2970 | ||
/*
 * Queue descriptors for packet-per-buffer isochronous receive.  One
 * fw_iso_packet request describes packet_count hardware packets; for each,
 * a leading INPUT_MORE descriptor captures the header/trailer (at least
 * 8 bytes) into the descriptor buffer itself, followed by one descriptor
 * per payload page.  Returns 0, or -ENOMEM if the ring is full.
 */
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		/* a skip request stalls the context until the next start */
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		/* header lands in the descriptor buffer, behind the program */
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		/* one payload descriptor per page this buffer touches */
		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		/* turn the final descriptor into INPUT_LAST with branch */
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
3045 | 3046 | ||
/*
 * Queue descriptors for buffer-fill (multichannel) isochronous receive:
 * one INPUT_MORE descriptor per page of the payload region, appended to
 * the context one at a time.  Offset and length must be quadlet-aligned
 * and fit within the buffer, else -EFAULT; -ENOMEM if the ring is full.
 */
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		/* a skip request stalls the context until the next start */
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		/* only the first descriptor can start mid-page */
		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}
3097 | 3098 | ||
3098 | static int ohci_queue_iso(struct fw_iso_context *base, | 3099 | static int ohci_queue_iso(struct fw_iso_context *base, |
3099 | struct fw_iso_packet *packet, | 3100 | struct fw_iso_packet *packet, |
3100 | struct fw_iso_buffer *buffer, | 3101 | struct fw_iso_buffer *buffer, |
3101 | unsigned long payload) | 3102 | unsigned long payload) |
3102 | { | 3103 | { |
3103 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 3104 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
3104 | unsigned long flags; | 3105 | unsigned long flags; |
3105 | int ret = -ENOSYS; | 3106 | int ret = -ENOSYS; |
3106 | 3107 | ||
3107 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); | 3108 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); |
3108 | switch (base->type) { | 3109 | switch (base->type) { |
3109 | case FW_ISO_CONTEXT_TRANSMIT: | 3110 | case FW_ISO_CONTEXT_TRANSMIT: |
3110 | ret = queue_iso_transmit(ctx, packet, buffer, payload); | 3111 | ret = queue_iso_transmit(ctx, packet, buffer, payload); |
3111 | break; | 3112 | break; |
3112 | case FW_ISO_CONTEXT_RECEIVE: | 3113 | case FW_ISO_CONTEXT_RECEIVE: |
3113 | ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); | 3114 | ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); |
3114 | break; | 3115 | break; |
3115 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | 3116 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
3116 | ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); | 3117 | ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); |
3117 | break; | 3118 | break; |
3118 | } | 3119 | } |
3119 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); | 3120 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); |
3120 | 3121 | ||
3121 | return ret; | 3122 | return ret; |
3122 | } | 3123 | } |
3123 | 3124 | ||
3125 | static void ohci_flush_queue_iso(struct fw_iso_context *base) | ||
3126 | { | ||
3127 | struct context *ctx = | ||
3128 | &container_of(base, struct iso_context, base)->context; | ||
3129 | |||
3130 | reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); | ||
3131 | flush_writes(ctx->ohci); | ||
3132 | } | ||
3133 | |||
/*
 * fw_card_driver ops implemented by the OHCI controller driver; bound to
 * each card in pci_probe() via fw_card_initialize().
 */
static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
3143 | 3154 | ||
3144 | #ifdef CONFIG_PPC_PMAC | 3155 | #ifdef CONFIG_PPC_PMAC |
3145 | static void pmac_ohci_on(struct pci_dev *dev) | 3156 | static void pmac_ohci_on(struct pci_dev *dev) |
3146 | { | 3157 | { |
3147 | if (machine_is(powermac)) { | 3158 | if (machine_is(powermac)) { |
3148 | struct device_node *ofn = pci_device_to_OF_node(dev); | 3159 | struct device_node *ofn = pci_device_to_OF_node(dev); |
3149 | 3160 | ||
3150 | if (ofn) { | 3161 | if (ofn) { |
3151 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); | 3162 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); |
3152 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); | 3163 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); |
3153 | } | 3164 | } |
3154 | } | 3165 | } |
3155 | } | 3166 | } |
3156 | 3167 | ||
3157 | static void pmac_ohci_off(struct pci_dev *dev) | 3168 | static void pmac_ohci_off(struct pci_dev *dev) |
3158 | { | 3169 | { |
3159 | if (machine_is(powermac)) { | 3170 | if (machine_is(powermac)) { |
3160 | struct device_node *ofn = pci_device_to_OF_node(dev); | 3171 | struct device_node *ofn = pci_device_to_OF_node(dev); |
3161 | 3172 | ||
3162 | if (ofn) { | 3173 | if (ofn) { |
3163 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); | 3174 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); |
3164 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); | 3175 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); |
3165 | } | 3176 | } |
3166 | } | 3177 | } |
3167 | } | 3178 | } |
3168 | #else | 3179 | #else |
3169 | static inline void pmac_ohci_on(struct pci_dev *dev) {} | 3180 | static inline void pmac_ohci_on(struct pci_dev *dev) {} |
3170 | static inline void pmac_ohci_off(struct pci_dev *dev) {} | 3181 | static inline void pmac_ohci_off(struct pci_dev *dev) {} |
3171 | #endif /* CONFIG_PPC_PMAC */ | 3182 | #endif /* CONFIG_PPC_PMAC */ |
3172 | 3183 | ||
3173 | static int __devinit pci_probe(struct pci_dev *dev, | 3184 | static int __devinit pci_probe(struct pci_dev *dev, |
3174 | const struct pci_device_id *ent) | 3185 | const struct pci_device_id *ent) |
3175 | { | 3186 | { |
3176 | struct fw_ohci *ohci; | 3187 | struct fw_ohci *ohci; |
3177 | u32 bus_options, max_receive, link_speed, version; | 3188 | u32 bus_options, max_receive, link_speed, version; |
3178 | u64 guid; | 3189 | u64 guid; |
3179 | int i, err; | 3190 | int i, err; |
3180 | size_t size; | 3191 | size_t size; |
3181 | 3192 | ||
3182 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); | 3193 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); |
3183 | if (ohci == NULL) { | 3194 | if (ohci == NULL) { |
3184 | err = -ENOMEM; | 3195 | err = -ENOMEM; |
3185 | goto fail; | 3196 | goto fail; |
3186 | } | 3197 | } |
3187 | 3198 | ||
3188 | fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); | 3199 | fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); |
3189 | 3200 | ||
3190 | pmac_ohci_on(dev); | 3201 | pmac_ohci_on(dev); |
3191 | 3202 | ||
3192 | err = pci_enable_device(dev); | 3203 | err = pci_enable_device(dev); |
3193 | if (err) { | 3204 | if (err) { |
3194 | fw_error("Failed to enable OHCI hardware\n"); | 3205 | fw_error("Failed to enable OHCI hardware\n"); |
3195 | goto fail_free; | 3206 | goto fail_free; |
3196 | } | 3207 | } |
3197 | 3208 | ||
3198 | pci_set_master(dev); | 3209 | pci_set_master(dev); |
3199 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); | 3210 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); |
3200 | pci_set_drvdata(dev, ohci); | 3211 | pci_set_drvdata(dev, ohci); |
3201 | 3212 | ||
3202 | spin_lock_init(&ohci->lock); | 3213 | spin_lock_init(&ohci->lock); |
3203 | mutex_init(&ohci->phy_reg_mutex); | 3214 | mutex_init(&ohci->phy_reg_mutex); |
3204 | 3215 | ||
3205 | tasklet_init(&ohci->bus_reset_tasklet, | 3216 | tasklet_init(&ohci->bus_reset_tasklet, |
3206 | bus_reset_tasklet, (unsigned long)ohci); | 3217 | bus_reset_tasklet, (unsigned long)ohci); |
3207 | 3218 | ||
3208 | err = pci_request_region(dev, 0, ohci_driver_name); | 3219 | err = pci_request_region(dev, 0, ohci_driver_name); |
3209 | if (err) { | 3220 | if (err) { |
3210 | fw_error("MMIO resource unavailable\n"); | 3221 | fw_error("MMIO resource unavailable\n"); |
3211 | goto fail_disable; | 3222 | goto fail_disable; |
3212 | } | 3223 | } |
3213 | 3224 | ||
3214 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); | 3225 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); |
3215 | if (ohci->registers == NULL) { | 3226 | if (ohci->registers == NULL) { |
3216 | fw_error("Failed to remap registers\n"); | 3227 | fw_error("Failed to remap registers\n"); |
3217 | err = -ENXIO; | 3228 | err = -ENXIO; |
3218 | goto fail_iomem; | 3229 | goto fail_iomem; |
3219 | } | 3230 | } |
3220 | 3231 | ||
3221 | for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) | 3232 | for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) |
3222 | if ((ohci_quirks[i].vendor == dev->vendor) && | 3233 | if ((ohci_quirks[i].vendor == dev->vendor) && |
3223 | (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || | 3234 | (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || |
3224 | ohci_quirks[i].device == dev->device) && | 3235 | ohci_quirks[i].device == dev->device) && |
3225 | (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || | 3236 | (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || |
3226 | ohci_quirks[i].revision >= dev->revision)) { | 3237 | ohci_quirks[i].revision >= dev->revision)) { |
3227 | ohci->quirks = ohci_quirks[i].flags; | 3238 | ohci->quirks = ohci_quirks[i].flags; |
3228 | break; | 3239 | break; |
3229 | } | 3240 | } |
3230 | if (param_quirks) | 3241 | if (param_quirks) |
3231 | ohci->quirks = param_quirks; | 3242 | ohci->quirks = param_quirks; |
3232 | 3243 | ||
3233 | /* | 3244 | /* |
3234 | * Because dma_alloc_coherent() allocates at least one page, | 3245 | * Because dma_alloc_coherent() allocates at least one page, |
3235 | * we save space by using a common buffer for the AR request/ | 3246 | * we save space by using a common buffer for the AR request/ |
3236 | * response descriptors and the self IDs buffer. | 3247 | * response descriptors and the self IDs buffer. |
3237 | */ | 3248 | */ |
3238 | BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); | 3249 | BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); |
3239 | BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); | 3250 | BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); |
3240 | ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, | 3251 | ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, |
3241 | PAGE_SIZE, | 3252 | PAGE_SIZE, |
3242 | &ohci->misc_buffer_bus, | 3253 | &ohci->misc_buffer_bus, |
3243 | GFP_KERNEL); | 3254 | GFP_KERNEL); |
3244 | if (!ohci->misc_buffer) { | 3255 | if (!ohci->misc_buffer) { |
3245 | err = -ENOMEM; | 3256 | err = -ENOMEM; |
3246 | goto fail_iounmap; | 3257 | goto fail_iounmap; |
3247 | } | 3258 | } |
3248 | 3259 | ||
3249 | err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, | 3260 | err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, |
3250 | OHCI1394_AsReqRcvContextControlSet); | 3261 | OHCI1394_AsReqRcvContextControlSet); |
3251 | if (err < 0) | 3262 | if (err < 0) |
3252 | goto fail_misc_buf; | 3263 | goto fail_misc_buf; |
3253 | 3264 | ||
3254 | err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, | 3265 | err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, |
3255 | OHCI1394_AsRspRcvContextControlSet); | 3266 | OHCI1394_AsRspRcvContextControlSet); |
3256 | if (err < 0) | 3267 | if (err < 0) |
3257 | goto fail_arreq_ctx; | 3268 | goto fail_arreq_ctx; |
3258 | 3269 | ||
3259 | err = context_init(&ohci->at_request_ctx, ohci, | 3270 | err = context_init(&ohci->at_request_ctx, ohci, |
3260 | OHCI1394_AsReqTrContextControlSet, handle_at_packet); | 3271 | OHCI1394_AsReqTrContextControlSet, handle_at_packet); |
3261 | if (err < 0) | 3272 | if (err < 0) |
3262 | goto fail_arrsp_ctx; | 3273 | goto fail_arrsp_ctx; |
3263 | 3274 | ||
3264 | err = context_init(&ohci->at_response_ctx, ohci, | 3275 | err = context_init(&ohci->at_response_ctx, ohci, |
3265 | OHCI1394_AsRspTrContextControlSet, handle_at_packet); | 3276 | OHCI1394_AsRspTrContextControlSet, handle_at_packet); |
3266 | if (err < 0) | 3277 | if (err < 0) |
3267 | goto fail_atreq_ctx; | 3278 | goto fail_atreq_ctx; |
3268 | 3279 | ||
3269 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); | 3280 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); |
3270 | ohci->ir_context_channels = ~0ULL; | 3281 | ohci->ir_context_channels = ~0ULL; |
3271 | ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); | 3282 | ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); |
3272 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); | 3283 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); |
3273 | ohci->ir_context_mask = ohci->ir_context_support; | 3284 | ohci->ir_context_mask = ohci->ir_context_support; |
3274 | ohci->n_ir = hweight32(ohci->ir_context_mask); | 3285 | ohci->n_ir = hweight32(ohci->ir_context_mask); |
3275 | size = sizeof(struct iso_context) * ohci->n_ir; | 3286 | size = sizeof(struct iso_context) * ohci->n_ir; |
3276 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); | 3287 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); |
3277 | 3288 | ||
3278 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); | 3289 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); |
3279 | ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); | 3290 | ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); |
3280 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); | 3291 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); |
3281 | ohci->it_context_mask = ohci->it_context_support; | 3292 | ohci->it_context_mask = ohci->it_context_support; |
3282 | ohci->n_it = hweight32(ohci->it_context_mask); | 3293 | ohci->n_it = hweight32(ohci->it_context_mask); |
3283 | size = sizeof(struct iso_context) * ohci->n_it; | 3294 | size = sizeof(struct iso_context) * ohci->n_it; |
3284 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); | 3295 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); |
3285 | 3296 | ||
3286 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { | 3297 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { |
3287 | err = -ENOMEM; | 3298 | err = -ENOMEM; |
3288 | goto fail_contexts; | 3299 | goto fail_contexts; |
3289 | } | 3300 | } |
3290 | 3301 | ||
3291 | ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2; | 3302 | ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2; |
3292 | ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; | 3303 | ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; |
3293 | 3304 | ||
3294 | bus_options = reg_read(ohci, OHCI1394_BusOptions); | 3305 | bus_options = reg_read(ohci, OHCI1394_BusOptions); |
3295 | max_receive = (bus_options >> 12) & 0xf; | 3306 | max_receive = (bus_options >> 12) & 0xf; |
3296 | link_speed = bus_options & 0x7; | 3307 | link_speed = bus_options & 0x7; |
3297 | guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | | 3308 | guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | |
3298 | reg_read(ohci, OHCI1394_GUIDLo); | 3309 | reg_read(ohci, OHCI1394_GUIDLo); |
3299 | 3310 | ||
3300 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); | 3311 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); |
3301 | if (err) | 3312 | if (err) |
3302 | goto fail_contexts; | 3313 | goto fail_contexts; |
3303 | 3314 | ||
3304 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 3315 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
3305 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " | 3316 | fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " |
3306 | "%d IR + %d IT contexts, quirks 0x%x\n", | 3317 | "%d IR + %d IT contexts, quirks 0x%x\n", |
3307 | dev_name(&dev->dev), version >> 16, version & 0xff, | 3318 | dev_name(&dev->dev), version >> 16, version & 0xff, |
3308 | ohci->n_ir, ohci->n_it, ohci->quirks); | 3319 | ohci->n_ir, ohci->n_it, ohci->quirks); |
3309 | 3320 | ||
3310 | return 0; | 3321 | return 0; |
3311 | 3322 | ||
3312 | fail_contexts: | 3323 | fail_contexts: |
3313 | kfree(ohci->ir_context_list); | 3324 | kfree(ohci->ir_context_list); |
3314 | kfree(ohci->it_context_list); | 3325 | kfree(ohci->it_context_list); |
3315 | context_release(&ohci->at_response_ctx); | 3326 | context_release(&ohci->at_response_ctx); |
3316 | fail_atreq_ctx: | 3327 | fail_atreq_ctx: |
3317 | context_release(&ohci->at_request_ctx); | 3328 | context_release(&ohci->at_request_ctx); |
3318 | fail_arrsp_ctx: | 3329 | fail_arrsp_ctx: |
3319 | ar_context_release(&ohci->ar_response_ctx); | 3330 | ar_context_release(&ohci->ar_response_ctx); |
3320 | fail_arreq_ctx: | 3331 | fail_arreq_ctx: |
3321 | ar_context_release(&ohci->ar_request_ctx); | 3332 | ar_context_release(&ohci->ar_request_ctx); |
3322 | fail_misc_buf: | 3333 | fail_misc_buf: |
3323 | dma_free_coherent(ohci->card.device, PAGE_SIZE, | 3334 | dma_free_coherent(ohci->card.device, PAGE_SIZE, |
3324 | ohci->misc_buffer, ohci->misc_buffer_bus); | 3335 | ohci->misc_buffer, ohci->misc_buffer_bus); |
3325 | fail_iounmap: | 3336 | fail_iounmap: |
3326 | pci_iounmap(dev, ohci->registers); | 3337 | pci_iounmap(dev, ohci->registers); |
3327 | fail_iomem: | 3338 | fail_iomem: |
3328 | pci_release_region(dev, 0); | 3339 | pci_release_region(dev, 0); |
3329 | fail_disable: | 3340 | fail_disable: |
3330 | pci_disable_device(dev); | 3341 | pci_disable_device(dev); |
3331 | fail_free: | 3342 | fail_free: |
3332 | kfree(ohci); | 3343 | kfree(ohci); |
3333 | pmac_ohci_off(dev); | 3344 | pmac_ohci_off(dev); |
3334 | fail: | 3345 | fail: |
3335 | if (err == -ENOMEM) | 3346 | if (err == -ENOMEM) |
3336 | fw_error("Out of memory\n"); | 3347 | fw_error("Out of memory\n"); |
3337 | 3348 | ||
3338 | return err; | 3349 | return err; |
3339 | } | 3350 | } |
3340 | 3351 | ||
3341 | static void pci_remove(struct pci_dev *dev) | 3352 | static void pci_remove(struct pci_dev *dev) |
3342 | { | 3353 | { |
3343 | struct fw_ohci *ohci; | 3354 | struct fw_ohci *ohci; |
3344 | 3355 | ||
3345 | ohci = pci_get_drvdata(dev); | 3356 | ohci = pci_get_drvdata(dev); |
3346 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | 3357 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); |
3347 | flush_writes(ohci); | 3358 | flush_writes(ohci); |
3348 | fw_core_remove_card(&ohci->card); | 3359 | fw_core_remove_card(&ohci->card); |
3349 | 3360 | ||
3350 | /* | 3361 | /* |
3351 | * FIXME: Fail all pending packets here, now that the upper | 3362 | * FIXME: Fail all pending packets here, now that the upper |
3352 | * layers can't queue any more. | 3363 | * layers can't queue any more. |
3353 | */ | 3364 | */ |
3354 | 3365 | ||
3355 | software_reset(ohci); | 3366 | software_reset(ohci); |
3356 | free_irq(dev->irq, ohci); | 3367 | free_irq(dev->irq, ohci); |
3357 | 3368 | ||
3358 | if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) | 3369 | if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) |
3359 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 3370 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
3360 | ohci->next_config_rom, ohci->next_config_rom_bus); | 3371 | ohci->next_config_rom, ohci->next_config_rom_bus); |
3361 | if (ohci->config_rom) | 3372 | if (ohci->config_rom) |
3362 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 3373 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
3363 | ohci->config_rom, ohci->config_rom_bus); | 3374 | ohci->config_rom, ohci->config_rom_bus); |
3364 | ar_context_release(&ohci->ar_request_ctx); | 3375 | ar_context_release(&ohci->ar_request_ctx); |
3365 | ar_context_release(&ohci->ar_response_ctx); | 3376 | ar_context_release(&ohci->ar_response_ctx); |
3366 | dma_free_coherent(ohci->card.device, PAGE_SIZE, | 3377 | dma_free_coherent(ohci->card.device, PAGE_SIZE, |
3367 | ohci->misc_buffer, ohci->misc_buffer_bus); | 3378 | ohci->misc_buffer, ohci->misc_buffer_bus); |
3368 | context_release(&ohci->at_request_ctx); | 3379 | context_release(&ohci->at_request_ctx); |
3369 | context_release(&ohci->at_response_ctx); | 3380 | context_release(&ohci->at_response_ctx); |
3370 | kfree(ohci->it_context_list); | 3381 | kfree(ohci->it_context_list); |
3371 | kfree(ohci->ir_context_list); | 3382 | kfree(ohci->ir_context_list); |
3372 | pci_disable_msi(dev); | 3383 | pci_disable_msi(dev); |
3373 | pci_iounmap(dev, ohci->registers); | 3384 | pci_iounmap(dev, ohci->registers); |
3374 | pci_release_region(dev, 0); | 3385 | pci_release_region(dev, 0); |
3375 | pci_disable_device(dev); | 3386 | pci_disable_device(dev); |
3376 | kfree(ohci); | 3387 | kfree(ohci); |
3377 | pmac_ohci_off(dev); | 3388 | pmac_ohci_off(dev); |
3378 | 3389 | ||
3379 | fw_notify("Removed fw-ohci device.\n"); | 3390 | fw_notify("Removed fw-ohci device.\n"); |
3380 | } | 3391 | } |
3381 | 3392 | ||
3382 | #ifdef CONFIG_PM | 3393 | #ifdef CONFIG_PM |
3383 | static int pci_suspend(struct pci_dev *dev, pm_message_t state) | 3394 | static int pci_suspend(struct pci_dev *dev, pm_message_t state) |
3384 | { | 3395 | { |
3385 | struct fw_ohci *ohci = pci_get_drvdata(dev); | 3396 | struct fw_ohci *ohci = pci_get_drvdata(dev); |
3386 | int err; | 3397 | int err; |
3387 | 3398 | ||
3388 | software_reset(ohci); | 3399 | software_reset(ohci); |
3389 | free_irq(dev->irq, ohci); | 3400 | free_irq(dev->irq, ohci); |
3390 | pci_disable_msi(dev); | 3401 | pci_disable_msi(dev); |
3391 | err = pci_save_state(dev); | 3402 | err = pci_save_state(dev); |
3392 | if (err) { | 3403 | if (err) { |
3393 | fw_error("pci_save_state failed\n"); | 3404 | fw_error("pci_save_state failed\n"); |
3394 | return err; | 3405 | return err; |
3395 | } | 3406 | } |
3396 | err = pci_set_power_state(dev, pci_choose_state(dev, state)); | 3407 | err = pci_set_power_state(dev, pci_choose_state(dev, state)); |
3397 | if (err) | 3408 | if (err) |
3398 | fw_error("pci_set_power_state failed with %d\n", err); | 3409 | fw_error("pci_set_power_state failed with %d\n", err); |
3399 | pmac_ohci_off(dev); | 3410 | pmac_ohci_off(dev); |
3400 | 3411 | ||
3401 | return 0; | 3412 | return 0; |
3402 | } | 3413 | } |
3403 | 3414 | ||
3404 | static int pci_resume(struct pci_dev *dev) | 3415 | static int pci_resume(struct pci_dev *dev) |
3405 | { | 3416 | { |
3406 | struct fw_ohci *ohci = pci_get_drvdata(dev); | 3417 | struct fw_ohci *ohci = pci_get_drvdata(dev); |
3407 | int err; | 3418 | int err; |
3408 | 3419 | ||
3409 | pmac_ohci_on(dev); | 3420 | pmac_ohci_on(dev); |
3410 | pci_set_power_state(dev, PCI_D0); | 3421 | pci_set_power_state(dev, PCI_D0); |
3411 | pci_restore_state(dev); | 3422 | pci_restore_state(dev); |
3412 | err = pci_enable_device(dev); | 3423 | err = pci_enable_device(dev); |
3413 | if (err) { | 3424 | if (err) { |
3414 | fw_error("pci_enable_device failed\n"); | 3425 | fw_error("pci_enable_device failed\n"); |
3415 | return err; | 3426 | return err; |
3416 | } | 3427 | } |
3417 | 3428 | ||
3418 | /* Some systems don't setup GUID register on resume from ram */ | 3429 | /* Some systems don't setup GUID register on resume from ram */ |
3419 | if (!reg_read(ohci, OHCI1394_GUIDLo) && | 3430 | if (!reg_read(ohci, OHCI1394_GUIDLo) && |
3420 | !reg_read(ohci, OHCI1394_GUIDHi)) { | 3431 | !reg_read(ohci, OHCI1394_GUIDHi)) { |
3421 | reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); | 3432 | reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); |
3422 | reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); | 3433 | reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); |
3423 | } | 3434 | } |
3424 | 3435 | ||
3425 | err = ohci_enable(&ohci->card, NULL, 0); | 3436 | err = ohci_enable(&ohci->card, NULL, 0); |
3426 | if (err) | 3437 | if (err) |
3427 | return err; | 3438 | return err; |
3428 | 3439 | ||
3429 | ohci_resume_iso_dma(ohci); | 3440 | ohci_resume_iso_dma(ohci); |
3430 | 3441 | ||
3431 | return 0; | 3442 | return 0; |
3432 | } | 3443 | } |
3433 | #endif | 3444 | #endif |
3434 | 3445 | ||
3435 | static const struct pci_device_id pci_table[] = { | 3446 | static const struct pci_device_id pci_table[] = { |
3436 | { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, | 3447 | { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, |
3437 | { } | 3448 | { } |
3438 | }; | 3449 | }; |
3439 | 3450 | ||
3440 | MODULE_DEVICE_TABLE(pci, pci_table); | 3451 | MODULE_DEVICE_TABLE(pci, pci_table); |
3441 | 3452 | ||
3442 | static struct pci_driver fw_ohci_pci_driver = { | 3453 | static struct pci_driver fw_ohci_pci_driver = { |
3443 | .name = ohci_driver_name, | 3454 | .name = ohci_driver_name, |
3444 | .id_table = pci_table, | 3455 | .id_table = pci_table, |
3445 | .probe = pci_probe, | 3456 | .probe = pci_probe, |
3446 | .remove = pci_remove, | 3457 | .remove = pci_remove, |
3447 | #ifdef CONFIG_PM | 3458 | #ifdef CONFIG_PM |
3448 | .resume = pci_resume, | 3459 | .resume = pci_resume, |
3449 | .suspend = pci_suspend, | 3460 | .suspend = pci_suspend, |
3450 | #endif | 3461 | #endif |
3451 | }; | 3462 | }; |
3452 | 3463 | ||
3453 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | 3464 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); |
3454 | MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); | 3465 | MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); |
3455 | MODULE_LICENSE("GPL"); | 3466 | MODULE_LICENSE("GPL"); |
3456 | 3467 | ||
3457 | /* Provide a module alias so root-on-sbp2 initrds don't break. */ | 3468 | /* Provide a module alias so root-on-sbp2 initrds don't break. */ |
3458 | #ifndef CONFIG_IEEE1394_OHCI1394_MODULE | 3469 | #ifndef CONFIG_IEEE1394_OHCI1394_MODULE |
3459 | MODULE_ALIAS("ohci1394"); | 3470 | MODULE_ALIAS("ohci1394"); |
3460 | #endif | 3471 | #endif |
3461 | 3472 | ||
3462 | static int __init fw_ohci_init(void) | 3473 | static int __init fw_ohci_init(void) |
3463 | { | 3474 | { |
3464 | return pci_register_driver(&fw_ohci_pci_driver); | 3475 | return pci_register_driver(&fw_ohci_pci_driver); |
3465 | } | 3476 | } |
3466 | 3477 | ||
3467 | static void __exit fw_ohci_cleanup(void) | 3478 | static void __exit fw_ohci_cleanup(void) |
3468 | { | 3479 | { |
3469 | pci_unregister_driver(&fw_ohci_pci_driver); | 3480 | pci_unregister_driver(&fw_ohci_pci_driver); |
3470 | } | 3481 | } |
3471 | 3482 |
drivers/media/dvb/firewire/firedtv-fw.c
1 | /* | 1 | /* |
2 | * FireDTV driver -- firewire I/O backend | 2 | * FireDTV driver -- firewire I/O backend |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
7 | #include <linux/firewire.h> | 7 | #include <linux/firewire.h> |
8 | #include <linux/firewire-constants.h> | 8 | #include <linux/firewire-constants.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/mod_devicetable.h> | 12 | #include <linux/mod_devicetable.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
20 | #include <linux/workqueue.h> | 20 | #include <linux/workqueue.h> |
21 | 21 | ||
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | #include <asm/system.h> | 23 | #include <asm/system.h> |
24 | 24 | ||
25 | #include <dvb_demux.h> | 25 | #include <dvb_demux.h> |
26 | 26 | ||
27 | #include "firedtv.h" | 27 | #include "firedtv.h" |
28 | 28 | ||
29 | static LIST_HEAD(node_list); | 29 | static LIST_HEAD(node_list); |
30 | static DEFINE_SPINLOCK(node_list_lock); | 30 | static DEFINE_SPINLOCK(node_list_lock); |
31 | 31 | ||
32 | static inline struct fw_device *device_of(struct firedtv *fdtv) | 32 | static inline struct fw_device *device_of(struct firedtv *fdtv) |
33 | { | 33 | { |
34 | return fw_device(fdtv->device->parent); | 34 | return fw_device(fdtv->device->parent); |
35 | } | 35 | } |
36 | 36 | ||
37 | static int node_req(struct firedtv *fdtv, u64 addr, void *data, size_t len, | 37 | static int node_req(struct firedtv *fdtv, u64 addr, void *data, size_t len, |
38 | int tcode) | 38 | int tcode) |
39 | { | 39 | { |
40 | struct fw_device *device = device_of(fdtv); | 40 | struct fw_device *device = device_of(fdtv); |
41 | int rcode, generation = device->generation; | 41 | int rcode, generation = device->generation; |
42 | 42 | ||
43 | smp_rmb(); /* node_id vs. generation */ | 43 | smp_rmb(); /* node_id vs. generation */ |
44 | 44 | ||
45 | rcode = fw_run_transaction(device->card, tcode, device->node_id, | 45 | rcode = fw_run_transaction(device->card, tcode, device->node_id, |
46 | generation, device->max_speed, addr, data, len); | 46 | generation, device->max_speed, addr, data, len); |
47 | 47 | ||
48 | return rcode != RCODE_COMPLETE ? -EIO : 0; | 48 | return rcode != RCODE_COMPLETE ? -EIO : 0; |
49 | } | 49 | } |
50 | 50 | ||
51 | int fdtv_lock(struct firedtv *fdtv, u64 addr, void *data) | 51 | int fdtv_lock(struct firedtv *fdtv, u64 addr, void *data) |
52 | { | 52 | { |
53 | return node_req(fdtv, addr, data, 8, TCODE_LOCK_COMPARE_SWAP); | 53 | return node_req(fdtv, addr, data, 8, TCODE_LOCK_COMPARE_SWAP); |
54 | } | 54 | } |
55 | 55 | ||
56 | int fdtv_read(struct firedtv *fdtv, u64 addr, void *data) | 56 | int fdtv_read(struct firedtv *fdtv, u64 addr, void *data) |
57 | { | 57 | { |
58 | return node_req(fdtv, addr, data, 4, TCODE_READ_QUADLET_REQUEST); | 58 | return node_req(fdtv, addr, data, 4, TCODE_READ_QUADLET_REQUEST); |
59 | } | 59 | } |
60 | 60 | ||
61 | int fdtv_write(struct firedtv *fdtv, u64 addr, void *data, size_t len) | 61 | int fdtv_write(struct firedtv *fdtv, u64 addr, void *data, size_t len) |
62 | { | 62 | { |
63 | return node_req(fdtv, addr, data, len, TCODE_WRITE_BLOCK_REQUEST); | 63 | return node_req(fdtv, addr, data, len, TCODE_WRITE_BLOCK_REQUEST); |
64 | } | 64 | } |
65 | 65 | ||
66 | #define ISO_HEADER_SIZE 4 | 66 | #define ISO_HEADER_SIZE 4 |
67 | #define CIP_HEADER_SIZE 8 | 67 | #define CIP_HEADER_SIZE 8 |
68 | #define MPEG2_TS_HEADER_SIZE 4 | 68 | #define MPEG2_TS_HEADER_SIZE 4 |
69 | #define MPEG2_TS_SOURCE_PACKET_SIZE (4 + 188) | 69 | #define MPEG2_TS_SOURCE_PACKET_SIZE (4 + 188) |
70 | 70 | ||
71 | #define MAX_PACKET_SIZE 1024 /* 776, rounded up to 2^n */ | 71 | #define MAX_PACKET_SIZE 1024 /* 776, rounded up to 2^n */ |
72 | #define PACKETS_PER_PAGE (PAGE_SIZE / MAX_PACKET_SIZE) | 72 | #define PACKETS_PER_PAGE (PAGE_SIZE / MAX_PACKET_SIZE) |
73 | #define N_PACKETS 64 /* buffer size */ | 73 | #define N_PACKETS 64 /* buffer size */ |
74 | #define N_PAGES DIV_ROUND_UP(N_PACKETS, PACKETS_PER_PAGE) | 74 | #define N_PAGES DIV_ROUND_UP(N_PACKETS, PACKETS_PER_PAGE) |
75 | #define IRQ_INTERVAL 16 | 75 | #define IRQ_INTERVAL 16 |
76 | 76 | ||
77 | struct fdtv_ir_context { | 77 | struct fdtv_ir_context { |
78 | struct fw_iso_context *context; | 78 | struct fw_iso_context *context; |
79 | struct fw_iso_buffer buffer; | 79 | struct fw_iso_buffer buffer; |
80 | int interrupt_packet; | 80 | int interrupt_packet; |
81 | int current_packet; | 81 | int current_packet; |
82 | char *pages[N_PAGES]; | 82 | char *pages[N_PAGES]; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static int queue_iso(struct fdtv_ir_context *ctx, int index) | 85 | static int queue_iso(struct fdtv_ir_context *ctx, int index) |
86 | { | 86 | { |
87 | struct fw_iso_packet p; | 87 | struct fw_iso_packet p; |
88 | 88 | ||
89 | p.payload_length = MAX_PACKET_SIZE; | 89 | p.payload_length = MAX_PACKET_SIZE; |
90 | p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1)); | 90 | p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1)); |
91 | p.skip = 0; | 91 | p.skip = 0; |
92 | p.header_length = ISO_HEADER_SIZE; | 92 | p.header_length = ISO_HEADER_SIZE; |
93 | 93 | ||
94 | return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, | 94 | return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, |
95 | index * MAX_PACKET_SIZE); | 95 | index * MAX_PACKET_SIZE); |
96 | } | 96 | } |
97 | 97 | ||
98 | static void handle_iso(struct fw_iso_context *context, u32 cycle, | 98 | static void handle_iso(struct fw_iso_context *context, u32 cycle, |
99 | size_t header_length, void *header, void *data) | 99 | size_t header_length, void *header, void *data) |
100 | { | 100 | { |
101 | struct firedtv *fdtv = data; | 101 | struct firedtv *fdtv = data; |
102 | struct fdtv_ir_context *ctx = fdtv->ir_context; | 102 | struct fdtv_ir_context *ctx = fdtv->ir_context; |
103 | __be32 *h, *h_end; | 103 | __be32 *h, *h_end; |
104 | int length, err, i = ctx->current_packet; | 104 | int length, err, i = ctx->current_packet; |
105 | char *p, *p_end; | 105 | char *p, *p_end; |
106 | 106 | ||
107 | for (h = header, h_end = h + header_length / 4; h < h_end; h++) { | 107 | for (h = header, h_end = h + header_length / 4; h < h_end; h++) { |
108 | length = be32_to_cpup(h) >> 16; | 108 | length = be32_to_cpup(h) >> 16; |
109 | if (unlikely(length > MAX_PACKET_SIZE)) { | 109 | if (unlikely(length > MAX_PACKET_SIZE)) { |
110 | dev_err(fdtv->device, "length = %d\n", length); | 110 | dev_err(fdtv->device, "length = %d\n", length); |
111 | length = MAX_PACKET_SIZE; | 111 | length = MAX_PACKET_SIZE; |
112 | } | 112 | } |
113 | 113 | ||
114 | p = ctx->pages[i / PACKETS_PER_PAGE] | 114 | p = ctx->pages[i / PACKETS_PER_PAGE] |
115 | + (i % PACKETS_PER_PAGE) * MAX_PACKET_SIZE; | 115 | + (i % PACKETS_PER_PAGE) * MAX_PACKET_SIZE; |
116 | p_end = p + length; | 116 | p_end = p + length; |
117 | 117 | ||
118 | for (p += CIP_HEADER_SIZE + MPEG2_TS_HEADER_SIZE; p < p_end; | 118 | for (p += CIP_HEADER_SIZE + MPEG2_TS_HEADER_SIZE; p < p_end; |
119 | p += MPEG2_TS_SOURCE_PACKET_SIZE) | 119 | p += MPEG2_TS_SOURCE_PACKET_SIZE) |
120 | dvb_dmx_swfilter_packets(&fdtv->demux, p, 1); | 120 | dvb_dmx_swfilter_packets(&fdtv->demux, p, 1); |
121 | 121 | ||
122 | err = queue_iso(ctx, i); | 122 | err = queue_iso(ctx, i); |
123 | if (unlikely(err)) | 123 | if (unlikely(err)) |
124 | dev_err(fdtv->device, "requeue failed\n"); | 124 | dev_err(fdtv->device, "requeue failed\n"); |
125 | 125 | ||
126 | i = (i + 1) & (N_PACKETS - 1); | 126 | i = (i + 1) & (N_PACKETS - 1); |
127 | } | 127 | } |
128 | fw_iso_context_queue_flush(ctx->context); | ||
128 | ctx->current_packet = i; | 129 | ctx->current_packet = i; |
129 | } | 130 | } |
130 | 131 | ||
131 | int fdtv_start_iso(struct firedtv *fdtv) | 132 | int fdtv_start_iso(struct firedtv *fdtv) |
132 | { | 133 | { |
133 | struct fdtv_ir_context *ctx; | 134 | struct fdtv_ir_context *ctx; |
134 | struct fw_device *device = device_of(fdtv); | 135 | struct fw_device *device = device_of(fdtv); |
135 | int i, err; | 136 | int i, err; |
136 | 137 | ||
137 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 138 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
138 | if (!ctx) | 139 | if (!ctx) |
139 | return -ENOMEM; | 140 | return -ENOMEM; |
140 | 141 | ||
141 | ctx->context = fw_iso_context_create(device->card, | 142 | ctx->context = fw_iso_context_create(device->card, |
142 | FW_ISO_CONTEXT_RECEIVE, fdtv->isochannel, | 143 | FW_ISO_CONTEXT_RECEIVE, fdtv->isochannel, |
143 | device->max_speed, ISO_HEADER_SIZE, handle_iso, fdtv); | 144 | device->max_speed, ISO_HEADER_SIZE, handle_iso, fdtv); |
144 | if (IS_ERR(ctx->context)) { | 145 | if (IS_ERR(ctx->context)) { |
145 | err = PTR_ERR(ctx->context); | 146 | err = PTR_ERR(ctx->context); |
146 | goto fail_free; | 147 | goto fail_free; |
147 | } | 148 | } |
148 | 149 | ||
149 | err = fw_iso_buffer_init(&ctx->buffer, device->card, | 150 | err = fw_iso_buffer_init(&ctx->buffer, device->card, |
150 | N_PAGES, DMA_FROM_DEVICE); | 151 | N_PAGES, DMA_FROM_DEVICE); |
151 | if (err) | 152 | if (err) |
152 | goto fail_context_destroy; | 153 | goto fail_context_destroy; |
153 | 154 | ||
154 | ctx->interrupt_packet = 0; | 155 | ctx->interrupt_packet = 0; |
155 | ctx->current_packet = 0; | 156 | ctx->current_packet = 0; |
156 | 157 | ||
157 | for (i = 0; i < N_PAGES; i++) | 158 | for (i = 0; i < N_PAGES; i++) |
158 | ctx->pages[i] = page_address(ctx->buffer.pages[i]); | 159 | ctx->pages[i] = page_address(ctx->buffer.pages[i]); |
159 | 160 | ||
160 | for (i = 0; i < N_PACKETS; i++) { | 161 | for (i = 0; i < N_PACKETS; i++) { |
161 | err = queue_iso(ctx, i); | 162 | err = queue_iso(ctx, i); |
162 | if (err) | 163 | if (err) |
163 | goto fail; | 164 | goto fail; |
164 | } | 165 | } |
165 | 166 | ||
166 | err = fw_iso_context_start(ctx->context, -1, 0, | 167 | err = fw_iso_context_start(ctx->context, -1, 0, |
167 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); | 168 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); |
168 | if (err) | 169 | if (err) |
169 | goto fail; | 170 | goto fail; |
170 | 171 | ||
171 | fdtv->ir_context = ctx; | 172 | fdtv->ir_context = ctx; |
172 | 173 | ||
173 | return 0; | 174 | return 0; |
174 | fail: | 175 | fail: |
175 | fw_iso_buffer_destroy(&ctx->buffer, device->card); | 176 | fw_iso_buffer_destroy(&ctx->buffer, device->card); |
176 | fail_context_destroy: | 177 | fail_context_destroy: |
177 | fw_iso_context_destroy(ctx->context); | 178 | fw_iso_context_destroy(ctx->context); |
178 | fail_free: | 179 | fail_free: |
179 | kfree(ctx); | 180 | kfree(ctx); |
180 | 181 | ||
181 | return err; | 182 | return err; |
182 | } | 183 | } |
183 | 184 | ||
184 | void fdtv_stop_iso(struct firedtv *fdtv) | 185 | void fdtv_stop_iso(struct firedtv *fdtv) |
185 | { | 186 | { |
186 | struct fdtv_ir_context *ctx = fdtv->ir_context; | 187 | struct fdtv_ir_context *ctx = fdtv->ir_context; |
187 | 188 | ||
188 | fw_iso_context_stop(ctx->context); | 189 | fw_iso_context_stop(ctx->context); |
189 | fw_iso_buffer_destroy(&ctx->buffer, device_of(fdtv)->card); | 190 | fw_iso_buffer_destroy(&ctx->buffer, device_of(fdtv)->card); |
190 | fw_iso_context_destroy(ctx->context); | 191 | fw_iso_context_destroy(ctx->context); |
191 | kfree(ctx); | 192 | kfree(ctx); |
192 | } | 193 | } |
193 | 194 | ||
194 | static void handle_fcp(struct fw_card *card, struct fw_request *request, | 195 | static void handle_fcp(struct fw_card *card, struct fw_request *request, |
195 | int tcode, int destination, int source, int generation, | 196 | int tcode, int destination, int source, int generation, |
196 | unsigned long long offset, void *payload, size_t length, | 197 | unsigned long long offset, void *payload, size_t length, |
197 | void *callback_data) | 198 | void *callback_data) |
198 | { | 199 | { |
199 | struct firedtv *f, *fdtv = NULL; | 200 | struct firedtv *f, *fdtv = NULL; |
200 | struct fw_device *device; | 201 | struct fw_device *device; |
201 | unsigned long flags; | 202 | unsigned long flags; |
202 | int su; | 203 | int su; |
203 | 204 | ||
204 | if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0) | 205 | if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0) |
205 | return; | 206 | return; |
206 | 207 | ||
207 | su = ((u8 *)payload)[1] & 0x7; | 208 | su = ((u8 *)payload)[1] & 0x7; |
208 | 209 | ||
209 | spin_lock_irqsave(&node_list_lock, flags); | 210 | spin_lock_irqsave(&node_list_lock, flags); |
210 | list_for_each_entry(f, &node_list, list) { | 211 | list_for_each_entry(f, &node_list, list) { |
211 | device = device_of(f); | 212 | device = device_of(f); |
212 | if (device->generation != generation) | 213 | if (device->generation != generation) |
213 | continue; | 214 | continue; |
214 | 215 | ||
215 | smp_rmb(); /* node_id vs. generation */ | 216 | smp_rmb(); /* node_id vs. generation */ |
216 | 217 | ||
217 | if (device->card == card && | 218 | if (device->card == card && |
218 | device->node_id == source && | 219 | device->node_id == source && |
219 | (f->subunit == su || (f->subunit == 0 && su == 0x7))) { | 220 | (f->subunit == su || (f->subunit == 0 && su == 0x7))) { |
220 | fdtv = f; | 221 | fdtv = f; |
221 | break; | 222 | break; |
222 | } | 223 | } |
223 | } | 224 | } |
224 | spin_unlock_irqrestore(&node_list_lock, flags); | 225 | spin_unlock_irqrestore(&node_list_lock, flags); |
225 | 226 | ||
226 | if (fdtv) | 227 | if (fdtv) |
227 | avc_recv(fdtv, payload, length); | 228 | avc_recv(fdtv, payload, length); |
228 | } | 229 | } |
229 | 230 | ||
230 | static struct fw_address_handler fcp_handler = { | 231 | static struct fw_address_handler fcp_handler = { |
231 | .length = CSR_FCP_END - CSR_FCP_RESPONSE, | 232 | .length = CSR_FCP_END - CSR_FCP_RESPONSE, |
232 | .address_callback = handle_fcp, | 233 | .address_callback = handle_fcp, |
233 | }; | 234 | }; |
234 | 235 | ||
235 | static const struct fw_address_region fcp_region = { | 236 | static const struct fw_address_region fcp_region = { |
236 | .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, | 237 | .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, |
237 | .end = CSR_REGISTER_BASE + CSR_FCP_END, | 238 | .end = CSR_REGISTER_BASE + CSR_FCP_END, |
238 | }; | 239 | }; |
239 | 240 | ||
240 | static const char * const model_names[] = { | 241 | static const char * const model_names[] = { |
241 | [FIREDTV_UNKNOWN] = "unknown type", | 242 | [FIREDTV_UNKNOWN] = "unknown type", |
242 | [FIREDTV_DVB_S] = "FireDTV S/CI", | 243 | [FIREDTV_DVB_S] = "FireDTV S/CI", |
243 | [FIREDTV_DVB_C] = "FireDTV C/CI", | 244 | [FIREDTV_DVB_C] = "FireDTV C/CI", |
244 | [FIREDTV_DVB_T] = "FireDTV T/CI", | 245 | [FIREDTV_DVB_T] = "FireDTV T/CI", |
245 | [FIREDTV_DVB_S2] = "FireDTV S2 ", | 246 | [FIREDTV_DVB_S2] = "FireDTV S2 ", |
246 | }; | 247 | }; |
247 | 248 | ||
248 | /* Adjust the template string if models with longer names appear. */ | 249 | /* Adjust the template string if models with longer names appear. */ |
249 | #define MAX_MODEL_NAME_LEN sizeof("FireDTV ????") | 250 | #define MAX_MODEL_NAME_LEN sizeof("FireDTV ????") |
250 | 251 | ||
251 | static int node_probe(struct device *dev) | 252 | static int node_probe(struct device *dev) |
252 | { | 253 | { |
253 | struct firedtv *fdtv; | 254 | struct firedtv *fdtv; |
254 | char name[MAX_MODEL_NAME_LEN]; | 255 | char name[MAX_MODEL_NAME_LEN]; |
255 | int name_len, i, err; | 256 | int name_len, i, err; |
256 | 257 | ||
257 | fdtv = kzalloc(sizeof(*fdtv), GFP_KERNEL); | 258 | fdtv = kzalloc(sizeof(*fdtv), GFP_KERNEL); |
258 | if (!fdtv) | 259 | if (!fdtv) |
259 | return -ENOMEM; | 260 | return -ENOMEM; |
260 | 261 | ||
261 | dev_set_drvdata(dev, fdtv); | 262 | dev_set_drvdata(dev, fdtv); |
262 | fdtv->device = dev; | 263 | fdtv->device = dev; |
263 | fdtv->isochannel = -1; | 264 | fdtv->isochannel = -1; |
264 | fdtv->voltage = 0xff; | 265 | fdtv->voltage = 0xff; |
265 | fdtv->tone = 0xff; | 266 | fdtv->tone = 0xff; |
266 | 267 | ||
267 | mutex_init(&fdtv->avc_mutex); | 268 | mutex_init(&fdtv->avc_mutex); |
268 | init_waitqueue_head(&fdtv->avc_wait); | 269 | init_waitqueue_head(&fdtv->avc_wait); |
269 | mutex_init(&fdtv->demux_mutex); | 270 | mutex_init(&fdtv->demux_mutex); |
270 | INIT_WORK(&fdtv->remote_ctrl_work, avc_remote_ctrl_work); | 271 | INIT_WORK(&fdtv->remote_ctrl_work, avc_remote_ctrl_work); |
271 | 272 | ||
272 | name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL, | 273 | name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL, |
273 | name, sizeof(name)); | 274 | name, sizeof(name)); |
274 | for (i = ARRAY_SIZE(model_names); --i; ) | 275 | for (i = ARRAY_SIZE(model_names); --i; ) |
275 | if (strlen(model_names[i]) <= name_len && | 276 | if (strlen(model_names[i]) <= name_len && |
276 | strncmp(name, model_names[i], name_len) == 0) | 277 | strncmp(name, model_names[i], name_len) == 0) |
277 | break; | 278 | break; |
278 | fdtv->type = i; | 279 | fdtv->type = i; |
279 | 280 | ||
280 | err = fdtv_register_rc(fdtv, dev); | 281 | err = fdtv_register_rc(fdtv, dev); |
281 | if (err) | 282 | if (err) |
282 | goto fail_free; | 283 | goto fail_free; |
283 | 284 | ||
284 | spin_lock_irq(&node_list_lock); | 285 | spin_lock_irq(&node_list_lock); |
285 | list_add_tail(&fdtv->list, &node_list); | 286 | list_add_tail(&fdtv->list, &node_list); |
286 | spin_unlock_irq(&node_list_lock); | 287 | spin_unlock_irq(&node_list_lock); |
287 | 288 | ||
288 | err = avc_identify_subunit(fdtv); | 289 | err = avc_identify_subunit(fdtv); |
289 | if (err) | 290 | if (err) |
290 | goto fail; | 291 | goto fail; |
291 | 292 | ||
292 | err = fdtv_dvb_register(fdtv, model_names[fdtv->type]); | 293 | err = fdtv_dvb_register(fdtv, model_names[fdtv->type]); |
293 | if (err) | 294 | if (err) |
294 | goto fail; | 295 | goto fail; |
295 | 296 | ||
296 | avc_register_remote_control(fdtv); | 297 | avc_register_remote_control(fdtv); |
297 | 298 | ||
298 | return 0; | 299 | return 0; |
299 | fail: | 300 | fail: |
300 | spin_lock_irq(&node_list_lock); | 301 | spin_lock_irq(&node_list_lock); |
301 | list_del(&fdtv->list); | 302 | list_del(&fdtv->list); |
302 | spin_unlock_irq(&node_list_lock); | 303 | spin_unlock_irq(&node_list_lock); |
303 | fdtv_unregister_rc(fdtv); | 304 | fdtv_unregister_rc(fdtv); |
304 | fail_free: | 305 | fail_free: |
305 | kfree(fdtv); | 306 | kfree(fdtv); |
306 | 307 | ||
307 | return err; | 308 | return err; |
308 | } | 309 | } |
309 | 310 | ||
/*
 * Driver unbind callback.  Tears the node down in reverse order of
 * node_probe(): DVB registration first, then node_list membership,
 * then the remote control, and finally the firedtv structure itself.
 */
static int node_remove(struct device *dev)
{
	struct firedtv *fdtv = dev_get_drvdata(dev);

	fdtv_dvb_unregister(fdtv);

	/* node_list insertion/removal is serialized by node_list_lock */
	spin_lock_irq(&node_list_lock);
	list_del(&fdtv->list);
	spin_unlock_irq(&node_list_lock);

	fdtv_unregister_rc(fdtv);

	kfree(fdtv);
	return 0;
}
325 | 326 | ||
326 | static void node_update(struct fw_unit *unit) | 327 | static void node_update(struct fw_unit *unit) |
327 | { | 328 | { |
328 | struct firedtv *fdtv = dev_get_drvdata(&unit->device); | 329 | struct firedtv *fdtv = dev_get_drvdata(&unit->device); |
329 | 330 | ||
330 | if (fdtv->isochannel >= 0) | 331 | if (fdtv->isochannel >= 0) |
331 | cmp_establish_pp_connection(fdtv, fdtv->subunit, | 332 | cmp_establish_pp_connection(fdtv, fdtv->subunit, |
332 | fdtv->isochannel); | 333 | fdtv->isochannel); |
333 | } | 334 | } |
334 | 335 | ||
/* Match on all four ID fields so only Digital Everywhere AV/C units bind. */
#define MATCH_FLAGS (IEEE1394_MATCH_VENDOR_ID | IEEE1394_MATCH_MODEL_ID | \
		     IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION)

#define DIGITAL_EVERYWHERE_OUI	0x001287	/* vendor OUI */
#define AVC_UNIT_SPEC_ID_ENTRY	0x00a02d	/* AV/C unit specifier ID */
#define AVC_SW_VERSION_ENTRY	0x010001	/* AV/C software version */
341 | 342 | ||
/*
 * Hardware handled by this driver.  All entries share the Digital
 * Everywhere vendor OUI and the AV/C specifier/version pair; only the
 * model ID distinguishes the individual FloppyDTV/FireDTV products.
 */
static const struct ieee1394_device_id fdtv_id_table[] = {
	{
		/* FloppyDTV S/CI and FloppyDTV S2 */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000024,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {
		/* FloppyDTV T/CI */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000025,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {
		/* FloppyDTV C/CI */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000026,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {
		/* FireDTV S/CI and FireDTV S2 */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000034,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {
		/* FireDTV T/CI */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000035,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {
		/* FireDTV C/CI */
		.match_flags	= MATCH_FLAGS,
		.vendor_id	= DIGITAL_EVERYWHERE_OUI,
		.model_id	= 0x000036,
		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY,
		.version	= AVC_SW_VERSION_ENTRY,
	}, {}	/* terminator */
};
MODULE_DEVICE_TABLE(ieee1394, fdtv_id_table);
388 | 389 | ||
/* Glue into the firewire core's driver model; one bind per matched unit. */
static struct fw_driver fdtv_driver = {
	.driver   = {
		.owner	= THIS_MODULE,
		.name	= "firedtv",
		.bus	= &fw_bus_type,
		.probe	= node_probe,
		.remove	= node_remove,
	},
	.update   = node_update,	/* re-run after each bus reset */
	.id_table = fdtv_id_table,
};
400 | 401 | ||
401 | static int __init fdtv_init(void) | 402 | static int __init fdtv_init(void) |
402 | { | 403 | { |
403 | int ret; | 404 | int ret; |
404 | 405 | ||
405 | ret = fw_core_add_address_handler(&fcp_handler, &fcp_region); | 406 | ret = fw_core_add_address_handler(&fcp_handler, &fcp_region); |
406 | if (ret < 0) | 407 | if (ret < 0) |
407 | return ret; | 408 | return ret; |
408 | 409 | ||
409 | ret = driver_register(&fdtv_driver.driver); | 410 | ret = driver_register(&fdtv_driver.driver); |
410 | if (ret < 0) | 411 | if (ret < 0) |
411 | fw_core_remove_address_handler(&fcp_handler); | 412 | fw_core_remove_address_handler(&fcp_handler); |
412 | 413 | ||
413 | return ret; | 414 | return ret; |
414 | } | 415 | } |
415 | 416 | ||
/* Module exit: unwind fdtv_init() in reverse order. */
static void __exit fdtv_exit(void)
{
	driver_unregister(&fdtv_driver.driver);
	fw_core_remove_address_handler(&fcp_handler);
}
421 | 422 | ||
module_init(fdtv_init);
module_exit(fdtv_exit);

/* Module metadata */
MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>");
MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
MODULE_DESCRIPTION("FireDTV DVB Driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("FireDTV DVB");
430 | 431 |
include/linux/firewire.h
1 | #ifndef _LINUX_FIREWIRE_H | 1 | #ifndef _LINUX_FIREWIRE_H |
2 | #define _LINUX_FIREWIRE_H | 2 | #define _LINUX_FIREWIRE_H |
3 | 3 | ||
4 | #include <linux/completion.h> | 4 | #include <linux/completion.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/dma-mapping.h> | 6 | #include <linux/dma-mapping.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/kref.h> | 8 | #include <linux/kref.h> |
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/sysfs.h> | 12 | #include <linux/sysfs.h> |
13 | #include <linux/timer.h> | 13 | #include <linux/timer.h> |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
16 | 16 | ||
17 | #include <asm/atomic.h> | 17 | #include <asm/atomic.h> |
18 | #include <asm/byteorder.h> | 18 | #include <asm/byteorder.h> |
19 | 19 | ||
/* printk wrappers that prefix messages with the module name */
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)

#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_PRIORITY_BUDGET		0x218
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
#define CSR_CHANNELS_AVAILABLE		0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_MAINT_UTILITY		0x230
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
#define CSR_OMPR			0x900
#define CSR_OPCR(i)			(0x904 + (i) * 4)
#define CSR_IMPR			0x980
#define CSR_IPCR(i)			(0x984 + (i) * 4)
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000

/* config ROM entry types */
#define CSR_OFFSET	0x40
#define CSR_LEAF	0x80
#define CSR_DIRECTORY	0xc0

/* config ROM directory key IDs, used with the fw_csr_* helpers below */
#define CSR_DESCRIPTOR		0x01
#define CSR_VENDOR		0x03
#define CSR_HARDWARE_VERSION	0x04
#define CSR_UNIT		0x11
#define CSR_SPECIFIER_ID	0x12
#define CSR_VERSION		0x13
#define CSR_DEPENDENT_INFO	0x14
#define CSR_MODEL		0x17
#define CSR_DIRECTORY_ID	0x20
70 | 70 | ||
/*
 * Iterator over the (key, value) pairs of a config ROM directory.
 * Initialize with fw_csr_iterator_init(), step with fw_csr_iterator_next().
 * fw_csr_string() looks up the leaf string for @key and copies it to @buf;
 * callers use the return value as the string length (see firedtv's probe).
 */
struct fw_csr_iterator {
	const u32 *p;
	const u32 *end;
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
79 | 79 | ||
80 | extern struct bus_type fw_bus_type; | 80 | extern struct bus_type fw_bus_type; |
81 | 81 | ||
82 | struct fw_card_driver; | 82 | struct fw_card_driver; |
83 | struct fw_node; | 83 | struct fw_node; |
84 | 84 | ||
/*
 * Per-host-controller state.  Reference-counted via @kref; see the
 * comment at @lock for locking of the list members.
 */
struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;		/* local node ID, valid for @generation */
	int generation;		/* bus generation; changes on each bus reset */
	int current_tlabel;
	u64 tlabel_mask;	/* transaction labels currently in flight */
	struct list_head transaction_list;
	u64 reset_jiffies;

	u32 split_timeout_hi;
	u32 split_timeout_lo;
	unsigned int split_timeout_cycles;
	unsigned int split_timeout_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;
	struct list_head link;

	struct list_head phy_receiver_list;

	struct delayed_work br_work; /* bus reset job */
	bool br_short;

	struct delayed_work bm_work; /* bus manager job */
	int bm_retries;
	int bm_generation;
	int bm_node_id;
	bool bm_abdicate;

	bool priority_budget_implemented;	/* controller feature */
	bool broadcast_channel_auto_allocated;	/* controller feature */

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

	__be32 maint_utility_register;
};
140 | 140 | ||
/* Backing storage for the sysfs attribute groups of a device or unit. */
struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};

/* Lifecycle states of a fw_device; stored in fw_device.state (atomic_t). */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Note, fw_device.generation always has to be read before fw_device.node_id. | 155 | * Note, fw_device.generation always has to be read before fw_device.node_id. |
156 | * Use SMP memory barriers to ensure this. Otherwise requests will be sent | 156 | * Use SMP memory barriers to ensure this. Otherwise requests will be sent |
157 | * to an outdated node_id if the generation was updated in the meantime due | 157 | * to an outdated node_id if the generation was updated in the meantime due |
158 | * to a bus reset. | 158 | * to a bus reset. |
159 | * | 159 | * |
160 | * Likewise, fw-core will take care to update .node_id before .generation so | 160 | * Likewise, fw-core will take care to update .node_id before .generation so |
161 | * that whenever fw_device.generation is current WRT the actual bus generation, | 161 | * that whenever fw_device.generation is current WRT the actual bus generation, |
162 | * fw_device.node_id is guaranteed to be current too. | 162 | * fw_device.node_id is guaranteed to be current too. |
163 | * | 163 | * |
164 | * The same applies to fw_device.card->node_id vs. fw_device.generation. | 164 | * The same applies to fw_device.card->node_id vs. fw_device.generation. |
165 | * | 165 | * |
166 | * fw_device.config_rom and fw_device.config_rom_length may be accessed during | 166 | * fw_device.config_rom and fw_device.config_rom_length may be accessed during |
167 | * the lifetime of any fw_unit belonging to the fw_device, before device_del() | 167 | * the lifetime of any fw_unit belonging to the fw_device, before device_del() |
168 | * was called on the last fw_unit. Alternatively, they may be accessed while | 168 | * was called on the last fw_unit. Alternatively, they may be accessed while |
169 | * holding fw_device_rwsem. | 169 | * holding fw_device_rwsem. |
170 | */ | 170 | */ |
171 | struct fw_device { | 171 | struct fw_device { |
172 | atomic_t state; | 172 | atomic_t state; |
173 | struct fw_node *node; | 173 | struct fw_node *node; |
174 | int node_id; | 174 | int node_id; |
175 | int generation; | 175 | int generation; |
176 | unsigned max_speed; | 176 | unsigned max_speed; |
177 | struct fw_card *card; | 177 | struct fw_card *card; |
178 | struct device device; | 178 | struct device device; |
179 | 179 | ||
180 | struct mutex client_list_mutex; | 180 | struct mutex client_list_mutex; |
181 | struct list_head client_list; | 181 | struct list_head client_list; |
182 | 182 | ||
183 | const u32 *config_rom; | 183 | const u32 *config_rom; |
184 | size_t config_rom_length; | 184 | size_t config_rom_length; |
185 | int config_rom_retries; | 185 | int config_rom_retries; |
186 | unsigned is_local:1; | 186 | unsigned is_local:1; |
187 | unsigned max_rec:4; | 187 | unsigned max_rec:4; |
188 | unsigned cmc:1; | 188 | unsigned cmc:1; |
189 | unsigned irmc:1; | 189 | unsigned irmc:1; |
190 | unsigned bc_implemented:2; | 190 | unsigned bc_implemented:2; |
191 | 191 | ||
192 | struct delayed_work work; | 192 | struct delayed_work work; |
193 | struct fw_attribute_group attribute_group; | 193 | struct fw_attribute_group attribute_group; |
194 | }; | 194 | }; |
195 | 195 | ||
/* Convert a driver-core device back to its containing fw_device. */
static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}

/* Nonzero once the device has reached the FW_DEVICE_SHUTDOWN state. */
static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}

/* Take a reference on the underlying struct device; returns @device. */
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

/* Drop a reference taken with fw_device_get(). */
static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}

int fw_device_enable_phys_dma(struct fw_device *device);
219 | 219 | ||
220 | /* | 220 | /* |
221 | * fw_unit.directory must not be accessed after device_del(&fw_unit.device). | 221 | * fw_unit.directory must not be accessed after device_del(&fw_unit.device). |
222 | */ | 222 | */ |
223 | struct fw_unit { | 223 | struct fw_unit { |
224 | struct device device; | 224 | struct device device; |
225 | const u32 *directory; | 225 | const u32 *directory; |
226 | struct fw_attribute_group attribute_group; | 226 | struct fw_attribute_group attribute_group; |
227 | }; | 227 | }; |
228 | 228 | ||
/* Convert a driver-core device back to its containing fw_unit. */
static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}

/* Take a reference on the underlying struct device; returns @unit. */
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}

/* Drop a reference taken with fw_unit_get(). */
static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}

/* The fw_device a unit belongs to is its parent in the device tree. */
static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
{
	return fw_device(unit->device.parent);
}
250 | 250 | ||
struct ieee1394_device_id;

/* A driver for FireWire units, bound through the standard driver model. */
struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;
};

struct fw_packet;
struct fw_request;

/* Completion callback for a transmitted packet; see fw_packet.callback. */
typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);

/* Completion callback for a transaction; @rcode reports the outcome. */
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);
/*
 * Important note: Except for the FCP registers, the callback must guarantee
 * that either fw_send_response() or kfree() is called on the @request.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
279 | 279 | ||
/* A packet as queued with and completed by the card driver. */
struct fw_packet {
	int speed;
	int generation;		/* bus generation this packet is valid for */
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	bool payload_mapped;	/* payload_bus holds a valid DMA mapping */
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has completed.
	 * For successful transmission, the status code is the ack received
	 * from the destination.  Otherwise it is one of the juju-specific
	 * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};
304 | 304 | ||
/*
 * An in-flight transaction.
 * NOTE(review): appears to be linked onto fw_card.transaction_list via
 * @link and matched by node ID / tlabel — confirm in core-transaction.c.
 */
struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	struct list_head link;
	struct fw_card *card;
	bool is_split_transaction;
	struct timer_list split_timeout_timer;

	struct fw_packet packet;	/* the request packet itself */

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};
322 | 322 | ||
/*
 * A handler for incoming requests to a range of 1394 address space;
 * @address_callback is invoked for requests that fall within
 * @offset .. @offset + @length (presumably half-open — confirm).
 */
struct fw_address_handler {
	u64 offset;
	size_t length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};

/* An address range within which fw_core_add_address_handler() may place
 * a handler. */
struct fw_address_region {
	u64 start;
	u64 end;
};

extern const struct fw_address_region fw_high_memory_region;

int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
/* Send the response to a request delivered through an address handler. */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
/* Asynchronous transaction; @callback runs on completion. */
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
/* Synchronous counterpart of fw_send_request(). */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
352 | 352 | ||
/*
 * Pack the stream-packet addressing fields into a single destination ID:
 * tag starting at bit 14, channel starting at bit 8, sy in the low bits.
 */
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return (tag << 14) | (channel << 8) | sy;
}
357 | 357 | ||
/*
 * A descriptor to be exported in the local node's configuration ROM,
 * registered with fw_core_add_descriptor() and removed again with
 * fw_core_remove_descriptor().
 */
struct fw_descriptor {
	struct list_head link;	/* list membership, managed by the fw core */
	size_t length;		/* length of 'data' -- presumably in quadlets; confirm */
	u32 immediate;		/* NOTE(review): immediate ROM entry when nonzero -- confirm */
	u32 key;		/* NOTE(review): key of the ROM entry referring to 'data' -- confirm */
	const u32 *data;	/* descriptor contents */
};
365 | 365 | ||
366 | int fw_core_add_descriptor(struct fw_descriptor *desc); | 366 | int fw_core_add_descriptor(struct fw_descriptor *desc); |
367 | void fw_core_remove_descriptor(struct fw_descriptor *desc); | 367 | void fw_core_remove_descriptor(struct fw_descriptor *desc); |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * The iso packet format allows for an immediate header/payload part | 370 | * The iso packet format allows for an immediate header/payload part |
371 | * stored in 'header' immediately after the packet info plus an | 371 | * stored in 'header' immediately after the packet info plus an |
372 | * indirect payload part that is pointer to by the 'payload' field. | 372 | * indirect payload part that is pointer to by the 'payload' field. |
373 | * Applications can use one or the other or both to implement simple | 373 | * Applications can use one or the other or both to implement simple |
374 | * low-bandwidth streaming (e.g. audio) or more advanced | 374 | * low-bandwidth streaming (e.g. audio) or more advanced |
375 | * scatter-gather streaming (e.g. assembling video frame automatically). | 375 | * scatter-gather streaming (e.g. assembling video frame automatically). |
376 | */ | 376 | */ |
377 | struct fw_iso_packet { | 377 | struct fw_iso_packet { |
378 | u16 payload_length; /* Length of indirect payload */ | 378 | u16 payload_length; /* Length of indirect payload */ |
379 | u32 interrupt:1; /* Generate interrupt on this packet */ | 379 | u32 interrupt:1; /* Generate interrupt on this packet */ |
380 | u32 skip:1; /* tx: Set to not send packet at all */ | 380 | u32 skip:1; /* tx: Set to not send packet at all */ |
381 | /* rx: Sync bit, wait for matching sy */ | 381 | /* rx: Sync bit, wait for matching sy */ |
382 | u32 tag:2; /* tx: Tag in packet header */ | 382 | u32 tag:2; /* tx: Tag in packet header */ |
383 | u32 sy:4; /* tx: Sy in packet header */ | 383 | u32 sy:4; /* tx: Sy in packet header */ |
384 | u32 header_length:8; /* Length of immediate header */ | 384 | u32 header_length:8; /* Length of immediate header */ |
385 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ | 385 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ |
386 | }; | 386 | }; |
387 | 387 | ||
388 | #define FW_ISO_CONTEXT_TRANSMIT 0 | 388 | #define FW_ISO_CONTEXT_TRANSMIT 0 |
389 | #define FW_ISO_CONTEXT_RECEIVE 1 | 389 | #define FW_ISO_CONTEXT_RECEIVE 1 |
390 | #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 | 390 | #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 |
391 | 391 | ||
392 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 | 392 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 |
393 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 | 393 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 |
394 | #define FW_ISO_CONTEXT_MATCH_TAG2 4 | 394 | #define FW_ISO_CONTEXT_MATCH_TAG2 4 |
395 | #define FW_ISO_CONTEXT_MATCH_TAG3 8 | 395 | #define FW_ISO_CONTEXT_MATCH_TAG3 8 |
396 | #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 | 396 | #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 |
397 | 397 | ||
/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private. The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
struct fw_iso_buffer {
	enum dma_data_direction direction;	/* DMA direction of all pages */
	struct page **pages;			/* array of page_count page pointers */
	int page_count;				/* number of entries in 'pages' */
};
410 | 410 | ||
411 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | 411 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
412 | int page_count, enum dma_data_direction direction); | 412 | int page_count, enum dma_data_direction direction); |
413 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | 413 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); |
414 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); | 414 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); |
415 | 415 | ||
416 | struct fw_iso_context; | 416 | struct fw_iso_context; |
417 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | 417 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, |
418 | u32 cycle, size_t header_length, | 418 | u32 cycle, size_t header_length, |
419 | void *header, void *data); | 419 | void *header, void *data); |
420 | typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, | 420 | typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, |
421 | dma_addr_t completed, void *data); | 421 | dma_addr_t completed, void *data); |
/*
 * One isochronous transmit or receive context; created with
 * fw_iso_context_create() and driven via fw_iso_context_queue(),
 * _start(), _stop(), and _destroy().
 */
struct fw_iso_context {
	struct fw_card *card;		/* card this context belongs to */
	int type;			/* FW_ISO_CONTEXT_TRANSMIT/RECEIVE[_MULTICHANNEL] */
	int channel;			/* isochronous channel number */
	int speed;			/* bus speed code */
	size_t header_size;		/* per-packet header size -- presumably the
					 * amount passed to the rx callback; confirm */
	union {
		fw_iso_callback_t sc;	/* single-channel completion callback */
		fw_iso_mc_callback_t mc; /* multichannel completion callback */
	} callback;
	void *callback_data;		/* opaque argument passed to the callback */
};
434 | 434 | ||
435 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 435 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
436 | int type, int channel, int speed, size_t header_size, | 436 | int type, int channel, int speed, size_t header_size, |
437 | fw_iso_callback_t callback, void *callback_data); | 437 | fw_iso_callback_t callback, void *callback_data); |
438 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); | 438 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); |
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);
/*
 * Queueing a packet no longer wakes the DMA context by itself: after a
 * batch of fw_iso_context_queue() calls, the caller must invoke
 * fw_iso_context_queue_flush() once so the context starts processing.
 * Deferring the wakeup saves all but one of the MMIO accesses per batch.
 */
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
443 | int fw_iso_context_start(struct fw_iso_context *ctx, | 444 | int fw_iso_context_start(struct fw_iso_context *ctx, |
444 | int cycle, int sync, int tags); | 445 | int cycle, int sync, int tags); |
445 | int fw_iso_context_stop(struct fw_iso_context *ctx); | 446 | int fw_iso_context_stop(struct fw_iso_context *ctx); |
446 | void fw_iso_context_destroy(struct fw_iso_context *ctx); | 447 | void fw_iso_context_destroy(struct fw_iso_context *ctx); |
447 | void fw_iso_resource_manage(struct fw_card *card, int generation, | 448 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
448 | u64 channels_mask, int *channel, int *bandwidth, | 449 | u64 channels_mask, int *channel, int *bandwidth, |
449 | bool allocate); | 450 | bool allocate); |
450 | 451 | ||
451 | #endif /* _LINUX_FIREWIRE_H */ | 452 | #endif /* _LINUX_FIREWIRE_H */ |
452 | 453 |
sound/firewire/amdtp.c
1 | /* | 1 | /* |
2 | * Audio and Music Data Transmission Protocol (IEC 61883-6) streams | 2 | * Audio and Music Data Transmission Protocol (IEC 61883-6) streams |
3 | * with Common Isochronous Packet (IEC 61883-1) headers | 3 | * with Common Isochronous Packet (IEC 61883-1) headers |
4 | * | 4 | * |
5 | * Copyright (c) Clemens Ladisch <clemens@ladisch.de> | 5 | * Copyright (c) Clemens Ladisch <clemens@ladisch.de> |
6 | * Licensed under the terms of the GNU General Public License, version 2. | 6 | * Licensed under the terms of the GNU General Public License, version 2. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/err.h> | 10 | #include <linux/err.h> |
11 | #include <linux/firewire.h> | 11 | #include <linux/firewire.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <sound/pcm.h> | 14 | #include <sound/pcm.h> |
15 | #include "amdtp.h" | 15 | #include "amdtp.h" |
16 | 16 | ||
17 | #define TICKS_PER_CYCLE 3072 | 17 | #define TICKS_PER_CYCLE 3072 |
18 | #define CYCLES_PER_SECOND 8000 | 18 | #define CYCLES_PER_SECOND 8000 |
19 | #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) | 19 | #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) |
20 | 20 | ||
21 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 ยตs */ | 21 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 ยตs */ |
22 | 22 | ||
23 | #define TAG_CIP 1 | 23 | #define TAG_CIP 1 |
24 | 24 | ||
25 | #define CIP_EOH (1u << 31) | 25 | #define CIP_EOH (1u << 31) |
26 | #define CIP_FMT_AM (0x10 << 24) | 26 | #define CIP_FMT_AM (0x10 << 24) |
27 | #define AMDTP_FDF_AM824 (0 << 19) | 27 | #define AMDTP_FDF_AM824 (0 << 19) |
28 | #define AMDTP_FDF_SFC_SHIFT 16 | 28 | #define AMDTP_FDF_SFC_SHIFT 16 |
29 | 29 | ||
30 | /* TODO: make these configurable */ | 30 | /* TODO: make these configurable */ |
31 | #define INTERRUPT_INTERVAL 16 | 31 | #define INTERRUPT_INTERVAL 16 |
32 | #define QUEUE_LENGTH 48 | 32 | #define QUEUE_LENGTH 48 |
33 | 33 | ||
/**
 * amdtp_out_stream_init - initialize an AMDTP output stream structure
 * @s: the AMDTP output stream to initialize
 * @unit: the target of the stream
 * @flags: the packet transmission method to use
 *
 * Returns 0 on success, or -EINVAL if @flags requests an unsupported
 * transmission method (only CIP_NONBLOCKING is implemented).
 */
int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit,
			  enum cip_out_flags flags)
{
	if (flags != CIP_NONBLOCKING)
		return -EINVAL;

	s->unit = fw_unit_get(unit);	/* hold a reference until _destroy() */
	s->flags = flags;
	s->context = ERR_PTR(-1);	/* sentinel: no iso context allocated yet */
	mutex_init(&s->mutex);
	s->packet_index = 0;

	return 0;
}
EXPORT_SYMBOL(amdtp_out_stream_init);
55 | 55 | ||
/**
 * amdtp_out_stream_destroy - free stream resources
 * @s: the AMDTP output stream to destroy
 *
 * Drops the reference to the target unit taken in amdtp_out_stream_init().
 */
void amdtp_out_stream_destroy(struct amdtp_out_stream *s)
{
	/* the iso context must already have been released (s->context is ERR_PTR) */
	WARN_ON(!IS_ERR(s->context));
	mutex_destroy(&s->mutex);
	fw_unit_put(s->unit);
}
EXPORT_SYMBOL(amdtp_out_stream_destroy);
67 | 67 | ||
/**
 * amdtp_out_stream_set_rate - set the sample rate
 * @s: the AMDTP output stream to configure
 * @rate: the sample rate
 *
 * The sample rate must be set before the stream is started, and must not be
 * changed while the stream is running.
 *
 * A @rate that does not match one of the supported CIP SFC rates triggers
 * a WARN_ON and leaves the stream configuration unchanged.
 */
void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate)
{
	/* maps each CIP SFC code to its sample rate and SYT_INTERVAL */
	static const struct {
		unsigned int rate;
		unsigned int syt_interval;
	} rate_info[] = {
		[CIP_SFC_32000]  = {  32000,  8, },
		[CIP_SFC_44100]  = {  44100,  8, },
		[CIP_SFC_48000]  = {  48000,  8, },
		[CIP_SFC_88200]  = {  88200, 16, },
		[CIP_SFC_96000]  = {  96000, 16, },
		[CIP_SFC_176400] = { 176400, 32, },
		[CIP_SFC_192000] = { 192000, 32, },
	};
	unsigned int sfc;

	/* must not be called while an iso context exists */
	if (WARN_ON(!IS_ERR(s->context)))
		return;

	for (sfc = 0; sfc < ARRAY_SIZE(rate_info); ++sfc)
		if (rate_info[sfc].rate == rate) {
			s->sfc = sfc;
			s->syt_interval = rate_info[sfc].syt_interval;
			return;
		}
	WARN_ON(1);	/* unsupported rate */
}
EXPORT_SYMBOL(amdtp_out_stream_set_rate);
104 | 104 | ||
/**
 * amdtp_out_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP output stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
 * amdtp_out_stream_set_midi().
 *
 * Returns the maximum payload size in bytes (8-byte CIP header plus the
 * largest possible data block area).  As a side effect, this computes and
 * caches s->data_block_quadlets for later use by the packet-filling code.
 */
unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s)
{
	/* largest number of data blocks per packet, per SFC */
	static const unsigned int max_data_blocks[] = {
		[CIP_SFC_32000]  =  4,
		[CIP_SFC_44100]  =  6,
		[CIP_SFC_48000]  =  6,
		[CIP_SFC_88200]  = 12,
		[CIP_SFC_96000]  = 12,
		[CIP_SFC_176400] = 23,
		[CIP_SFC_192000] = 24,
	};

	/* one quadlet per PCM channel plus one per group of up to 8 MIDI ports */
	s->data_block_quadlets = s->pcm_channels;
	s->data_block_quadlets += DIV_ROUND_UP(s->midi_ports, 8);

	return 8 + max_data_blocks[s->sfc] * 4 * s->data_block_quadlets;
}
EXPORT_SYMBOL(amdtp_out_stream_get_max_payload);
131 | 131 | ||
132 | static void amdtp_write_s16(struct amdtp_out_stream *s, | 132 | static void amdtp_write_s16(struct amdtp_out_stream *s, |
133 | struct snd_pcm_substream *pcm, | 133 | struct snd_pcm_substream *pcm, |
134 | __be32 *buffer, unsigned int frames); | 134 | __be32 *buffer, unsigned int frames); |
135 | static void amdtp_write_s32(struct amdtp_out_stream *s, | 135 | static void amdtp_write_s32(struct amdtp_out_stream *s, |
136 | struct snd_pcm_substream *pcm, | 136 | struct snd_pcm_substream *pcm, |
137 | __be32 *buffer, unsigned int frames); | 137 | __be32 *buffer, unsigned int frames); |
138 | 138 | ||
/**
 * amdtp_out_stream_set_pcm_format - set the PCM format
 * @s: the AMDTP output stream to configure
 * @format: the format of the ALSA PCM device
 *
 * The sample format must be set before the stream is started, and must not be
 * changed while the stream is running.
 *
 * An unsupported @format triggers a WARN_ON and falls back to the S16
 * transfer routine.
 */
void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s,
				     snd_pcm_format_t format)
{
	/* must not be called while an iso context exists */
	if (WARN_ON(!IS_ERR(s->context)))
		return;

	switch (format) {
	default:
		WARN_ON(1);
		/* fall through */
	case SNDRV_PCM_FORMAT_S16:
		s->transfer_samples = amdtp_write_s16;
		break;
	case SNDRV_PCM_FORMAT_S32:
		s->transfer_samples = amdtp_write_s32;
		break;
	}
}
EXPORT_SYMBOL(amdtp_out_stream_set_pcm_format);
166 | 166 | ||
/*
 * Compute how many data blocks (frames) go into the next packet so that
 * the long-term average data rate exactly matches the configured sample
 * rate.  Advances s->data_block_state as part of the computation.
 */
static unsigned int calculate_data_blocks(struct amdtp_out_stream *s)
{
	unsigned int phase, data_blocks;

	if (!cip_sfc_is_base_44100(s->sfc)) {
		/* Sample_rate / 8000 is an integer, and precomputed. */
		data_blocks = s->data_block_state;
	} else {
		phase = s->data_block_state;

		/*
		 * This calculates the number of data blocks per packet so that
		 * 1) the overall rate is correct and exactly synchronized to
		 *    the bus clock, and
		 * 2) packets with a rounded-up number of blocks occur as early
		 *    as possible in the sequence (to prevent underruns of the
		 *    device's buffer).
		 */
		if (s->sfc == CIP_SFC_44100)
			/* 6 6 5 6 5 6 5 ... */
			data_blocks = 5 + ((phase & 1) ^
					   (phase == 0 || phase >= 40));
		else
			/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
			data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
		if (++phase >= (80 >> (s->sfc >> 1)))
			phase = 0;
		s->data_block_state = phase;
	}

	return data_blocks;
}
199 | 199 | ||
/*
 * Compute the SYT timestamp for the packet sent in the given bus cycle,
 * i.e. the cycle/offset at which the packet's samples are to be presented
 * (including TRANSFER_DELAY_TICKS of transfer delay).  Returns 0xffff
 * ("no info") for packets that do not carry a timestamp.  Advances
 * s->last_syt_offset and s->syt_offset_state.
 */
static unsigned int calculate_syt(struct amdtp_out_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			/* constant per-cycle increment, precomputed elsewhere */
			syt_offset = s->last_syt_offset + s->syt_offset_state;
		else {
		/*
		 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
		 *   n * SYT_INTERVAL * 24576000 / sample_rate
		 * Modulo TICKS_PER_CYCLE, the difference between successive
		 * elements is about 1386.23.  Rounding the results of this
		 * formula to the SYT precision results in a sequence of
		 * differences that begins with:
		 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
		 * This code generates _exactly_ the same sequence.
		 */
			phase = s->syt_offset_state;
			index = phase % 13;
			syt_offset = s->last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->syt_offset_state = phase;
		}
	} else
		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
	s->last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		/* convert the offset into a (cycle, offset-in-cycle) SYT field */
		syt_offset += TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & 0xffff;
	} else {
		return 0xffff; /* no info */
	}
}
242 | 242 | ||
243 | static void amdtp_write_s32(struct amdtp_out_stream *s, | 243 | static void amdtp_write_s32(struct amdtp_out_stream *s, |
244 | struct snd_pcm_substream *pcm, | 244 | struct snd_pcm_substream *pcm, |
245 | __be32 *buffer, unsigned int frames) | 245 | __be32 *buffer, unsigned int frames) |
246 | { | 246 | { |
247 | struct snd_pcm_runtime *runtime = pcm->runtime; | 247 | struct snd_pcm_runtime *runtime = pcm->runtime; |
248 | unsigned int channels, remaining_frames, frame_step, i, c; | 248 | unsigned int channels, remaining_frames, frame_step, i, c; |
249 | const u32 *src; | 249 | const u32 *src; |
250 | 250 | ||
251 | channels = s->pcm_channels; | 251 | channels = s->pcm_channels; |
252 | src = (void *)runtime->dma_area + | 252 | src = (void *)runtime->dma_area + |
253 | s->pcm_buffer_pointer * (runtime->frame_bits / 8); | 253 | s->pcm_buffer_pointer * (runtime->frame_bits / 8); |
254 | remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; | 254 | remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; |
255 | frame_step = s->data_block_quadlets - channels; | 255 | frame_step = s->data_block_quadlets - channels; |
256 | 256 | ||
257 | for (i = 0; i < frames; ++i) { | 257 | for (i = 0; i < frames; ++i) { |
258 | for (c = 0; c < channels; ++c) { | 258 | for (c = 0; c < channels; ++c) { |
259 | *buffer = cpu_to_be32((*src >> 8) | 0x40000000); | 259 | *buffer = cpu_to_be32((*src >> 8) | 0x40000000); |
260 | src++; | 260 | src++; |
261 | buffer++; | 261 | buffer++; |
262 | } | 262 | } |
263 | buffer += frame_step; | 263 | buffer += frame_step; |
264 | if (--remaining_frames == 0) | 264 | if (--remaining_frames == 0) |
265 | src = (void *)runtime->dma_area; | 265 | src = (void *)runtime->dma_area; |
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | static void amdtp_write_s16(struct amdtp_out_stream *s, | 269 | static void amdtp_write_s16(struct amdtp_out_stream *s, |
270 | struct snd_pcm_substream *pcm, | 270 | struct snd_pcm_substream *pcm, |
271 | __be32 *buffer, unsigned int frames) | 271 | __be32 *buffer, unsigned int frames) |
272 | { | 272 | { |
273 | struct snd_pcm_runtime *runtime = pcm->runtime; | 273 | struct snd_pcm_runtime *runtime = pcm->runtime; |
274 | unsigned int channels, remaining_frames, frame_step, i, c; | 274 | unsigned int channels, remaining_frames, frame_step, i, c; |
275 | const u16 *src; | 275 | const u16 *src; |
276 | 276 | ||
277 | channels = s->pcm_channels; | 277 | channels = s->pcm_channels; |
278 | src = (void *)runtime->dma_area + | 278 | src = (void *)runtime->dma_area + |
279 | s->pcm_buffer_pointer * (runtime->frame_bits / 8); | 279 | s->pcm_buffer_pointer * (runtime->frame_bits / 8); |
280 | remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; | 280 | remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer; |
281 | frame_step = s->data_block_quadlets - channels; | 281 | frame_step = s->data_block_quadlets - channels; |
282 | 282 | ||
283 | for (i = 0; i < frames; ++i) { | 283 | for (i = 0; i < frames; ++i) { |
284 | for (c = 0; c < channels; ++c) { | 284 | for (c = 0; c < channels; ++c) { |
285 | *buffer = cpu_to_be32((*src << 8) | 0x40000000); | 285 | *buffer = cpu_to_be32((*src << 8) | 0x40000000); |
286 | src++; | 286 | src++; |
287 | buffer++; | 287 | buffer++; |
288 | } | 288 | } |
289 | buffer += frame_step; | 289 | buffer += frame_step; |
290 | if (--remaining_frames == 0) | 290 | if (--remaining_frames == 0) |
291 | src = (void *)runtime->dma_area; | 291 | src = (void *)runtime->dma_area; |
292 | } | 292 | } |
293 | } | 293 | } |
294 | 294 | ||
295 | static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s, | 295 | static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s, |
296 | __be32 *buffer, unsigned int frames) | 296 | __be32 *buffer, unsigned int frames) |
297 | { | 297 | { |
298 | unsigned int i, c; | 298 | unsigned int i, c; |
299 | 299 | ||
300 | for (i = 0; i < frames; ++i) { | 300 | for (i = 0; i < frames; ++i) { |
301 | for (c = 0; c < s->pcm_channels; ++c) | 301 | for (c = 0; c < s->pcm_channels; ++c) |
302 | buffer[c] = cpu_to_be32(0x40000000); | 302 | buffer[c] = cpu_to_be32(0x40000000); |
303 | buffer += s->data_block_quadlets; | 303 | buffer += s->data_block_quadlets; |
304 | } | 304 | } |
305 | } | 305 | } |
306 | 306 | ||
307 | static void amdtp_fill_midi(struct amdtp_out_stream *s, | 307 | static void amdtp_fill_midi(struct amdtp_out_stream *s, |
308 | __be32 *buffer, unsigned int frames) | 308 | __be32 *buffer, unsigned int frames) |
309 | { | 309 | { |
310 | unsigned int i; | 310 | unsigned int i; |
311 | 311 | ||
312 | for (i = 0; i < frames; ++i) | 312 | for (i = 0; i < frames; ++i) |
313 | buffer[s->pcm_channels + i * s->data_block_quadlets] = | 313 | buffer[s->pcm_channels + i * s->data_block_quadlets] = |
314 | cpu_to_be32(0x80000000); | 314 | cpu_to_be32(0x80000000); |
315 | } | 315 | } |
316 | 316 | ||
/*
 * Build and queue one outgoing AMDTP packet for the given bus cycle:
 * fill in the two-quadlet CIP header, copy PCM samples (or silence)
 * plus MIDI placeholders into the payload, and hand the packet to the
 * firewire core.  On a queueing error the stream is marked dead
 * (packet_index = -1) and the PCM substream is aborted.
 * Note: fw_iso_context_queue() no longer wakes the DMA context; the
 * caller is responsible for calling fw_iso_context_queue_flush() after
 * a batch of packets.
 */
static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
{
	__be32 *buffer;
	unsigned int index, data_blocks, syt, ptr;
	struct snd_pcm_substream *pcm;
	struct fw_iso_packet packet;
	int err;

	/* a negative packet_index means an earlier error killed the stream */
	if (s->packet_index < 0)
		return;
	index = s->packet_index;

	data_blocks = calculate_data_blocks(s);
	syt = calculate_syt(s, cycle);

	/* CIP header, quadlets 0 and 1 */
	buffer = s->buffer.packets[index].buffer;
	buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << 16) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH | CIP_FMT_AM | AMDTP_FDF_AM824 |
				(s->sfc << AMDTP_FDF_SFC_SHIFT) | syt);
	buffer += 2;

	pcm = ACCESS_ONCE(s->pcm);
	if (pcm)
		s->transfer_samples(s, pcm, buffer, data_blocks);
	else
		amdtp_fill_pcm_silence(s, buffer, data_blocks);
	if (s->midi_ports)
		amdtp_fill_midi(s, buffer, data_blocks);

	/* the CIP data block counter wraps at 256 */
	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	packet.payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;
	/* interrupt only every INTERRUPT_INTERVAL packets to limit IRQ load */
	packet.interrupt = IS_ALIGNED(index + 1, INTERRUPT_INTERVAL);
	packet.skip = 0;
	packet.tag = TAG_CIP;
	packet.sy = 0;
	packet.header_length = 0;

	err = fw_iso_context_queue(s->context, &packet, &s->buffer.iso_buffer,
				   s->buffer.packets[index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		s->packet_index = -1;
		amdtp_out_stream_pcm_abort(s);
		return;
	}

	if (++index >= QUEUE_LENGTH)
		index = 0;
	s->packet_index = index;

	if (pcm) {
		/* advance the ALSA ring-buffer pointer, wrapping at buffer_size */
		ptr = s->pcm_buffer_pointer + data_blocks;
		if (ptr >= pcm->runtime->buffer_size)
			ptr -= pcm->runtime->buffer_size;
		ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;

		/* notify ALSA once a full period has been transferred */
		s->pcm_period_pointer += data_blocks;
		if (s->pcm_period_pointer >= pcm->runtime->period_size) {
			s->pcm_period_pointer -= pcm->runtime->period_size;
			snd_pcm_period_elapsed(pcm);
		}
	}
}
383 | 383 | ||
384 | static void out_packet_callback(struct fw_iso_context *context, u32 cycle, | 384 | static void out_packet_callback(struct fw_iso_context *context, u32 cycle, |
385 | size_t header_length, void *header, void *data) | 385 | size_t header_length, void *header, void *data) |
386 | { | 386 | { |
387 | struct amdtp_out_stream *s = data; | 387 | struct amdtp_out_stream *s = data; |
388 | unsigned int i, packets = header_length / 4; | 388 | unsigned int i, packets = header_length / 4; |
389 | 389 | ||
390 | /* | 390 | /* |
391 | * Compute the cycle of the last queued packet. | 391 | * Compute the cycle of the last queued packet. |
392 | * (We need only the four lowest bits for the SYT, so we can ignore | 392 | * (We need only the four lowest bits for the SYT, so we can ignore |
393 | * that bits 0-11 must wrap around at 3072.) | 393 | * that bits 0-11 must wrap around at 3072.) |
394 | */ | 394 | */ |
395 | cycle += QUEUE_LENGTH - packets; | 395 | cycle += QUEUE_LENGTH - packets; |
396 | 396 | ||
397 | for (i = 0; i < packets; ++i) | 397 | for (i = 0; i < packets; ++i) |
398 | queue_out_packet(s, ++cycle); | 398 | queue_out_packet(s, ++cycle); |
399 | fw_iso_context_queue_flush(s->context); | ||
399 | } | 400 | } |
400 | 401 | ||
401 | static int queue_initial_skip_packets(struct amdtp_out_stream *s) | 402 | static int queue_initial_skip_packets(struct amdtp_out_stream *s) |
402 | { | 403 | { |
403 | struct fw_iso_packet skip_packet = { | 404 | struct fw_iso_packet skip_packet = { |
404 | .skip = 1, | 405 | .skip = 1, |
405 | }; | 406 | }; |
406 | unsigned int i; | 407 | unsigned int i; |
407 | int err; | 408 | int err; |
408 | 409 | ||
409 | for (i = 0; i < QUEUE_LENGTH; ++i) { | 410 | for (i = 0; i < QUEUE_LENGTH; ++i) { |
410 | skip_packet.interrupt = IS_ALIGNED(s->packet_index + 1, | 411 | skip_packet.interrupt = IS_ALIGNED(s->packet_index + 1, |
411 | INTERRUPT_INTERVAL); | 412 | INTERRUPT_INTERVAL); |
412 | err = fw_iso_context_queue(s->context, &skip_packet, NULL, 0); | 413 | err = fw_iso_context_queue(s->context, &skip_packet, NULL, 0); |
413 | if (err < 0) | 414 | if (err < 0) |
414 | return err; | 415 | return err; |
415 | if (++s->packet_index >= QUEUE_LENGTH) | 416 | if (++s->packet_index >= QUEUE_LENGTH) |
416 | s->packet_index = 0; | 417 | s->packet_index = 0; |
417 | } | 418 | } |
418 | 419 | ||
419 | return 0; | 420 | return 0; |
420 | } | 421 | } |
421 | 422 | ||
/**
 * amdtp_out_stream_start - start sending packets
 * @s: the AMDTP output stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
 * amdtp_out_stream_set_midi(); and it must be started before any
 * PCM or MIDI device can be started.
 *
 * Returns zero on success, or a negative error code on failure.
 */
int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed)
{
	/*
	 * Initial values for the data-block/SYT-offset state, indexed by
	 * nominal sampling frequency code.
	 * NOTE(review): the 0/67 seeds for the 44.1 kHz family presumably
	 * come from the incremental SYT computation elsewhere in this
	 * file — confirm there.
	 */
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} initial_state[] = {
		[CIP_SFC_32000] = { 4, 3072 },
		[CIP_SFC_48000] = { 6, 1024 },
		[CIP_SFC_96000] = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100] = { 0, 67 },
		[CIP_SFC_88200] = { 0, 67 },
		[CIP_SFC_176400] = { 0, 67 },
	};
	int err;

	mutex_lock(&s->mutex);

	/*
	 * Refuse to start a stream twice (a valid s->context means it is
	 * already running) or one with no configured payload at all.
	 */
	if (WARN_ON(!IS_ERR(s->context) ||
		    (!s->pcm_channels && !s->midi_ports))) {
		err = -EBADFD;
		goto err_unlock;
	}

	s->data_block_state = initial_state[s->sfc].data_block;
	s->syt_offset_state = initial_state[s->sfc].syt_offset;
	s->last_syt_offset = TICKS_PER_CYCLE;

	/* DMA-capable buffer holding QUEUE_LENGTH maximum-size packets. */
	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      amdtp_out_stream_get_max_payload(s),
				      DMA_TO_DEVICE);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   FW_ISO_CONTEXT_TRANSMIT,
					   channel, speed, 0,
					   out_packet_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free output stream on this controller\n");
		goto err_buffer;
	}

	/* Pick up the current node ID for the CIP source field. */
	amdtp_out_stream_update(s);

	/* Prime the whole ring with skip packets before starting DMA. */
	s->packet_index = 0;
	s->data_block_counter = 0;
	err = queue_initial_skip_packets(s);
	if (err < 0)
		goto err_context;

	err = fw_iso_context_start(s->context, -1, 0, 0);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);	/* marks the stream as stopped */
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_out_stream_start);
506 | 507 | ||
507 | /** | 508 | /** |
508 | * amdtp_out_stream_update - update the stream after a bus reset | 509 | * amdtp_out_stream_update - update the stream after a bus reset |
509 | * @s: the AMDTP output stream | 510 | * @s: the AMDTP output stream |
510 | */ | 511 | */ |
511 | void amdtp_out_stream_update(struct amdtp_out_stream *s) | 512 | void amdtp_out_stream_update(struct amdtp_out_stream *s) |
512 | { | 513 | { |
513 | ACCESS_ONCE(s->source_node_id_field) = | 514 | ACCESS_ONCE(s->source_node_id_field) = |
514 | (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; | 515 | (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; |
515 | } | 516 | } |
516 | EXPORT_SYMBOL(amdtp_out_stream_update); | 517 | EXPORT_SYMBOL(amdtp_out_stream_update); |
517 | 518 | ||
518 | /** | 519 | /** |
519 | * amdtp_out_stream_stop - stop sending packets | 520 | * amdtp_out_stream_stop - stop sending packets |
520 | * @s: the AMDTP output stream to stop | 521 | * @s: the AMDTP output stream to stop |
521 | * | 522 | * |
522 | * All PCM and MIDI devices of the stream must be stopped before the stream | 523 | * All PCM and MIDI devices of the stream must be stopped before the stream |
523 | * itself can be stopped. | 524 | * itself can be stopped. |
524 | */ | 525 | */ |
525 | void amdtp_out_stream_stop(struct amdtp_out_stream *s) | 526 | void amdtp_out_stream_stop(struct amdtp_out_stream *s) |
526 | { | 527 | { |
527 | mutex_lock(&s->mutex); | 528 | mutex_lock(&s->mutex); |
528 | 529 | ||
529 | if (IS_ERR(s->context)) { | 530 | if (IS_ERR(s->context)) { |
530 | mutex_unlock(&s->mutex); | 531 | mutex_unlock(&s->mutex); |
531 | return; | 532 | return; |
532 | } | 533 | } |
533 | 534 | ||
534 | fw_iso_context_stop(s->context); | 535 | fw_iso_context_stop(s->context); |
535 | fw_iso_context_destroy(s->context); | 536 | fw_iso_context_destroy(s->context); |
536 | s->context = ERR_PTR(-1); | 537 | s->context = ERR_PTR(-1); |
537 | iso_packets_buffer_destroy(&s->buffer, s->unit); | 538 | iso_packets_buffer_destroy(&s->buffer, s->unit); |
538 | 539 | ||
539 | mutex_unlock(&s->mutex); | 540 | mutex_unlock(&s->mutex); |
540 | } | 541 | } |
541 | EXPORT_SYMBOL(amdtp_out_stream_stop); | 542 | EXPORT_SYMBOL(amdtp_out_stream_stop); |
542 | 543 | ||
543 | /** | 544 | /** |
544 | * amdtp_out_stream_pcm_abort - abort the running PCM device | 545 | * amdtp_out_stream_pcm_abort - abort the running PCM device |
545 | * @s: the AMDTP stream about to be stopped | 546 | * @s: the AMDTP stream about to be stopped |
546 | * | 547 | * |
547 | * If the isochronous stream needs to be stopped asynchronously, call this | 548 | * If the isochronous stream needs to be stopped asynchronously, call this |
548 | * function first to stop the PCM device. | 549 | * function first to stop the PCM device. |
549 | */ | 550 | */ |
550 | void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s) | 551 | void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s) |
551 | { | 552 | { |
552 | struct snd_pcm_substream *pcm; | 553 | struct snd_pcm_substream *pcm; |
553 | 554 | ||
554 | pcm = ACCESS_ONCE(s->pcm); | 555 | pcm = ACCESS_ONCE(s->pcm); |
555 | if (pcm) { | 556 | if (pcm) { |
556 | snd_pcm_stream_lock_irq(pcm); | 557 | snd_pcm_stream_lock_irq(pcm); |
557 | if (snd_pcm_running(pcm)) | 558 | if (snd_pcm_running(pcm)) |
558 | snd_pcm_stop(pcm, SNDRV_PCM_STATE_XRUN); | 559 | snd_pcm_stop(pcm, SNDRV_PCM_STATE_XRUN); |
559 | snd_pcm_stream_unlock_irq(pcm); | 560 | snd_pcm_stream_unlock_irq(pcm); |
560 | } | 561 | } |
561 | } | 562 | } |
562 | EXPORT_SYMBOL(amdtp_out_stream_pcm_abort); | 563 | EXPORT_SYMBOL(amdtp_out_stream_pcm_abort); |
563 | 564 |