Commit b10cec8a4e8167075b9e1ff3f05419769e7f381a
Committed by David S. Miller
1 parent: 48bdf072c3
Exists in master and in 7 other branches
drivers/net: ks8842: Fix crash on received packet when in PIO mode.
This patch fixes a driver crash during packet reception due to not enough bytes allocated in the skb. Since the loop reads out 4 bytes at a time, we need to allow for up to 3 bytes of slack space.

Signed-off-by: Dennis Aberilla <denzzzhome@yahoo.com>
Signed-off-by: David S. Miller <davem@zippy.davemloft.net>
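The reasoning in the log can be made concrete with a small userspace sketch (this is illustrative only, not the driver code; the fake_fifo_read() helper and buffer sizes are assumptions): because the PIO receive loop always stores a full 32-bit word per iteration, a frame whose length is not a multiple of 4 causes up to 3 extra bytes to be written past len, so the destination skb must be allocated with len + 3 bytes of data space.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a 32-bit read of the QMU data register. */
static uint32_t fake_fifo_read(void)
{
	return 0xdeadbeef;
}

/* Mirrors the shape of the PIO RX copy loop: one 32-bit word per pass. */
static size_t pio_copy_words(uint8_t *dst, int len)
{
	size_t stored = 0;

	while (len > 0) {
		uint32_t word = fake_fifo_read();

		memcpy(dst + stored, &word, sizeof(word)); /* always stores 4 bytes */
		stored += sizeof(word);
		len -= sizeof(word);    /* may overshoot len by up to 3 bytes */
	}
	return stored;
}

int main(void)
{
	int len = 61;                 /* frame length not a multiple of 4 */
	uint8_t buf[61 + 3];          /* len + 3: room for the word-copy overshoot */
	size_t stored = pio_copy_words(buf, len);

	/* 61 bytes of payload, but 64 bytes actually written into the buffer. */
	printf("frame len %d, bytes stored %zu\n", len, stored);
	return 0;
}

With only len bytes allocated, the final word copy of the loop would land past the end of the buffer, which is the skb corruption the patch avoids by allocating len + 3.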
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
drivers/net/ks8842.c
1 | /* | 1 | /* |
2 | * ks8842.c timberdale KS8842 ethernet driver | 2 | * ks8842.c timberdale KS8842 ethernet driver |
3 | * Copyright (c) 2009 Intel Corporation | 3 | * Copyright (c) 2009 Intel Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* Supports: | 19 | /* Supports: |
20 | * The Micrel KS8842 behind the timberdale FPGA | 20 | * The Micrel KS8842 behind the timberdale FPGA |
21 | * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface | 21 | * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/mfd/core.h> | 29 | #include <linux/mfd/core.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
32 | #include <linux/ethtool.h> | 32 | #include <linux/ethtool.h> |
33 | #include <linux/ks8842.h> | 33 | #include <linux/ks8842.h> |
34 | #include <linux/dmaengine.h> | 34 | #include <linux/dmaengine.h> |
35 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
36 | #include <linux/scatterlist.h> | 36 | #include <linux/scatterlist.h> |
37 | 37 | ||
38 | #define DRV_NAME "ks8842" | 38 | #define DRV_NAME "ks8842" |
39 | 39 | ||
40 | /* Timberdale specific Registers */ | 40 | /* Timberdale specific Registers */ |
41 | #define REG_TIMB_RST 0x1c | 41 | #define REG_TIMB_RST 0x1c |
42 | #define REG_TIMB_FIFO 0x20 | 42 | #define REG_TIMB_FIFO 0x20 |
43 | #define REG_TIMB_ISR 0x24 | 43 | #define REG_TIMB_ISR 0x24 |
44 | #define REG_TIMB_IER 0x28 | 44 | #define REG_TIMB_IER 0x28 |
45 | #define REG_TIMB_IAR 0x2C | 45 | #define REG_TIMB_IAR 0x2C |
46 | #define REQ_TIMB_DMA_RESUME 0x30 | 46 | #define REQ_TIMB_DMA_RESUME 0x30 |
47 | 47 | ||
48 | /* KS8842 registers */ | 48 | /* KS8842 registers */ |
49 | 49 | ||
50 | #define REG_SELECT_BANK 0x0e | 50 | #define REG_SELECT_BANK 0x0e |
51 | 51 | ||
52 | /* bank 0 registers */ | 52 | /* bank 0 registers */ |
53 | #define REG_QRFCR 0x04 | 53 | #define REG_QRFCR 0x04 |
54 | 54 | ||
55 | /* bank 2 registers */ | 55 | /* bank 2 registers */ |
56 | #define REG_MARL 0x00 | 56 | #define REG_MARL 0x00 |
57 | #define REG_MARM 0x02 | 57 | #define REG_MARM 0x02 |
58 | #define REG_MARH 0x04 | 58 | #define REG_MARH 0x04 |
59 | 59 | ||
60 | /* bank 3 registers */ | 60 | /* bank 3 registers */ |
61 | #define REG_GRR 0x06 | 61 | #define REG_GRR 0x06 |
62 | 62 | ||
63 | /* bank 16 registers */ | 63 | /* bank 16 registers */ |
64 | #define REG_TXCR 0x00 | 64 | #define REG_TXCR 0x00 |
65 | #define REG_TXSR 0x02 | 65 | #define REG_TXSR 0x02 |
66 | #define REG_RXCR 0x04 | 66 | #define REG_RXCR 0x04 |
67 | #define REG_TXMIR 0x08 | 67 | #define REG_TXMIR 0x08 |
68 | #define REG_RXMIR 0x0A | 68 | #define REG_RXMIR 0x0A |
69 | 69 | ||
70 | /* bank 17 registers */ | 70 | /* bank 17 registers */ |
71 | #define REG_TXQCR 0x00 | 71 | #define REG_TXQCR 0x00 |
72 | #define REG_RXQCR 0x02 | 72 | #define REG_RXQCR 0x02 |
73 | #define REG_TXFDPR 0x04 | 73 | #define REG_TXFDPR 0x04 |
74 | #define REG_RXFDPR 0x06 | 74 | #define REG_RXFDPR 0x06 |
75 | #define REG_QMU_DATA_LO 0x08 | 75 | #define REG_QMU_DATA_LO 0x08 |
76 | #define REG_QMU_DATA_HI 0x0A | 76 | #define REG_QMU_DATA_HI 0x0A |
77 | 77 | ||
78 | /* bank 18 registers */ | 78 | /* bank 18 registers */ |
79 | #define REG_IER 0x00 | 79 | #define REG_IER 0x00 |
80 | #define IRQ_LINK_CHANGE 0x8000 | 80 | #define IRQ_LINK_CHANGE 0x8000 |
81 | #define IRQ_TX 0x4000 | 81 | #define IRQ_TX 0x4000 |
82 | #define IRQ_RX 0x2000 | 82 | #define IRQ_RX 0x2000 |
83 | #define IRQ_RX_OVERRUN 0x0800 | 83 | #define IRQ_RX_OVERRUN 0x0800 |
84 | #define IRQ_TX_STOPPED 0x0200 | 84 | #define IRQ_TX_STOPPED 0x0200 |
85 | #define IRQ_RX_STOPPED 0x0100 | 85 | #define IRQ_RX_STOPPED 0x0100 |
86 | #define IRQ_RX_ERROR 0x0080 | 86 | #define IRQ_RX_ERROR 0x0080 |
87 | #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ | 87 | #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ |
88 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) | 88 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) |
89 | /* When running via timberdale in DMA mode, the RX interrupt should be | 89 | /* When running via timberdale in DMA mode, the RX interrupt should be |
90 | enabled in the KS8842, but not in the FPGA IP, since the IP handles | 90 | enabled in the KS8842, but not in the FPGA IP, since the IP handles |
91 | RX DMA internally. | 91 | RX DMA internally. |
92 | TX interrupts are not needed it is handled by the FPGA the driver is | 92 | TX interrupts are not needed it is handled by the FPGA the driver is |
93 | notified via DMA callbacks. | 93 | notified via DMA callbacks. |
94 | */ | 94 | */ |
95 | #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ | 95 | #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ |
96 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) | 96 | IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) |
97 | #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) | 97 | #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) |
98 | #define REG_ISR 0x02 | 98 | #define REG_ISR 0x02 |
99 | #define REG_RXSR 0x04 | 99 | #define REG_RXSR 0x04 |
100 | #define RXSR_VALID 0x8000 | 100 | #define RXSR_VALID 0x8000 |
101 | #define RXSR_BROADCAST 0x80 | 101 | #define RXSR_BROADCAST 0x80 |
102 | #define RXSR_MULTICAST 0x40 | 102 | #define RXSR_MULTICAST 0x40 |
103 | #define RXSR_UNICAST 0x20 | 103 | #define RXSR_UNICAST 0x20 |
104 | #define RXSR_FRAMETYPE 0x08 | 104 | #define RXSR_FRAMETYPE 0x08 |
105 | #define RXSR_TOO_LONG 0x04 | 105 | #define RXSR_TOO_LONG 0x04 |
106 | #define RXSR_RUNT 0x02 | 106 | #define RXSR_RUNT 0x02 |
107 | #define RXSR_CRC_ERROR 0x01 | 107 | #define RXSR_CRC_ERROR 0x01 |
108 | #define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR) | 108 | #define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR) |
109 | 109 | ||
110 | /* bank 32 registers */ | 110 | /* bank 32 registers */ |
111 | #define REG_SW_ID_AND_ENABLE 0x00 | 111 | #define REG_SW_ID_AND_ENABLE 0x00 |
112 | #define REG_SGCR1 0x02 | 112 | #define REG_SGCR1 0x02 |
113 | #define REG_SGCR2 0x04 | 113 | #define REG_SGCR2 0x04 |
114 | #define REG_SGCR3 0x06 | 114 | #define REG_SGCR3 0x06 |
115 | 115 | ||
116 | /* bank 39 registers */ | 116 | /* bank 39 registers */ |
117 | #define REG_MACAR1 0x00 | 117 | #define REG_MACAR1 0x00 |
118 | #define REG_MACAR2 0x02 | 118 | #define REG_MACAR2 0x02 |
119 | #define REG_MACAR3 0x04 | 119 | #define REG_MACAR3 0x04 |
120 | 120 | ||
121 | /* bank 45 registers */ | 121 | /* bank 45 registers */ |
122 | #define REG_P1MBCR 0x00 | 122 | #define REG_P1MBCR 0x00 |
123 | #define REG_P1MBSR 0x02 | 123 | #define REG_P1MBSR 0x02 |
124 | 124 | ||
125 | /* bank 46 registers */ | 125 | /* bank 46 registers */ |
126 | #define REG_P2MBCR 0x00 | 126 | #define REG_P2MBCR 0x00 |
127 | #define REG_P2MBSR 0x02 | 127 | #define REG_P2MBSR 0x02 |
128 | 128 | ||
129 | /* bank 48 registers */ | 129 | /* bank 48 registers */ |
130 | #define REG_P1CR2 0x02 | 130 | #define REG_P1CR2 0x02 |
131 | 131 | ||
132 | /* bank 49 registers */ | 132 | /* bank 49 registers */ |
133 | #define REG_P1CR4 0x02 | 133 | #define REG_P1CR4 0x02 |
134 | #define REG_P1SR 0x04 | 134 | #define REG_P1SR 0x04 |
135 | 135 | ||
136 | /* flags passed by platform_device for configuration */ | 136 | /* flags passed by platform_device for configuration */ |
137 | #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ | 137 | #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ |
138 | #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ | 138 | #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ |
139 | 139 | ||
140 | #define DMA_BUFFER_SIZE 2048 | 140 | #define DMA_BUFFER_SIZE 2048 |
141 | 141 | ||
142 | struct ks8842_tx_dma_ctl { | 142 | struct ks8842_tx_dma_ctl { |
143 | struct dma_chan *chan; | 143 | struct dma_chan *chan; |
144 | struct dma_async_tx_descriptor *adesc; | 144 | struct dma_async_tx_descriptor *adesc; |
145 | void *buf; | 145 | void *buf; |
146 | struct scatterlist sg; | 146 | struct scatterlist sg; |
147 | int channel; | 147 | int channel; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | struct ks8842_rx_dma_ctl { | 150 | struct ks8842_rx_dma_ctl { |
151 | struct dma_chan *chan; | 151 | struct dma_chan *chan; |
152 | struct dma_async_tx_descriptor *adesc; | 152 | struct dma_async_tx_descriptor *adesc; |
153 | struct sk_buff *skb; | 153 | struct sk_buff *skb; |
154 | struct scatterlist sg; | 154 | struct scatterlist sg; |
155 | struct tasklet_struct tasklet; | 155 | struct tasklet_struct tasklet; |
156 | int channel; | 156 | int channel; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ | 159 | #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ |
160 | ((adapter)->dma_rx.channel != -1)) | 160 | ((adapter)->dma_rx.channel != -1)) |
161 | 161 | ||
162 | struct ks8842_adapter { | 162 | struct ks8842_adapter { |
163 | void __iomem *hw_addr; | 163 | void __iomem *hw_addr; |
164 | int irq; | 164 | int irq; |
165 | unsigned long conf_flags; /* copy of platform_device config */ | 165 | unsigned long conf_flags; /* copy of platform_device config */ |
166 | struct tasklet_struct tasklet; | 166 | struct tasklet_struct tasklet; |
167 | spinlock_t lock; /* spinlock to be interrupt safe */ | 167 | spinlock_t lock; /* spinlock to be interrupt safe */ |
168 | struct work_struct timeout_work; | 168 | struct work_struct timeout_work; |
169 | struct net_device *netdev; | 169 | struct net_device *netdev; |
170 | struct device *dev; | 170 | struct device *dev; |
171 | struct ks8842_tx_dma_ctl dma_tx; | 171 | struct ks8842_tx_dma_ctl dma_tx; |
172 | struct ks8842_rx_dma_ctl dma_rx; | 172 | struct ks8842_rx_dma_ctl dma_rx; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static void ks8842_dma_rx_cb(void *data); | 175 | static void ks8842_dma_rx_cb(void *data); |
176 | static void ks8842_dma_tx_cb(void *data); | 176 | static void ks8842_dma_tx_cb(void *data); |
177 | 177 | ||
178 | static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) | 178 | static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) |
179 | { | 179 | { |
180 | iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); | 180 | iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); |
181 | } | 181 | } |
182 | 182 | ||
183 | static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) | 183 | static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) |
184 | { | 184 | { |
185 | iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); | 185 | iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); |
186 | } | 186 | } |
187 | 187 | ||
188 | static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank, | 188 | static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank, |
189 | u8 value, int offset) | 189 | u8 value, int offset) |
190 | { | 190 | { |
191 | ks8842_select_bank(adapter, bank); | 191 | ks8842_select_bank(adapter, bank); |
192 | iowrite8(value, adapter->hw_addr + offset); | 192 | iowrite8(value, adapter->hw_addr + offset); |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank, | 195 | static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank, |
196 | u16 value, int offset) | 196 | u16 value, int offset) |
197 | { | 197 | { |
198 | ks8842_select_bank(adapter, bank); | 198 | ks8842_select_bank(adapter, bank); |
199 | iowrite16(value, adapter->hw_addr + offset); | 199 | iowrite16(value, adapter->hw_addr + offset); |
200 | } | 200 | } |
201 | 201 | ||
202 | static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank, | 202 | static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank, |
203 | u16 bits, int offset) | 203 | u16 bits, int offset) |
204 | { | 204 | { |
205 | u16 reg; | 205 | u16 reg; |
206 | ks8842_select_bank(adapter, bank); | 206 | ks8842_select_bank(adapter, bank); |
207 | reg = ioread16(adapter->hw_addr + offset); | 207 | reg = ioread16(adapter->hw_addr + offset); |
208 | reg |= bits; | 208 | reg |= bits; |
209 | iowrite16(reg, adapter->hw_addr + offset); | 209 | iowrite16(reg, adapter->hw_addr + offset); |
210 | } | 210 | } |
211 | 211 | ||
212 | static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank, | 212 | static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank, |
213 | u16 bits, int offset) | 213 | u16 bits, int offset) |
214 | { | 214 | { |
215 | u16 reg; | 215 | u16 reg; |
216 | ks8842_select_bank(adapter, bank); | 216 | ks8842_select_bank(adapter, bank); |
217 | reg = ioread16(adapter->hw_addr + offset); | 217 | reg = ioread16(adapter->hw_addr + offset); |
218 | reg &= ~bits; | 218 | reg &= ~bits; |
219 | iowrite16(reg, adapter->hw_addr + offset); | 219 | iowrite16(reg, adapter->hw_addr + offset); |
220 | } | 220 | } |
221 | 221 | ||
222 | static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank, | 222 | static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank, |
223 | u32 value, int offset) | 223 | u32 value, int offset) |
224 | { | 224 | { |
225 | ks8842_select_bank(adapter, bank); | 225 | ks8842_select_bank(adapter, bank); |
226 | iowrite32(value, adapter->hw_addr + offset); | 226 | iowrite32(value, adapter->hw_addr + offset); |
227 | } | 227 | } |
228 | 228 | ||
229 | static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank, | 229 | static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank, |
230 | int offset) | 230 | int offset) |
231 | { | 231 | { |
232 | ks8842_select_bank(adapter, bank); | 232 | ks8842_select_bank(adapter, bank); |
233 | return ioread8(adapter->hw_addr + offset); | 233 | return ioread8(adapter->hw_addr + offset); |
234 | } | 234 | } |
235 | 235 | ||
236 | static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank, | 236 | static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank, |
237 | int offset) | 237 | int offset) |
238 | { | 238 | { |
239 | ks8842_select_bank(adapter, bank); | 239 | ks8842_select_bank(adapter, bank); |
240 | return ioread16(adapter->hw_addr + offset); | 240 | return ioread16(adapter->hw_addr + offset); |
241 | } | 241 | } |
242 | 242 | ||
243 | static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, | 243 | static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, |
244 | int offset) | 244 | int offset) |
245 | { | 245 | { |
246 | ks8842_select_bank(adapter, bank); | 246 | ks8842_select_bank(adapter, bank); |
247 | return ioread32(adapter->hw_addr + offset); | 247 | return ioread32(adapter->hw_addr + offset); |
248 | } | 248 | } |
249 | 249 | ||
250 | static void ks8842_reset(struct ks8842_adapter *adapter) | 250 | static void ks8842_reset(struct ks8842_adapter *adapter) |
251 | { | 251 | { |
252 | if (adapter->conf_flags & MICREL_KS884X) { | 252 | if (adapter->conf_flags & MICREL_KS884X) { |
253 | ks8842_write16(adapter, 3, 1, REG_GRR); | 253 | ks8842_write16(adapter, 3, 1, REG_GRR); |
254 | msleep(10); | 254 | msleep(10); |
255 | iowrite16(0, adapter->hw_addr + REG_GRR); | 255 | iowrite16(0, adapter->hw_addr + REG_GRR); |
256 | } else { | 256 | } else { |
257 | /* The KS8842 goes haywire when doing softare reset | 257 | /* The KS8842 goes haywire when doing softare reset |
258 | * a work around in the timberdale IP is implemented to | 258 | * a work around in the timberdale IP is implemented to |
259 | * do a hardware reset instead | 259 | * do a hardware reset instead |
260 | ks8842_write16(adapter, 3, 1, REG_GRR); | 260 | ks8842_write16(adapter, 3, 1, REG_GRR); |
261 | msleep(10); | 261 | msleep(10); |
262 | iowrite16(0, adapter->hw_addr + REG_GRR); | 262 | iowrite16(0, adapter->hw_addr + REG_GRR); |
263 | */ | 263 | */ |
264 | iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); | 264 | iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); |
265 | msleep(20); | 265 | msleep(20); |
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | static void ks8842_update_link_status(struct net_device *netdev, | 269 | static void ks8842_update_link_status(struct net_device *netdev, |
270 | struct ks8842_adapter *adapter) | 270 | struct ks8842_adapter *adapter) |
271 | { | 271 | { |
272 | /* check the status of the link */ | 272 | /* check the status of the link */ |
273 | if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) { | 273 | if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) { |
274 | netif_carrier_on(netdev); | 274 | netif_carrier_on(netdev); |
275 | netif_wake_queue(netdev); | 275 | netif_wake_queue(netdev); |
276 | } else { | 276 | } else { |
277 | netif_stop_queue(netdev); | 277 | netif_stop_queue(netdev); |
278 | netif_carrier_off(netdev); | 278 | netif_carrier_off(netdev); |
279 | } | 279 | } |
280 | } | 280 | } |
281 | 281 | ||
282 | static void ks8842_enable_tx(struct ks8842_adapter *adapter) | 282 | static void ks8842_enable_tx(struct ks8842_adapter *adapter) |
283 | { | 283 | { |
284 | ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR); | 284 | ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR); |
285 | } | 285 | } |
286 | 286 | ||
287 | static void ks8842_disable_tx(struct ks8842_adapter *adapter) | 287 | static void ks8842_disable_tx(struct ks8842_adapter *adapter) |
288 | { | 288 | { |
289 | ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR); | 289 | ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR); |
290 | } | 290 | } |
291 | 291 | ||
292 | static void ks8842_enable_rx(struct ks8842_adapter *adapter) | 292 | static void ks8842_enable_rx(struct ks8842_adapter *adapter) |
293 | { | 293 | { |
294 | ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR); | 294 | ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR); |
295 | } | 295 | } |
296 | 296 | ||
297 | static void ks8842_disable_rx(struct ks8842_adapter *adapter) | 297 | static void ks8842_disable_rx(struct ks8842_adapter *adapter) |
298 | { | 298 | { |
299 | ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR); | 299 | ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR); |
300 | } | 300 | } |
301 | 301 | ||
302 | static void ks8842_reset_hw(struct ks8842_adapter *adapter) | 302 | static void ks8842_reset_hw(struct ks8842_adapter *adapter) |
303 | { | 303 | { |
304 | /* reset the HW */ | 304 | /* reset the HW */ |
305 | ks8842_reset(adapter); | 305 | ks8842_reset(adapter); |
306 | 306 | ||
307 | /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */ | 307 | /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */ |
308 | ks8842_write16(adapter, 16, 0x000E, REG_TXCR); | 308 | ks8842_write16(adapter, 16, 0x000E, REG_TXCR); |
309 | 309 | ||
310 | /* enable the receiver, uni + multi + broadcast + flow ctrl | 310 | /* enable the receiver, uni + multi + broadcast + flow ctrl |
311 | + crc strip */ | 311 | + crc strip */ |
312 | ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400, | 312 | ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400, |
313 | REG_RXCR); | 313 | REG_RXCR); |
314 | 314 | ||
315 | /* TX frame pointer autoincrement */ | 315 | /* TX frame pointer autoincrement */ |
316 | ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR); | 316 | ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR); |
317 | 317 | ||
318 | /* RX frame pointer autoincrement */ | 318 | /* RX frame pointer autoincrement */ |
319 | ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR); | 319 | ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR); |
320 | 320 | ||
321 | /* RX 2 kb high watermark */ | 321 | /* RX 2 kb high watermark */ |
322 | ks8842_write16(adapter, 0, 0x1000, REG_QRFCR); | 322 | ks8842_write16(adapter, 0, 0x1000, REG_QRFCR); |
323 | 323 | ||
324 | /* aggressive back off in half duplex */ | 324 | /* aggressive back off in half duplex */ |
325 | ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1); | 325 | ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1); |
326 | 326 | ||
327 | /* enable no excessive collison drop */ | 327 | /* enable no excessive collison drop */ |
328 | ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2); | 328 | ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2); |
329 | 329 | ||
330 | /* Enable port 1 force flow control / back pressure / transmit / recv */ | 330 | /* Enable port 1 force flow control / back pressure / transmit / recv */ |
331 | ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2); | 331 | ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2); |
332 | 332 | ||
333 | /* restart port auto-negotiation */ | 333 | /* restart port auto-negotiation */ |
334 | ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); | 334 | ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); |
335 | 335 | ||
336 | /* Enable the transmitter */ | 336 | /* Enable the transmitter */ |
337 | ks8842_enable_tx(adapter); | 337 | ks8842_enable_tx(adapter); |
338 | 338 | ||
339 | /* Enable the receiver */ | 339 | /* Enable the receiver */ |
340 | ks8842_enable_rx(adapter); | 340 | ks8842_enable_rx(adapter); |
341 | 341 | ||
342 | /* clear all interrupts */ | 342 | /* clear all interrupts */ |
343 | ks8842_write16(adapter, 18, 0xffff, REG_ISR); | 343 | ks8842_write16(adapter, 18, 0xffff, REG_ISR); |
344 | 344 | ||
345 | /* enable interrupts */ | 345 | /* enable interrupts */ |
346 | if (KS8842_USE_DMA(adapter)) { | 346 | if (KS8842_USE_DMA(adapter)) { |
347 | /* When running in DMA Mode the RX interrupt is not enabled in | 347 | /* When running in DMA Mode the RX interrupt is not enabled in |
348 | timberdale because RX data is received by DMA callbacks | 348 | timberdale because RX data is received by DMA callbacks |
349 | it must still be enabled in the KS8842 because it indicates | 349 | it must still be enabled in the KS8842 because it indicates |
350 | to timberdale when there is RX data for it's DMA FIFOs */ | 350 | to timberdale when there is RX data for it's DMA FIFOs */ |
351 | iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); | 351 | iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); |
352 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); | 352 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); |
353 | } else { | 353 | } else { |
354 | if (!(adapter->conf_flags & MICREL_KS884X)) | 354 | if (!(adapter->conf_flags & MICREL_KS884X)) |
355 | iowrite16(ENABLED_IRQS, | 355 | iowrite16(ENABLED_IRQS, |
356 | adapter->hw_addr + REG_TIMB_IER); | 356 | adapter->hw_addr + REG_TIMB_IER); |
357 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | 357 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); |
358 | } | 358 | } |
359 | /* enable the switch */ | 359 | /* enable the switch */ |
360 | ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); | 360 | ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); |
361 | } | 361 | } |
362 | 362 | ||
363 | static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) | 363 | static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) |
364 | { | 364 | { |
365 | int i; | 365 | int i; |
366 | u16 mac; | 366 | u16 mac; |
367 | 367 | ||
368 | for (i = 0; i < ETH_ALEN; i++) | 368 | for (i = 0; i < ETH_ALEN; i++) |
369 | dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); | 369 | dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); |
370 | 370 | ||
371 | if (adapter->conf_flags & MICREL_KS884X) { | 371 | if (adapter->conf_flags & MICREL_KS884X) { |
372 | /* | 372 | /* |
373 | the sequence of saving mac addr between MAC and Switch is | 373 | the sequence of saving mac addr between MAC and Switch is |
374 | different. | 374 | different. |
375 | */ | 375 | */ |
376 | 376 | ||
377 | mac = ks8842_read16(adapter, 2, REG_MARL); | 377 | mac = ks8842_read16(adapter, 2, REG_MARL); |
378 | ks8842_write16(adapter, 39, mac, REG_MACAR3); | 378 | ks8842_write16(adapter, 39, mac, REG_MACAR3); |
379 | mac = ks8842_read16(adapter, 2, REG_MARM); | 379 | mac = ks8842_read16(adapter, 2, REG_MARM); |
380 | ks8842_write16(adapter, 39, mac, REG_MACAR2); | 380 | ks8842_write16(adapter, 39, mac, REG_MACAR2); |
381 | mac = ks8842_read16(adapter, 2, REG_MARH); | 381 | mac = ks8842_read16(adapter, 2, REG_MARH); |
382 | ks8842_write16(adapter, 39, mac, REG_MACAR1); | 382 | ks8842_write16(adapter, 39, mac, REG_MACAR1); |
383 | } else { | 383 | } else { |
384 | 384 | ||
385 | /* make sure the switch port uses the same MAC as the QMU */ | 385 | /* make sure the switch port uses the same MAC as the QMU */ |
386 | mac = ks8842_read16(adapter, 2, REG_MARL); | 386 | mac = ks8842_read16(adapter, 2, REG_MARL); |
387 | ks8842_write16(adapter, 39, mac, REG_MACAR1); | 387 | ks8842_write16(adapter, 39, mac, REG_MACAR1); |
388 | mac = ks8842_read16(adapter, 2, REG_MARM); | 388 | mac = ks8842_read16(adapter, 2, REG_MARM); |
389 | ks8842_write16(adapter, 39, mac, REG_MACAR2); | 389 | ks8842_write16(adapter, 39, mac, REG_MACAR2); |
390 | mac = ks8842_read16(adapter, 2, REG_MARH); | 390 | mac = ks8842_read16(adapter, 2, REG_MARH); |
391 | ks8842_write16(adapter, 39, mac, REG_MACAR3); | 391 | ks8842_write16(adapter, 39, mac, REG_MACAR3); |
392 | } | 392 | } |
393 | } | 393 | } |
394 | 394 | ||
395 | static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) | 395 | static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) |
396 | { | 396 | { |
397 | unsigned long flags; | 397 | unsigned long flags; |
398 | unsigned i; | 398 | unsigned i; |
399 | 399 | ||
400 | spin_lock_irqsave(&adapter->lock, flags); | 400 | spin_lock_irqsave(&adapter->lock, flags); |
401 | for (i = 0; i < ETH_ALEN; i++) { | 401 | for (i = 0; i < ETH_ALEN; i++) { |
402 | ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); | 402 | ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); |
403 | if (!(adapter->conf_flags & MICREL_KS884X)) | 403 | if (!(adapter->conf_flags & MICREL_KS884X)) |
404 | ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], | 404 | ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], |
405 | REG_MACAR1 + i); | 405 | REG_MACAR1 + i); |
406 | } | 406 | } |
407 | 407 | ||
408 | if (adapter->conf_flags & MICREL_KS884X) { | 408 | if (adapter->conf_flags & MICREL_KS884X) { |
409 | /* | 409 | /* |
410 | the sequence of saving mac addr between MAC and Switch is | 410 | the sequence of saving mac addr between MAC and Switch is |
411 | different. | 411 | different. |
412 | */ | 412 | */ |
413 | 413 | ||
414 | u16 mac; | 414 | u16 mac; |
415 | 415 | ||
416 | mac = ks8842_read16(adapter, 2, REG_MARL); | 416 | mac = ks8842_read16(adapter, 2, REG_MARL); |
417 | ks8842_write16(adapter, 39, mac, REG_MACAR3); | 417 | ks8842_write16(adapter, 39, mac, REG_MACAR3); |
418 | mac = ks8842_read16(adapter, 2, REG_MARM); | 418 | mac = ks8842_read16(adapter, 2, REG_MARM); |
419 | ks8842_write16(adapter, 39, mac, REG_MACAR2); | 419 | ks8842_write16(adapter, 39, mac, REG_MACAR2); |
420 | mac = ks8842_read16(adapter, 2, REG_MARH); | 420 | mac = ks8842_read16(adapter, 2, REG_MARH); |
421 | ks8842_write16(adapter, 39, mac, REG_MACAR1); | 421 | ks8842_write16(adapter, 39, mac, REG_MACAR1); |
422 | } | 422 | } |
423 | spin_unlock_irqrestore(&adapter->lock, flags); | 423 | spin_unlock_irqrestore(&adapter->lock, flags); |
424 | } | 424 | } |
425 | 425 | ||
426 | static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) | 426 | static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) |
427 | { | 427 | { |
428 | return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; | 428 | return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; |
429 | } | 429 | } |
430 | 430 | ||
431 | static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | 431 | static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) |
432 | { | 432 | { |
433 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 433 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
434 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; | 434 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; |
435 | u8 *buf = ctl->buf; | 435 | u8 *buf = ctl->buf; |
436 | 436 | ||
437 | if (ctl->adesc) { | 437 | if (ctl->adesc) { |
438 | netdev_dbg(netdev, "%s: TX ongoing\n", __func__); | 438 | netdev_dbg(netdev, "%s: TX ongoing\n", __func__); |
439 | /* transfer ongoing */ | 439 | /* transfer ongoing */ |
440 | return NETDEV_TX_BUSY; | 440 | return NETDEV_TX_BUSY; |
441 | } | 441 | } |
442 | 442 | ||
443 | sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); | 443 | sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); |
444 | 444 | ||
445 | /* copy data to the TX buffer */ | 445 | /* copy data to the TX buffer */ |
446 | /* the control word, enable IRQ, port 1 and the length */ | 446 | /* the control word, enable IRQ, port 1 and the length */ |
447 | *buf++ = 0x00; | 447 | *buf++ = 0x00; |
448 | *buf++ = 0x01; /* Port 1 */ | 448 | *buf++ = 0x01; /* Port 1 */ |
449 | *buf++ = skb->len & 0xff; | 449 | *buf++ = skb->len & 0xff; |
450 | *buf++ = (skb->len >> 8) & 0xff; | 450 | *buf++ = (skb->len >> 8) & 0xff; |
451 | skb_copy_from_linear_data(skb, buf, skb->len); | 451 | skb_copy_from_linear_data(skb, buf, skb->len); |
452 | 452 | ||
453 | dma_sync_single_range_for_device(adapter->dev, | 453 | dma_sync_single_range_for_device(adapter->dev, |
454 | sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), | 454 | sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), |
455 | DMA_TO_DEVICE); | 455 | DMA_TO_DEVICE); |
456 | 456 | ||
457 | /* make sure the length is a multiple of 4 */ | 457 | /* make sure the length is a multiple of 4 */ |
458 | if (sg_dma_len(&ctl->sg) % 4) | 458 | if (sg_dma_len(&ctl->sg) % 4) |
459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; |
460 | 460 | ||
461 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 461 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, |
462 | &ctl->sg, 1, DMA_TO_DEVICE, | 462 | &ctl->sg, 1, DMA_TO_DEVICE, |
463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
464 | if (!ctl->adesc) | 464 | if (!ctl->adesc) |
465 | return NETDEV_TX_BUSY; | 465 | return NETDEV_TX_BUSY; |
466 | 466 | ||
467 | ctl->adesc->callback_param = netdev; | 467 | ctl->adesc->callback_param = netdev; |
468 | ctl->adesc->callback = ks8842_dma_tx_cb; | 468 | ctl->adesc->callback = ks8842_dma_tx_cb; |
469 | ctl->adesc->tx_submit(ctl->adesc); | 469 | ctl->adesc->tx_submit(ctl->adesc); |
470 | 470 | ||
471 | netdev->stats.tx_bytes += skb->len; | 471 | netdev->stats.tx_bytes += skb->len; |
472 | 472 | ||
473 | dev_kfree_skb(skb); | 473 | dev_kfree_skb(skb); |
474 | 474 | ||
475 | return NETDEV_TX_OK; | 475 | return NETDEV_TX_OK; |
476 | } | 476 | } |
477 | 477 | ||
478 | static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) | 478 | static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) |
479 | { | 479 | { |
480 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 480 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
481 | int len = skb->len; | 481 | int len = skb->len; |
482 | 482 | ||
483 | netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", | 483 | netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", |
484 | __func__, skb->len, skb->head, skb->data, | 484 | __func__, skb->len, skb->head, skb->data, |
485 | skb_tail_pointer(skb), skb_end_pointer(skb)); | 485 | skb_tail_pointer(skb), skb_end_pointer(skb)); |
486 | 486 | ||
487 | /* check FIFO buffer space, we need space for CRC and command bits */ | 487 | /* check FIFO buffer space, we need space for CRC and command bits */ |
488 | if (ks8842_tx_fifo_space(adapter) < len + 8) | 488 | if (ks8842_tx_fifo_space(adapter) < len + 8) |
489 | return NETDEV_TX_BUSY; | 489 | return NETDEV_TX_BUSY; |
490 | 490 | ||
491 | if (adapter->conf_flags & KS884X_16BIT) { | 491 | if (adapter->conf_flags & KS884X_16BIT) { |
492 | u16 *ptr16 = (u16 *)skb->data; | 492 | u16 *ptr16 = (u16 *)skb->data; |
493 | ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); | 493 | ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); |
494 | ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); | 494 | ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); |
495 | netdev->stats.tx_bytes += len; | 495 | netdev->stats.tx_bytes += len; |
496 | 496 | ||
497 | /* copy buffer */ | 497 | /* copy buffer */ |
498 | while (len > 0) { | 498 | while (len > 0) { |
499 | iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); | 499 | iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); |
500 | iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); | 500 | iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); |
501 | len -= sizeof(u32); | 501 | len -= sizeof(u32); |
502 | } | 502 | } |
503 | } else { | 503 | } else { |
504 | 504 | ||
505 | u32 *ptr = (u32 *)skb->data; | 505 | u32 *ptr = (u32 *)skb->data; |
506 | u32 ctrl; | 506 | u32 ctrl; |
507 | /* the control word, enable IRQ, port 1 and the length */ | 507 | /* the control word, enable IRQ, port 1 and the length */ |
508 | ctrl = 0x8000 | 0x100 | (len << 16); | 508 | ctrl = 0x8000 | 0x100 | (len << 16); |
509 | ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); | 509 | ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); |
510 | 510 | ||
511 | netdev->stats.tx_bytes += len; | 511 | netdev->stats.tx_bytes += len; |
512 | 512 | ||
513 | /* copy buffer */ | 513 | /* copy buffer */ |
514 | while (len > 0) { | 514 | while (len > 0) { |
515 | iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); | 515 | iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); |
516 | len -= sizeof(u32); | 516 | len -= sizeof(u32); |
517 | ptr++; | 517 | ptr++; |
518 | } | 518 | } |
519 | } | 519 | } |
520 | 520 | ||
521 | /* enqueue packet */ | 521 | /* enqueue packet */ |
522 | ks8842_write16(adapter, 17, 1, REG_TXQCR); | 522 | ks8842_write16(adapter, 17, 1, REG_TXQCR); |
523 | 523 | ||
524 | dev_kfree_skb(skb); | 524 | dev_kfree_skb(skb); |
525 | 525 | ||
526 | return NETDEV_TX_OK; | 526 | return NETDEV_TX_OK; |
527 | } | 527 | } |
528 | 528 | ||
529 | static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) | 529 | static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) |
530 | { | 530 | { |
531 | netdev_dbg(netdev, "RX error, status: %x\n", status); | 531 | netdev_dbg(netdev, "RX error, status: %x\n", status); |
532 | 532 | ||
533 | netdev->stats.rx_errors++; | 533 | netdev->stats.rx_errors++; |
534 | if (status & RXSR_TOO_LONG) | 534 | if (status & RXSR_TOO_LONG) |
535 | netdev->stats.rx_length_errors++; | 535 | netdev->stats.rx_length_errors++; |
536 | if (status & RXSR_CRC_ERROR) | 536 | if (status & RXSR_CRC_ERROR) |
537 | netdev->stats.rx_crc_errors++; | 537 | netdev->stats.rx_crc_errors++; |
538 | if (status & RXSR_RUNT) | 538 | if (status & RXSR_RUNT) |
539 | netdev->stats.rx_frame_errors++; | 539 | netdev->stats.rx_frame_errors++; |
540 | } | 540 | } |
541 | 541 | ||
542 | static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, | 542 | static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, |
543 | int len) | 543 | int len) |
544 | { | 544 | { |
545 | netdev_dbg(netdev, "RX packet, len: %d\n", len); | 545 | netdev_dbg(netdev, "RX packet, len: %d\n", len); |
546 | 546 | ||
547 | netdev->stats.rx_packets++; | 547 | netdev->stats.rx_packets++; |
548 | netdev->stats.rx_bytes += len; | 548 | netdev->stats.rx_bytes += len; |
549 | if (status & RXSR_MULTICAST) | 549 | if (status & RXSR_MULTICAST) |
550 | netdev->stats.multicast++; | 550 | netdev->stats.multicast++; |
551 | } | 551 | } |
552 | 552 | ||
553 | static int __ks8842_start_new_rx_dma(struct net_device *netdev) | 553 | static int __ks8842_start_new_rx_dma(struct net_device *netdev) |
554 | { | 554 | { |
555 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 555 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
556 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; | 556 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; |
557 | struct scatterlist *sg = &ctl->sg; | 557 | struct scatterlist *sg = &ctl->sg; |
558 | int err; | 558 | int err; |
559 | 559 | ||
560 | ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); | 560 | ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); |
561 | if (ctl->skb) { | 561 | if (ctl->skb) { |
562 | sg_init_table(sg, 1); | 562 | sg_init_table(sg, 1); |
563 | sg_dma_address(sg) = dma_map_single(adapter->dev, | 563 | sg_dma_address(sg) = dma_map_single(adapter->dev, |
564 | ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | 564 | ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); |
565 | err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); | 565 | err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); |
566 | if (unlikely(err)) { | 566 | if (unlikely(err)) { |
567 | sg_dma_address(sg) = 0; | 567 | sg_dma_address(sg) = 0; |
568 | goto out; | 568 | goto out; |
569 | } | 569 | } |
570 | 570 | ||
571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | 571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; |
572 | 572 | ||
573 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 573 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, |
574 | sg, 1, DMA_FROM_DEVICE, | 574 | sg, 1, DMA_FROM_DEVICE, |
575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
576 | 576 | ||
577 | if (!ctl->adesc) | 577 | if (!ctl->adesc) |
578 | goto out; | 578 | goto out; |
579 | 579 | ||
580 | ctl->adesc->callback_param = netdev; | 580 | ctl->adesc->callback_param = netdev; |
581 | ctl->adesc->callback = ks8842_dma_rx_cb; | 581 | ctl->adesc->callback = ks8842_dma_rx_cb; |
582 | ctl->adesc->tx_submit(ctl->adesc); | 582 | ctl->adesc->tx_submit(ctl->adesc); |
583 | } else { | 583 | } else { |
584 | err = -ENOMEM; | 584 | err = -ENOMEM; |
585 | sg_dma_address(sg) = 0; | 585 | sg_dma_address(sg) = 0; |
586 | goto out; | 586 | goto out; |
587 | } | 587 | } |
588 | 588 | ||
589 | return err; | 589 | return err; |
590 | out: | 590 | out: |
591 | if (sg_dma_address(sg)) | 591 | if (sg_dma_address(sg)) |
592 | dma_unmap_single(adapter->dev, sg_dma_address(sg), | 592 | dma_unmap_single(adapter->dev, sg_dma_address(sg), |
593 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | 593 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); |
594 | sg_dma_address(sg) = 0; | 594 | sg_dma_address(sg) = 0; |
595 | if (ctl->skb) | 595 | if (ctl->skb) |
596 | dev_kfree_skb(ctl->skb); | 596 | dev_kfree_skb(ctl->skb); |
597 | 597 | ||
598 | ctl->skb = NULL; | 598 | ctl->skb = NULL; |
599 | 599 | ||
600 | printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); | 600 | printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); |
601 | return err; | 601 | return err; |
602 | } | 602 | } |
603 | 603 | ||
604 | static void ks8842_rx_frame_dma_tasklet(unsigned long arg) | 604 | static void ks8842_rx_frame_dma_tasklet(unsigned long arg) |
605 | { | 605 | { |
606 | struct net_device *netdev = (struct net_device *)arg; | 606 | struct net_device *netdev = (struct net_device *)arg; |
607 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 607 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
608 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; | 608 | struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; |
609 | struct sk_buff *skb = ctl->skb; | 609 | struct sk_buff *skb = ctl->skb; |
610 | dma_addr_t addr = sg_dma_address(&ctl->sg); | 610 | dma_addr_t addr = sg_dma_address(&ctl->sg); |
611 | u32 status; | 611 | u32 status; |
612 | 612 | ||
613 | ctl->adesc = NULL; | 613 | ctl->adesc = NULL; |
614 | 614 | ||
615 | /* kick next transfer going */ | 615 | /* kick next transfer going */ |
616 | __ks8842_start_new_rx_dma(netdev); | 616 | __ks8842_start_new_rx_dma(netdev); |
617 | 617 | ||
618 | /* now handle the data we got */ | 618 | /* now handle the data we got */ |
619 | dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | 619 | dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); |
620 | 620 | ||
621 | status = *((u32 *)skb->data); | 621 | status = *((u32 *)skb->data); |
622 | 622 | ||
623 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", | 623 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", |
624 | __func__, status & 0xffff); | 624 | __func__, status & 0xffff); |
625 | 625 | ||
626 | /* check the status */ | 626 | /* check the status */ |
627 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { | 627 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { |
628 | int len = (status >> 16) & 0x7ff; | 628 | int len = (status >> 16) & 0x7ff; |
629 | 629 | ||
630 | ks8842_update_rx_counters(netdev, status, len); | 630 | ks8842_update_rx_counters(netdev, status, len); |
631 | 631 | ||
632 | /* reserve 4 bytes which is the status word */ | 632 | /* reserve 4 bytes which is the status word */ |
633 | skb_reserve(skb, 4); | 633 | skb_reserve(skb, 4); |
634 | skb_put(skb, len); | 634 | skb_put(skb, len); |
635 | 635 | ||
636 | skb->protocol = eth_type_trans(skb, netdev); | 636 | skb->protocol = eth_type_trans(skb, netdev); |
637 | netif_rx(skb); | 637 | netif_rx(skb); |
638 | } else { | 638 | } else { |
639 | ks8842_update_rx_err_counters(netdev, status); | 639 | ks8842_update_rx_err_counters(netdev, status); |
640 | dev_kfree_skb(skb); | 640 | dev_kfree_skb(skb); |
641 | } | 641 | } |
642 | } | 642 | } |
643 | 643 | ||
644 | static void ks8842_rx_frame(struct net_device *netdev, | 644 | static void ks8842_rx_frame(struct net_device *netdev, |
645 | struct ks8842_adapter *adapter) | 645 | struct ks8842_adapter *adapter) |
646 | { | 646 | { |
647 | u32 status; | 647 | u32 status; |
648 | int len; | 648 | int len; |
649 | 649 | ||
650 | if (adapter->conf_flags & KS884X_16BIT) { | 650 | if (adapter->conf_flags & KS884X_16BIT) { |
651 | status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); | 651 | status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); |
652 | len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); | 652 | len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); |
653 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", | 653 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", |
654 | __func__, status); | 654 | __func__, status); |
655 | } else { | 655 | } else { |
656 | status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); | 656 | status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); |
657 | len = (status >> 16) & 0x7ff; | 657 | len = (status >> 16) & 0x7ff; |
658 | status &= 0xffff; | 658 | status &= 0xffff; |
659 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", | 659 | netdev_dbg(netdev, "%s - rx_data: status: %x\n", |
660 | __func__, status); | 660 | __func__, status); |
661 | } | 661 | } |
662 | 662 | ||
663 | /* check the status */ | 663 | /* check the status */ |
664 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { | 664 | if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { |
665 | struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); | 665 | struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3); |
666 | 666 | ||
667 | if (skb) { | 667 | if (skb) { |
668 | 668 | ||
669 | ks8842_update_rx_counters(netdev, status, len); | 669 | ks8842_update_rx_counters(netdev, status, len); |
670 | 670 | ||
671 | if (adapter->conf_flags & KS884X_16BIT) { | 671 | if (adapter->conf_flags & KS884X_16BIT) { |
672 | u16 *data16 = (u16 *)skb_put(skb, len); | 672 | u16 *data16 = (u16 *)skb_put(skb, len); |
673 | ks8842_select_bank(adapter, 17); | 673 | ks8842_select_bank(adapter, 17); |
674 | while (len > 0) { | 674 | while (len > 0) { |
675 | *data16++ = ioread16(adapter->hw_addr + | 675 | *data16++ = ioread16(adapter->hw_addr + |
676 | REG_QMU_DATA_LO); | 676 | REG_QMU_DATA_LO); |
677 | *data16++ = ioread16(adapter->hw_addr + | 677 | *data16++ = ioread16(adapter->hw_addr + |
678 | REG_QMU_DATA_HI); | 678 | REG_QMU_DATA_HI); |
679 | len -= sizeof(u32); | 679 | len -= sizeof(u32); |
680 | } | 680 | } |
681 | } else { | 681 | } else { |
682 | u32 *data = (u32 *)skb_put(skb, len); | 682 | u32 *data = (u32 *)skb_put(skb, len); |
683 | 683 | ||
684 | ks8842_select_bank(adapter, 17); | 684 | ks8842_select_bank(adapter, 17); |
685 | while (len > 0) { | 685 | while (len > 0) { |
686 | *data++ = ioread32(adapter->hw_addr + | 686 | *data++ = ioread32(adapter->hw_addr + |
687 | REG_QMU_DATA_LO); | 687 | REG_QMU_DATA_LO); |
688 | len -= sizeof(u32); | 688 | len -= sizeof(u32); |
689 | } | 689 | } |
690 | } | 690 | } |
691 | skb->protocol = eth_type_trans(skb, netdev); | 691 | skb->protocol = eth_type_trans(skb, netdev); |
692 | netif_rx(skb); | 692 | netif_rx(skb); |
693 | } else | 693 | } else |
694 | netdev->stats.rx_dropped++; | 694 | netdev->stats.rx_dropped++; |
695 | } else | 695 | } else |
696 | ks8842_update_rx_err_counters(netdev, status); | 696 | ks8842_update_rx_err_counters(netdev, status); |
697 | 697 | ||
698 | /* set high watermark to 3K */ | 698 | /* set high watermark to 3K */ |
699 | ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); | 699 | ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); |
700 | 700 | ||
701 | /* release the frame */ | 701 | /* release the frame */ |
702 | ks8842_write16(adapter, 17, 0x01, REG_RXQCR); | 702 | ks8842_write16(adapter, 17, 0x01, REG_RXQCR); |
703 | 703 | ||
704 | /* set high watermark to 2K */ | 704 | /* set high watermark to 2K */ |
705 | ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); | 705 | ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); |
706 | } | 706 | } |
707 | 707 | ||
708 | void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) | 708 | void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) |
709 | { | 709 | { |
710 | u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; | 710 | u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; |
711 | netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); | 711 | netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); |
712 | while (rx_data) { | 712 | while (rx_data) { |
713 | ks8842_rx_frame(netdev, adapter); | 713 | ks8842_rx_frame(netdev, adapter); |
714 | rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; | 714 | rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; |
715 | } | 715 | } |
716 | } | 716 | } |
717 | 717 | ||
718 | void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) | 718 | void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) |
719 | { | 719 | { |
720 | u16 sr = ks8842_read16(adapter, 16, REG_TXSR); | 720 | u16 sr = ks8842_read16(adapter, 16, REG_TXSR); |
721 | netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); | 721 | netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); |
722 | netdev->stats.tx_packets++; | 722 | netdev->stats.tx_packets++; |
723 | if (netif_queue_stopped(netdev)) | 723 | if (netif_queue_stopped(netdev)) |
724 | netif_wake_queue(netdev); | 724 | netif_wake_queue(netdev); |
725 | } | 725 | } |
726 | 726 | ||
727 | void ks8842_handle_rx_overrun(struct net_device *netdev, | 727 | void ks8842_handle_rx_overrun(struct net_device *netdev, |
728 | struct ks8842_adapter *adapter) | 728 | struct ks8842_adapter *adapter) |
729 | { | 729 | { |
730 | netdev_dbg(netdev, "%s: entry\n", __func__); | 730 | netdev_dbg(netdev, "%s: entry\n", __func__); |
731 | netdev->stats.rx_errors++; | 731 | netdev->stats.rx_errors++; |
732 | netdev->stats.rx_fifo_errors++; | 732 | netdev->stats.rx_fifo_errors++; |
733 | } | 733 | } |
734 | 734 | ||
735 | void ks8842_tasklet(unsigned long arg) | 735 | void ks8842_tasklet(unsigned long arg) |
736 | { | 736 | { |
737 | struct net_device *netdev = (struct net_device *)arg; | 737 | struct net_device *netdev = (struct net_device *)arg; |
738 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 738 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
739 | u16 isr; | 739 | u16 isr; |
740 | unsigned long flags; | 740 | unsigned long flags; |
741 | u16 entry_bank; | 741 | u16 entry_bank; |
742 | 742 | ||
743 | /* read current bank to be able to set it back */ | 743 | /* read current bank to be able to set it back */ |
744 | spin_lock_irqsave(&adapter->lock, flags); | 744 | spin_lock_irqsave(&adapter->lock, flags); |
745 | entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); | 745 | entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); |
746 | spin_unlock_irqrestore(&adapter->lock, flags); | 746 | spin_unlock_irqrestore(&adapter->lock, flags); |
747 | 747 | ||
748 | isr = ks8842_read16(adapter, 18, REG_ISR); | 748 | isr = ks8842_read16(adapter, 18, REG_ISR); |
749 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); | 749 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); |
750 | 750 | ||
751 | /* when running in DMA mode, do not ack RX interrupts, it is handled | 751 | /* when running in DMA mode, do not ack RX interrupts, it is handled |
752 | internally by timberdale, otherwise it's DMA FIFO:s would stop | 752 | internally by timberdale, otherwise it's DMA FIFO:s would stop |
753 | */ | 753 | */ |
754 | if (KS8842_USE_DMA(adapter)) | 754 | if (KS8842_USE_DMA(adapter)) |
755 | isr &= ~IRQ_RX; | 755 | isr &= ~IRQ_RX; |
756 | 756 | ||
757 | /* Ack */ | 757 | /* Ack */ |
758 | ks8842_write16(adapter, 18, isr, REG_ISR); | 758 | ks8842_write16(adapter, 18, isr, REG_ISR); |
759 | 759 | ||
760 | if (!(adapter->conf_flags & MICREL_KS884X)) | 760 | if (!(adapter->conf_flags & MICREL_KS884X)) |
761 | /* Ack in the timberdale IP as well */ | 761 | /* Ack in the timberdale IP as well */ |
762 | iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); | 762 | iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); |
763 | 763 | ||
764 | if (!netif_running(netdev)) | 764 | if (!netif_running(netdev)) |
765 | return; | 765 | return; |
766 | 766 | ||
767 | if (isr & IRQ_LINK_CHANGE) | 767 | if (isr & IRQ_LINK_CHANGE) |
768 | ks8842_update_link_status(netdev, adapter); | 768 | ks8842_update_link_status(netdev, adapter); |
769 | 769 | ||
770 | /* should not get IRQ_RX when running DMA mode */ | 770 | /* should not get IRQ_RX when running DMA mode */ |
771 | if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) | 771 | if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) |
772 | ks8842_handle_rx(netdev, adapter); | 772 | ks8842_handle_rx(netdev, adapter); |
773 | 773 | ||
774 | /* should only happen when in PIO mode */ | 774 | /* should only happen when in PIO mode */ |
775 | if (isr & IRQ_TX) | 775 | if (isr & IRQ_TX) |
776 | ks8842_handle_tx(netdev, adapter); | 776 | ks8842_handle_tx(netdev, adapter); |
777 | 777 | ||
778 | if (isr & IRQ_RX_OVERRUN) | 778 | if (isr & IRQ_RX_OVERRUN) |
779 | ks8842_handle_rx_overrun(netdev, adapter); | 779 | ks8842_handle_rx_overrun(netdev, adapter); |
780 | 780 | ||
781 | if (isr & IRQ_TX_STOPPED) { | 781 | if (isr & IRQ_TX_STOPPED) { |
782 | ks8842_disable_tx(adapter); | 782 | ks8842_disable_tx(adapter); |
783 | ks8842_enable_tx(adapter); | 783 | ks8842_enable_tx(adapter); |
784 | } | 784 | } |
785 | 785 | ||
786 | if (isr & IRQ_RX_STOPPED) { | 786 | if (isr & IRQ_RX_STOPPED) { |
787 | ks8842_disable_rx(adapter); | 787 | ks8842_disable_rx(adapter); |
788 | ks8842_enable_rx(adapter); | 788 | ks8842_enable_rx(adapter); |
789 | } | 789 | } |
790 | 790 | ||
791 | /* re-enable interrupts, put back the bank selection register */ | 791 | /* re-enable interrupts, put back the bank selection register */ |
792 | spin_lock_irqsave(&adapter->lock, flags); | 792 | spin_lock_irqsave(&adapter->lock, flags); |
793 | if (KS8842_USE_DMA(adapter)) | 793 | if (KS8842_USE_DMA(adapter)) |
794 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); | 794 | ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); |
795 | else | 795 | else |
796 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); | 796 | ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); |
797 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); | 797 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); |
798 | 798 | ||
799 | /* Make sure timberdale continues DMA operations, they are stopped while | 799 | /* Make sure timberdale continues DMA operations, they are stopped while |
800 | we are handling the ks8842 because we might change bank */ | 800 | we are handling the ks8842 because we might change bank */ |
801 | if (KS8842_USE_DMA(adapter)) | 801 | if (KS8842_USE_DMA(adapter)) |
802 | ks8842_resume_dma(adapter); | 802 | ks8842_resume_dma(adapter); |
803 | 803 | ||
804 | spin_unlock_irqrestore(&adapter->lock, flags); | 804 | spin_unlock_irqrestore(&adapter->lock, flags); |
805 | } | 805 | } |
806 | 806 | ||
807 | static irqreturn_t ks8842_irq(int irq, void *devid) | 807 | static irqreturn_t ks8842_irq(int irq, void *devid) |
808 | { | 808 | { |
809 | struct net_device *netdev = devid; | 809 | struct net_device *netdev = devid; |
810 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 810 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
811 | u16 isr; | 811 | u16 isr; |
812 | u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); | 812 | u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); |
813 | irqreturn_t ret = IRQ_NONE; | 813 | irqreturn_t ret = IRQ_NONE; |
814 | 814 | ||
815 | isr = ks8842_read16(adapter, 18, REG_ISR); | 815 | isr = ks8842_read16(adapter, 18, REG_ISR); |
816 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); | 816 | netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); |
817 | 817 | ||
818 | if (isr) { | 818 | if (isr) { |
819 | if (KS8842_USE_DMA(adapter)) | 819 | if (KS8842_USE_DMA(adapter)) |
820 | /* disable all but RX IRQ, since the FPGA relies on it*/ | 820 | /* disable all but RX IRQ, since the FPGA relies on it*/ |
821 | ks8842_write16(adapter, 18, IRQ_RX, REG_IER); | 821 | ks8842_write16(adapter, 18, IRQ_RX, REG_IER); |
822 | else | 822 | else |
823 | /* disable IRQ */ | 823 | /* disable IRQ */ |
824 | ks8842_write16(adapter, 18, 0x00, REG_IER); | 824 | ks8842_write16(adapter, 18, 0x00, REG_IER); |
825 | 825 | ||
826 | /* schedule tasklet */ | 826 | /* schedule tasklet */ |
827 | tasklet_schedule(&adapter->tasklet); | 827 | tasklet_schedule(&adapter->tasklet); |
828 | 828 | ||
829 | ret = IRQ_HANDLED; | 829 | ret = IRQ_HANDLED; |
830 | } | 830 | } |
831 | 831 | ||
832 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); | 832 | iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); |
833 | 833 | ||
834 |	/* After an interrupt, tell timberdale to continue DMA operations. | 834 |	/* After an interrupt, tell timberdale to continue DMA operations. |
835 |	   DMA is disabled while we are handling the ks8842 because we might | 835 |	   DMA is disabled while we are handling the ks8842 because we might |
836 |	   change banks */ | 836 |	   change banks */ |
837 | ks8842_resume_dma(adapter); | 837 | ks8842_resume_dma(adapter); |
838 | 838 | ||
839 | return ret; | 839 | return ret; |
840 | } | 840 | } |
841 | 841 | ||
842 | static void ks8842_dma_rx_cb(void *data) | 842 | static void ks8842_dma_rx_cb(void *data) |
843 | { | 843 | { |
844 | struct net_device *netdev = data; | 844 | struct net_device *netdev = data; |
845 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 845 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
846 | 846 | ||
847 | netdev_dbg(netdev, "RX DMA finished\n"); | 847 | netdev_dbg(netdev, "RX DMA finished\n"); |
848 | /* schedule tasklet */ | 848 | /* schedule tasklet */ |
849 | if (adapter->dma_rx.adesc) | 849 | if (adapter->dma_rx.adesc) |
850 | tasklet_schedule(&adapter->dma_rx.tasklet); | 850 | tasklet_schedule(&adapter->dma_rx.tasklet); |
851 | } | 851 | } |
852 | 852 | ||
853 | static void ks8842_dma_tx_cb(void *data) | 853 | static void ks8842_dma_tx_cb(void *data) |
854 | { | 854 | { |
855 | struct net_device *netdev = data; | 855 | struct net_device *netdev = data; |
856 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 856 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
857 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; | 857 | struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; |
858 | 858 | ||
859 | netdev_dbg(netdev, "TX DMA finished\n"); | 859 | netdev_dbg(netdev, "TX DMA finished\n"); |
860 | 860 | ||
861 | if (!ctl->adesc) | 861 | if (!ctl->adesc) |
862 | return; | 862 | return; |
863 | 863 | ||
864 | netdev->stats.tx_packets++; | 864 | netdev->stats.tx_packets++; |
865 | ctl->adesc = NULL; | 865 | ctl->adesc = NULL; |
866 | 866 | ||
867 | if (netif_queue_stopped(netdev)) | 867 | if (netif_queue_stopped(netdev)) |
868 | netif_wake_queue(netdev); | 868 | netif_wake_queue(netdev); |
869 | } | 869 | } |
870 | 870 | ||
871 | static void ks8842_stop_dma(struct ks8842_adapter *adapter) | 871 | static void ks8842_stop_dma(struct ks8842_adapter *adapter) |
872 | { | 872 | { |
873 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | 873 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; |
874 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | 874 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; |
875 | 875 | ||
876 | tx_ctl->adesc = NULL; | 876 | tx_ctl->adesc = NULL; |
877 | if (tx_ctl->chan) | 877 | if (tx_ctl->chan) |
878 | tx_ctl->chan->device->device_control(tx_ctl->chan, | 878 | tx_ctl->chan->device->device_control(tx_ctl->chan, |
879 | DMA_TERMINATE_ALL, 0); | 879 | DMA_TERMINATE_ALL, 0); |
880 | 880 | ||
881 | rx_ctl->adesc = NULL; | 881 | rx_ctl->adesc = NULL; |
882 | if (rx_ctl->chan) | 882 | if (rx_ctl->chan) |
883 | rx_ctl->chan->device->device_control(rx_ctl->chan, | 883 | rx_ctl->chan->device->device_control(rx_ctl->chan, |
884 | DMA_TERMINATE_ALL, 0); | 884 | DMA_TERMINATE_ALL, 0); |
885 | 885 | ||
886 | if (sg_dma_address(&rx_ctl->sg)) | 886 | if (sg_dma_address(&rx_ctl->sg)) |
887 | dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), | 887 | dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), |
888 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); | 888 | DMA_BUFFER_SIZE, DMA_FROM_DEVICE); |
889 | sg_dma_address(&rx_ctl->sg) = 0; | 889 | sg_dma_address(&rx_ctl->sg) = 0; |
890 | 890 | ||
891 | dev_kfree_skb(rx_ctl->skb); | 891 | dev_kfree_skb(rx_ctl->skb); |
892 | rx_ctl->skb = NULL; | 892 | rx_ctl->skb = NULL; |
893 | } | 893 | } |
894 | 894 | ||
895 | static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) | 895 | static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) |
896 | { | 896 | { |
897 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | 897 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; |
898 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | 898 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; |
899 | 899 | ||
900 | ks8842_stop_dma(adapter); | 900 | ks8842_stop_dma(adapter); |
901 | 901 | ||
902 | if (tx_ctl->chan) | 902 | if (tx_ctl->chan) |
903 | dma_release_channel(tx_ctl->chan); | 903 | dma_release_channel(tx_ctl->chan); |
904 | tx_ctl->chan = NULL; | 904 | tx_ctl->chan = NULL; |
905 | 905 | ||
906 | if (rx_ctl->chan) | 906 | if (rx_ctl->chan) |
907 | dma_release_channel(rx_ctl->chan); | 907 | dma_release_channel(rx_ctl->chan); |
908 | rx_ctl->chan = NULL; | 908 | rx_ctl->chan = NULL; |
909 | 909 | ||
910 | tasklet_kill(&rx_ctl->tasklet); | 910 | tasklet_kill(&rx_ctl->tasklet); |
911 | 911 | ||
912 | if (sg_dma_address(&tx_ctl->sg)) | 912 | if (sg_dma_address(&tx_ctl->sg)) |
913 | dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), | 913 | dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), |
914 | DMA_BUFFER_SIZE, DMA_TO_DEVICE); | 914 | DMA_BUFFER_SIZE, DMA_TO_DEVICE); |
915 | sg_dma_address(&tx_ctl->sg) = 0; | 915 | sg_dma_address(&tx_ctl->sg) = 0; |
916 | 916 | ||
917 | kfree(tx_ctl->buf); | 917 | kfree(tx_ctl->buf); |
918 | tx_ctl->buf = NULL; | 918 | tx_ctl->buf = NULL; |
919 | } | 919 | } |
920 | 920 | ||
921 | static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) | 921 | static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) |
922 | { | 922 | { |
923 | return chan->chan_id == (long)filter_param; | 923 | return chan->chan_id == (long)filter_param; |
924 | } | 924 | } |
925 | 925 | ||
926 | static int ks8842_alloc_dma_bufs(struct net_device *netdev) | 926 | static int ks8842_alloc_dma_bufs(struct net_device *netdev) |
927 | { | 927 | { |
928 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 928 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
929 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; | 929 | struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; |
930 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; | 930 | struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; |
931 | int err; | 931 | int err; |
932 | 932 | ||
933 | dma_cap_mask_t mask; | 933 | dma_cap_mask_t mask; |
934 | 934 | ||
935 | dma_cap_zero(mask); | 935 | dma_cap_zero(mask); |
936 | dma_cap_set(DMA_SLAVE, mask); | 936 | dma_cap_set(DMA_SLAVE, mask); |
937 | dma_cap_set(DMA_PRIVATE, mask); | 937 | dma_cap_set(DMA_PRIVATE, mask); |
938 | 938 | ||
939 | sg_init_table(&tx_ctl->sg, 1); | 939 | sg_init_table(&tx_ctl->sg, 1); |
940 | 940 | ||
941 | tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, | 941 | tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, |
942 | (void *)(long)tx_ctl->channel); | 942 | (void *)(long)tx_ctl->channel); |
943 | if (!tx_ctl->chan) { | 943 | if (!tx_ctl->chan) { |
944 | err = -ENODEV; | 944 | err = -ENODEV; |
945 | goto err; | 945 | goto err; |
946 | } | 946 | } |
947 | 947 | ||
948 | /* allocate DMA buffer */ | 948 | /* allocate DMA buffer */ |
949 | tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); | 949 | tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); |
950 | if (!tx_ctl->buf) { | 950 | if (!tx_ctl->buf) { |
951 | err = -ENOMEM; | 951 | err = -ENOMEM; |
952 | goto err; | 952 | goto err; |
953 | } | 953 | } |
954 | 954 | ||
955 | sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, | 955 | sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, |
956 | tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); | 956 | tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); |
957 | err = dma_mapping_error(adapter->dev, | 957 | err = dma_mapping_error(adapter->dev, |
958 | sg_dma_address(&tx_ctl->sg)); | 958 | sg_dma_address(&tx_ctl->sg)); |
959 | if (err) { | 959 | if (err) { |
960 | sg_dma_address(&tx_ctl->sg) = 0; | 960 | sg_dma_address(&tx_ctl->sg) = 0; |
961 | goto err; | 961 | goto err; |
962 | } | 962 | } |
963 | 963 | ||
964 | rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, | 964 | rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, |
965 | (void *)(long)rx_ctl->channel); | 965 | (void *)(long)rx_ctl->channel); |
966 | if (!rx_ctl->chan) { | 966 | if (!rx_ctl->chan) { |
967 | err = -ENODEV; | 967 | err = -ENODEV; |
968 | goto err; | 968 | goto err; |
969 | } | 969 | } |
970 | 970 | ||
971 | tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, | 971 | tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, |
972 | (unsigned long)netdev); | 972 | (unsigned long)netdev); |
973 | 973 | ||
974 | return 0; | 974 | return 0; |
975 | err: | 975 | err: |
976 | ks8842_dealloc_dma_bufs(adapter); | 976 | ks8842_dealloc_dma_bufs(adapter); |
977 | return err; | 977 | return err; |
978 | } | 978 | } |
979 | 979 | ||
980 | /* Netdevice operations */ | 980 | /* Netdevice operations */ |
981 | 981 | ||
982 | static int ks8842_open(struct net_device *netdev) | 982 | static int ks8842_open(struct net_device *netdev) |
983 | { | 983 | { |
984 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 984 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
985 | int err; | 985 | int err; |
986 | 986 | ||
987 | netdev_dbg(netdev, "%s - entry\n", __func__); | 987 | netdev_dbg(netdev, "%s - entry\n", __func__); |
988 | 988 | ||
989 | if (KS8842_USE_DMA(adapter)) { | 989 | if (KS8842_USE_DMA(adapter)) { |
990 | err = ks8842_alloc_dma_bufs(netdev); | 990 | err = ks8842_alloc_dma_bufs(netdev); |
991 | 991 | ||
992 | if (!err) { | 992 | if (!err) { |
993 | /* start RX dma */ | 993 | /* start RX dma */ |
994 | err = __ks8842_start_new_rx_dma(netdev); | 994 | err = __ks8842_start_new_rx_dma(netdev); |
995 | if (err) | 995 | if (err) |
996 | ks8842_dealloc_dma_bufs(adapter); | 996 | ks8842_dealloc_dma_bufs(adapter); |
997 | } | 997 | } |
998 | 998 | ||
999 | if (err) { | 999 | if (err) { |
1000 | printk(KERN_WARNING DRV_NAME | 1000 | printk(KERN_WARNING DRV_NAME |
1001 | ": Failed to initiate DMA, running PIO\n"); | 1001 | ": Failed to initiate DMA, running PIO\n"); |
1002 | ks8842_dealloc_dma_bufs(adapter); | 1002 | ks8842_dealloc_dma_bufs(adapter); |
1003 | adapter->dma_rx.channel = -1; | 1003 | adapter->dma_rx.channel = -1; |
1004 | adapter->dma_tx.channel = -1; | 1004 | adapter->dma_tx.channel = -1; |
1005 | } | 1005 | } |
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | /* reset the HW */ | 1008 | /* reset the HW */ |
1009 | ks8842_reset_hw(adapter); | 1009 | ks8842_reset_hw(adapter); |
1010 | 1010 | ||
1011 | ks8842_write_mac_addr(adapter, netdev->dev_addr); | 1011 | ks8842_write_mac_addr(adapter, netdev->dev_addr); |
1012 | 1012 | ||
1013 | ks8842_update_link_status(netdev, adapter); | 1013 | ks8842_update_link_status(netdev, adapter); |
1014 | 1014 | ||
1015 | err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, | 1015 | err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, |
1016 | netdev); | 1016 | netdev); |
1017 | if (err) { | 1017 | if (err) { |
1018 | pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); | 1018 | pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); |
1019 | return err; | 1019 | return err; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static int ks8842_close(struct net_device *netdev) | 1025 | static int ks8842_close(struct net_device *netdev) |
1026 | { | 1026 | { |
1027 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 1027 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
1028 | 1028 | ||
1029 | netdev_dbg(netdev, "%s - entry\n", __func__); | 1029 | netdev_dbg(netdev, "%s - entry\n", __func__); |
1030 | 1030 | ||
1031 | cancel_work_sync(&adapter->timeout_work); | 1031 | cancel_work_sync(&adapter->timeout_work); |
1032 | 1032 | ||
1033 | if (KS8842_USE_DMA(adapter)) | 1033 | if (KS8842_USE_DMA(adapter)) |
1034 | ks8842_dealloc_dma_bufs(adapter); | 1034 | ks8842_dealloc_dma_bufs(adapter); |
1035 | 1035 | ||
1036 | /* free the irq */ | 1036 | /* free the irq */ |
1037 | free_irq(adapter->irq, netdev); | 1037 | free_irq(adapter->irq, netdev); |
1038 | 1038 | ||
1039 | /* disable the switch */ | 1039 | /* disable the switch */ |
1040 | ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); | 1040 | ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); |
1041 | 1041 | ||
1042 | return 0; | 1042 | return 0; |
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, | 1045 | static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, |
1046 | struct net_device *netdev) | 1046 | struct net_device *netdev) |
1047 | { | 1047 | { |
1048 | int ret; | 1048 | int ret; |
1049 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 1049 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
1050 | 1050 | ||
1051 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1051 | netdev_dbg(netdev, "%s: entry\n", __func__); |
1052 | 1052 | ||
1053 | if (KS8842_USE_DMA(adapter)) { | 1053 | if (KS8842_USE_DMA(adapter)) { |
1054 | unsigned long flags; | 1054 | unsigned long flags; |
1055 | ret = ks8842_tx_frame_dma(skb, netdev); | 1055 | ret = ks8842_tx_frame_dma(skb, netdev); |
1056 |		/* for now, only allow one transfer at a time */ | 1056 |		/* for now, only allow one transfer at a time */ |
1057 | spin_lock_irqsave(&adapter->lock, flags); | 1057 | spin_lock_irqsave(&adapter->lock, flags); |
1058 | if (adapter->dma_tx.adesc) | 1058 | if (adapter->dma_tx.adesc) |
1059 | netif_stop_queue(netdev); | 1059 | netif_stop_queue(netdev); |
1060 | spin_unlock_irqrestore(&adapter->lock, flags); | 1060 | spin_unlock_irqrestore(&adapter->lock, flags); |
1061 | return ret; | 1061 | return ret; |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | ret = ks8842_tx_frame(skb, netdev); | 1064 | ret = ks8842_tx_frame(skb, netdev); |
1065 | 1065 | ||
1066 | if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) | 1066 | if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) |
1067 | netif_stop_queue(netdev); | 1067 | netif_stop_queue(netdev); |
1068 | 1068 | ||
1069 | return ret; | 1069 | return ret; |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | static int ks8842_set_mac(struct net_device *netdev, void *p) | 1072 | static int ks8842_set_mac(struct net_device *netdev, void *p) |
1073 | { | 1073 | { |
1074 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 1074 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
1075 | struct sockaddr *addr = p; | 1075 | struct sockaddr *addr = p; |
1076 | char *mac = (u8 *)addr->sa_data; | 1076 | char *mac = (u8 *)addr->sa_data; |
1077 | 1077 | ||
1078 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1078 | netdev_dbg(netdev, "%s: entry\n", __func__); |
1079 | 1079 | ||
1080 | if (!is_valid_ether_addr(addr->sa_data)) | 1080 | if (!is_valid_ether_addr(addr->sa_data)) |
1081 | return -EADDRNOTAVAIL; | 1081 | return -EADDRNOTAVAIL; |
1082 | 1082 | ||
1083 | memcpy(netdev->dev_addr, mac, netdev->addr_len); | 1083 | memcpy(netdev->dev_addr, mac, netdev->addr_len); |
1084 | 1084 | ||
1085 | ks8842_write_mac_addr(adapter, mac); | 1085 | ks8842_write_mac_addr(adapter, mac); |
1086 | return 0; | 1086 | return 0; |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | static void ks8842_tx_timeout_work(struct work_struct *work) | 1089 | static void ks8842_tx_timeout_work(struct work_struct *work) |
1090 | { | 1090 | { |
1091 | struct ks8842_adapter *adapter = | 1091 | struct ks8842_adapter *adapter = |
1092 | container_of(work, struct ks8842_adapter, timeout_work); | 1092 | container_of(work, struct ks8842_adapter, timeout_work); |
1093 | struct net_device *netdev = adapter->netdev; | 1093 | struct net_device *netdev = adapter->netdev; |
1094 | unsigned long flags; | 1094 | unsigned long flags; |
1095 | 1095 | ||
1096 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1096 | netdev_dbg(netdev, "%s: entry\n", __func__); |
1097 | 1097 | ||
1098 | spin_lock_irqsave(&adapter->lock, flags); | 1098 | spin_lock_irqsave(&adapter->lock, flags); |
1099 | 1099 | ||
1100 | if (KS8842_USE_DMA(adapter)) | 1100 | if (KS8842_USE_DMA(adapter)) |
1101 | ks8842_stop_dma(adapter); | 1101 | ks8842_stop_dma(adapter); |
1102 | 1102 | ||
1103 | /* disable interrupts */ | 1103 | /* disable interrupts */ |
1104 | ks8842_write16(adapter, 18, 0, REG_IER); | 1104 | ks8842_write16(adapter, 18, 0, REG_IER); |
1105 | ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); | 1105 | ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); |
1106 | 1106 | ||
1107 | netif_stop_queue(netdev); | 1107 | netif_stop_queue(netdev); |
1108 | 1108 | ||
1109 | spin_unlock_irqrestore(&adapter->lock, flags); | 1109 | spin_unlock_irqrestore(&adapter->lock, flags); |
1110 | 1110 | ||
1111 | ks8842_reset_hw(adapter); | 1111 | ks8842_reset_hw(adapter); |
1112 | 1112 | ||
1113 | ks8842_write_mac_addr(adapter, netdev->dev_addr); | 1113 | ks8842_write_mac_addr(adapter, netdev->dev_addr); |
1114 | 1114 | ||
1115 | ks8842_update_link_status(netdev, adapter); | 1115 | ks8842_update_link_status(netdev, adapter); |
1116 | 1116 | ||
1117 | if (KS8842_USE_DMA(adapter)) | 1117 | if (KS8842_USE_DMA(adapter)) |
1118 | __ks8842_start_new_rx_dma(netdev); | 1118 | __ks8842_start_new_rx_dma(netdev); |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | static void ks8842_tx_timeout(struct net_device *netdev) | 1121 | static void ks8842_tx_timeout(struct net_device *netdev) |
1122 | { | 1122 | { |
1123 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 1123 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
1124 | 1124 | ||
1125 | netdev_dbg(netdev, "%s: entry\n", __func__); | 1125 | netdev_dbg(netdev, "%s: entry\n", __func__); |
1126 | 1126 | ||
1127 | schedule_work(&adapter->timeout_work); | 1127 | schedule_work(&adapter->timeout_work); |
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | static const struct net_device_ops ks8842_netdev_ops = { | 1130 | static const struct net_device_ops ks8842_netdev_ops = { |
1131 | .ndo_open = ks8842_open, | 1131 | .ndo_open = ks8842_open, |
1132 | .ndo_stop = ks8842_close, | 1132 | .ndo_stop = ks8842_close, |
1133 | .ndo_start_xmit = ks8842_xmit_frame, | 1133 | .ndo_start_xmit = ks8842_xmit_frame, |
1134 | .ndo_set_mac_address = ks8842_set_mac, | 1134 | .ndo_set_mac_address = ks8842_set_mac, |
1135 | .ndo_tx_timeout = ks8842_tx_timeout, | 1135 | .ndo_tx_timeout = ks8842_tx_timeout, |
1136 | .ndo_validate_addr = eth_validate_addr | 1136 | .ndo_validate_addr = eth_validate_addr |
1137 | }; | 1137 | }; |
1138 | 1138 | ||
1139 | static const struct ethtool_ops ks8842_ethtool_ops = { | 1139 | static const struct ethtool_ops ks8842_ethtool_ops = { |
1140 | .get_link = ethtool_op_get_link, | 1140 | .get_link = ethtool_op_get_link, |
1141 | }; | 1141 | }; |
1142 | 1142 | ||
1143 | static int __devinit ks8842_probe(struct platform_device *pdev) | 1143 | static int __devinit ks8842_probe(struct platform_device *pdev) |
1144 | { | 1144 | { |
1145 | int err = -ENOMEM; | 1145 | int err = -ENOMEM; |
1146 | struct resource *iomem; | 1146 | struct resource *iomem; |
1147 | struct net_device *netdev; | 1147 | struct net_device *netdev; |
1148 | struct ks8842_adapter *adapter; | 1148 | struct ks8842_adapter *adapter; |
1149 | struct ks8842_platform_data *pdata = mfd_get_data(pdev); | 1149 | struct ks8842_platform_data *pdata = mfd_get_data(pdev); |
1150 | u16 id; | 1150 | u16 id; |
1151 | unsigned i; | 1151 | unsigned i; |
1152 | 1152 | ||
1153 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1153 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1154 | if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) | 1154 | if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) |
1155 | goto err_mem_region; | 1155 | goto err_mem_region; |
1156 | 1156 | ||
1157 | netdev = alloc_etherdev(sizeof(struct ks8842_adapter)); | 1157 | netdev = alloc_etherdev(sizeof(struct ks8842_adapter)); |
1158 | if (!netdev) | 1158 | if (!netdev) |
1159 | goto err_alloc_etherdev; | 1159 | goto err_alloc_etherdev; |
1160 | 1160 | ||
1161 | SET_NETDEV_DEV(netdev, &pdev->dev); | 1161 | SET_NETDEV_DEV(netdev, &pdev->dev); |
1162 | 1162 | ||
1163 | adapter = netdev_priv(netdev); | 1163 | adapter = netdev_priv(netdev); |
1164 | adapter->netdev = netdev; | 1164 | adapter->netdev = netdev; |
1165 | INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); | 1165 | INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); |
1166 | adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); | 1166 | adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); |
1167 | adapter->conf_flags = iomem->flags; | 1167 | adapter->conf_flags = iomem->flags; |
1168 | 1168 | ||
1169 | if (!adapter->hw_addr) | 1169 | if (!adapter->hw_addr) |
1170 | goto err_ioremap; | 1170 | goto err_ioremap; |
1171 | 1171 | ||
1172 | adapter->irq = platform_get_irq(pdev, 0); | 1172 | adapter->irq = platform_get_irq(pdev, 0); |
1173 | if (adapter->irq < 0) { | 1173 | if (adapter->irq < 0) { |
1174 | err = adapter->irq; | 1174 | err = adapter->irq; |
1175 | goto err_get_irq; | 1175 | goto err_get_irq; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; | 1178 | adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; |
1179 | 1179 | ||
1180 | /* DMA is only supported when accessed via timberdale */ | 1180 | /* DMA is only supported when accessed via timberdale */ |
1181 | if (!(adapter->conf_flags & MICREL_KS884X) && pdata && | 1181 | if (!(adapter->conf_flags & MICREL_KS884X) && pdata && |
1182 | (pdata->tx_dma_channel != -1) && | 1182 | (pdata->tx_dma_channel != -1) && |
1183 | (pdata->rx_dma_channel != -1)) { | 1183 | (pdata->rx_dma_channel != -1)) { |
1184 | adapter->dma_rx.channel = pdata->rx_dma_channel; | 1184 | adapter->dma_rx.channel = pdata->rx_dma_channel; |
1185 | adapter->dma_tx.channel = pdata->tx_dma_channel; | 1185 | adapter->dma_tx.channel = pdata->tx_dma_channel; |
1186 | } else { | 1186 | } else { |
1187 | adapter->dma_rx.channel = -1; | 1187 | adapter->dma_rx.channel = -1; |
1188 | adapter->dma_tx.channel = -1; | 1188 | adapter->dma_tx.channel = -1; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); | 1191 | tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); |
1192 | spin_lock_init(&adapter->lock); | 1192 | spin_lock_init(&adapter->lock); |
1193 | 1193 | ||
1194 | netdev->netdev_ops = &ks8842_netdev_ops; | 1194 | netdev->netdev_ops = &ks8842_netdev_ops; |
1195 | netdev->ethtool_ops = &ks8842_ethtool_ops; | 1195 | netdev->ethtool_ops = &ks8842_ethtool_ops; |
1196 | 1196 | ||
1197 | /* Check if a mac address was given */ | 1197 | /* Check if a mac address was given */ |
1198 | i = netdev->addr_len; | 1198 | i = netdev->addr_len; |
1199 | if (pdata) { | 1199 | if (pdata) { |
1200 | for (i = 0; i < netdev->addr_len; i++) | 1200 | for (i = 0; i < netdev->addr_len; i++) |
1201 | if (pdata->macaddr[i] != 0) | 1201 | if (pdata->macaddr[i] != 0) |
1202 | break; | 1202 | break; |
1203 | 1203 | ||
1204 | if (i < netdev->addr_len) | 1204 | if (i < netdev->addr_len) |
1205 | /* an address was passed, use it */ | 1205 | /* an address was passed, use it */ |
1206 | memcpy(netdev->dev_addr, pdata->macaddr, | 1206 | memcpy(netdev->dev_addr, pdata->macaddr, |
1207 | netdev->addr_len); | 1207 | netdev->addr_len); |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | if (i == netdev->addr_len) { | 1210 | if (i == netdev->addr_len) { |
1211 | ks8842_read_mac_addr(adapter, netdev->dev_addr); | 1211 | ks8842_read_mac_addr(adapter, netdev->dev_addr); |
1212 | 1212 | ||
1213 | if (!is_valid_ether_addr(netdev->dev_addr)) | 1213 | if (!is_valid_ether_addr(netdev->dev_addr)) |
1214 | random_ether_addr(netdev->dev_addr); | 1214 | random_ether_addr(netdev->dev_addr); |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); | 1217 | id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); |
1218 | 1218 | ||
1219 | strcpy(netdev->name, "eth%d"); | 1219 | strcpy(netdev->name, "eth%d"); |
1220 | err = register_netdev(netdev); | 1220 | err = register_netdev(netdev); |
1221 | if (err) | 1221 | if (err) |
1222 | goto err_register; | 1222 | goto err_register; |
1223 | 1223 | ||
1224 | platform_set_drvdata(pdev, netdev); | 1224 | platform_set_drvdata(pdev, netdev); |
1225 | 1225 | ||
1226 | pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", | 1226 | pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", |
1227 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); | 1227 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); |
1228 | 1228 | ||
1229 | return 0; | 1229 | return 0; |
1230 | 1230 | ||
1231 | err_register: | 1231 | err_register: |
1232 | err_get_irq: | 1232 | err_get_irq: |
1233 | iounmap(adapter->hw_addr); | 1233 | iounmap(adapter->hw_addr); |
1234 | err_ioremap: | 1234 | err_ioremap: |
1235 | free_netdev(netdev); | 1235 | free_netdev(netdev); |
1236 | err_alloc_etherdev: | 1236 | err_alloc_etherdev: |
1237 | release_mem_region(iomem->start, resource_size(iomem)); | 1237 | release_mem_region(iomem->start, resource_size(iomem)); |
1238 | err_mem_region: | 1238 | err_mem_region: |
1239 | return err; | 1239 | return err; |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | static int __devexit ks8842_remove(struct platform_device *pdev) | 1242 | static int __devexit ks8842_remove(struct platform_device *pdev) |
1243 | { | 1243 | { |
1244 | struct net_device *netdev = platform_get_drvdata(pdev); | 1244 | struct net_device *netdev = platform_get_drvdata(pdev); |
1245 | struct ks8842_adapter *adapter = netdev_priv(netdev); | 1245 | struct ks8842_adapter *adapter = netdev_priv(netdev); |
1246 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1246 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1247 | 1247 | ||
1248 | unregister_netdev(netdev); | 1248 | unregister_netdev(netdev); |
1249 | tasklet_kill(&adapter->tasklet); | 1249 | tasklet_kill(&adapter->tasklet); |
1250 | iounmap(adapter->hw_addr); | 1250 | iounmap(adapter->hw_addr); |
1251 | free_netdev(netdev); | 1251 | free_netdev(netdev); |
1252 | release_mem_region(iomem->start, resource_size(iomem)); | 1252 | release_mem_region(iomem->start, resource_size(iomem)); |
1253 | platform_set_drvdata(pdev, NULL); | 1253 | platform_set_drvdata(pdev, NULL); |
1254 | return 0; | 1254 | return 0; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | 1257 | ||
1258 | static struct platform_driver ks8842_platform_driver = { | 1258 | static struct platform_driver ks8842_platform_driver = { |
1259 | .driver = { | 1259 | .driver = { |
1260 | .name = DRV_NAME, | 1260 | .name = DRV_NAME, |
1261 | .owner = THIS_MODULE, | 1261 | .owner = THIS_MODULE, |
1262 | }, | 1262 | }, |
1263 | .probe = ks8842_probe, | 1263 | .probe = ks8842_probe, |
1264 | .remove = ks8842_remove, | 1264 | .remove = ks8842_remove, |
1265 | }; | 1265 | }; |
1266 | 1266 | ||
1267 | static int __init ks8842_init(void) | 1267 | static int __init ks8842_init(void) |
1268 | { | 1268 | { |
1269 | return platform_driver_register(&ks8842_platform_driver); | 1269 | return platform_driver_register(&ks8842_platform_driver); |
1270 | } | 1270 | } |
1271 | 1271 | ||
1272 | static void __exit ks8842_exit(void) | 1272 | static void __exit ks8842_exit(void) |
1273 | { | 1273 | { |
1274 | platform_driver_unregister(&ks8842_platform_driver); | 1274 | platform_driver_unregister(&ks8842_platform_driver); |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | module_init(ks8842_init); | 1277 | module_init(ks8842_init); |
1278 | module_exit(ks8842_exit); | 1278 | module_exit(ks8842_exit); |
1279 | 1279 | ||
1280 | MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); | 1280 | MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); |
1281 | MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); | 1281 | MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); |
1282 | MODULE_LICENSE("GPL v2"); | 1282 | MODULE_LICENSE("GPL v2"); |
1283 | MODULE_ALIAS("platform:ks8842"); | 1283 | MODULE_ALIAS("platform:ks8842"); |
1284 | 1284 | ||
1285 | 1285 |