Commit 23ecc4bde21f0ccb38f4b53cadde7fc5d67d68e3

Authored by Brian Hill
Committed by David S. Miller
1 parent 755fae0ac4

net: ll_temac: fix checksum offload logic

The current checksum offload code does not work and this corrects
that functionality. It also updates the interrupt coalescing
initialization so that there are fewer interrupts and performance
is increased.

Signed-off-by: Brian Hill <brian.hill@xilinx.com>
Signed-off-by: John Linn <john.linn@xilinx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 63 additions and 24 deletions Inline Diff

drivers/net/ll_temac.h
1 1
2 #ifndef XILINX_LL_TEMAC_H 2 #ifndef XILINX_LL_TEMAC_H
3 #define XILINX_LL_TEMAC_H 3 #define XILINX_LL_TEMAC_H
4 4
5 #include <linux/netdevice.h> 5 #include <linux/netdevice.h>
6 #include <linux/of.h> 6 #include <linux/of.h>
7 #include <linux/spinlock.h> 7 #include <linux/spinlock.h>
8 8
9 #ifdef CONFIG_PPC_DCR 9 #ifdef CONFIG_PPC_DCR
10 #include <asm/dcr.h> 10 #include <asm/dcr.h>
11 #include <asm/dcr-regs.h> 11 #include <asm/dcr-regs.h>
12 #endif 12 #endif
13 13
14 /* packet size info */ 14 /* packet size info */
15 #define XTE_HDR_SIZE 14 /* size of Ethernet header */ 15 #define XTE_HDR_SIZE 14 /* size of Ethernet header */
16 #define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */ 16 #define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
17 #define XTE_JUMBO_MTU 9000 17 #define XTE_JUMBO_MTU 9000
18 #define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE) 18 #define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
19 19
20 /* Configuration options */ 20 /* Configuration options */
21 21
22 /* Accept all incoming packets. 22 /* Accept all incoming packets.
23 * This option defaults to disabled (cleared) */ 23 * This option defaults to disabled (cleared) */
24 #define XTE_OPTION_PROMISC (1 << 0) 24 #define XTE_OPTION_PROMISC (1 << 0)
25 /* Jumbo frame support for Tx & Rx. 25 /* Jumbo frame support for Tx & Rx.
26 * This option defaults to disabled (cleared) */ 26 * This option defaults to disabled (cleared) */
27 #define XTE_OPTION_JUMBO (1 << 1) 27 #define XTE_OPTION_JUMBO (1 << 1)
28 /* VLAN Rx & Tx frame support. 28 /* VLAN Rx & Tx frame support.
29 * This option defaults to disabled (cleared) */ 29 * This option defaults to disabled (cleared) */
30 #define XTE_OPTION_VLAN (1 << 2) 30 #define XTE_OPTION_VLAN (1 << 2)
31 /* Enable recognition of flow control frames on Rx 31 /* Enable recognition of flow control frames on Rx
32 * This option defaults to enabled (set) */ 32 * This option defaults to enabled (set) */
33 #define XTE_OPTION_FLOW_CONTROL (1 << 4) 33 #define XTE_OPTION_FLOW_CONTROL (1 << 4)
34 /* Strip FCS and PAD from incoming frames. 34 /* Strip FCS and PAD from incoming frames.
35 * Note: PAD from VLAN frames is not stripped. 35 * Note: PAD from VLAN frames is not stripped.
36 * This option defaults to disabled (cleared) */ 36 * This option defaults to disabled (cleared) */
37 #define XTE_OPTION_FCS_STRIP (1 << 5) 37 #define XTE_OPTION_FCS_STRIP (1 << 5)
38 /* Generate FCS field and add PAD automatically for outgoing frames. 38 /* Generate FCS field and add PAD automatically for outgoing frames.
39 * This option defaults to enabled (set) */ 39 * This option defaults to enabled (set) */
40 #define XTE_OPTION_FCS_INSERT (1 << 6) 40 #define XTE_OPTION_FCS_INSERT (1 << 6)
41 /* Enable Length/Type error checking for incoming frames. When this option is 41 /* Enable Length/Type error checking for incoming frames. When this option is
42 set, the MAC will filter frames that have a mismatched type/length field 42 set, the MAC will filter frames that have a mismatched type/length field
43 and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these 43 and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
44 types of frames are encountered. When this option is cleared, the MAC will 44 types of frames are encountered. When this option is cleared, the MAC will
45 allow these types of frames to be received. 45 allow these types of frames to be received.
46 This option defaults to enabled (set) */ 46 This option defaults to enabled (set) */
47 #define XTE_OPTION_LENTYPE_ERR (1 << 7) 47 #define XTE_OPTION_LENTYPE_ERR (1 << 7)
48 /* Enable the transmitter. 48 /* Enable the transmitter.
49 * This option defaults to enabled (set) */ 49 * This option defaults to enabled (set) */
50 #define XTE_OPTION_TXEN (1 << 11) 50 #define XTE_OPTION_TXEN (1 << 11)
51 /* Enable the receiver 51 /* Enable the receiver
52 * This option defaults to enabled (set) */ 52 * This option defaults to enabled (set) */
53 #define XTE_OPTION_RXEN (1 << 12) 53 #define XTE_OPTION_RXEN (1 << 12)
54 54
55 /* Default options set when device is initialized or reset */ 55 /* Default options set when device is initialized or reset */
56 #define XTE_OPTION_DEFAULTS \ 56 #define XTE_OPTION_DEFAULTS \
57 (XTE_OPTION_TXEN | \ 57 (XTE_OPTION_TXEN | \
58 XTE_OPTION_FLOW_CONTROL | \ 58 XTE_OPTION_FLOW_CONTROL | \
59 XTE_OPTION_RXEN) 59 XTE_OPTION_RXEN)
60 60
61 /* XPS_LL_TEMAC SDMA registers definition */ 61 /* XPS_LL_TEMAC SDMA registers definition */
62 62
63 #define TX_NXTDESC_PTR 0x00 /* r */ 63 #define TX_NXTDESC_PTR 0x00 /* r */
64 #define TX_CURBUF_ADDR 0x01 /* r */ 64 #define TX_CURBUF_ADDR 0x01 /* r */
65 #define TX_CURBUF_LENGTH 0x02 /* r */ 65 #define TX_CURBUF_LENGTH 0x02 /* r */
66 #define TX_CURDESC_PTR 0x03 /* rw */ 66 #define TX_CURDESC_PTR 0x03 /* rw */
67 #define TX_TAILDESC_PTR 0x04 /* rw */ 67 #define TX_TAILDESC_PTR 0x04 /* rw */
68 #define TX_CHNL_CTRL 0x05 /* rw */ 68 #define TX_CHNL_CTRL 0x05 /* rw */
69 /* 69 /*
70 0:7 24:31 IRQTimeout 70 0:7 24:31 IRQTimeout
71 8:15 16:23 IRQCount 71 8:15 16:23 IRQCount
72 16:20 11:15 Reserved 72 16:20 11:15 Reserved
73 21 10 0 73 21 10 0
74 22 9 UseIntOnEnd 74 22 9 UseIntOnEnd
75 23 8 LdIRQCnt 75 23 8 LdIRQCnt
76 24 7 IRQEn 76 24 7 IRQEn
77 25:28 3:6 Reserved 77 25:28 3:6 Reserved
78 29 2 IrqErrEn 78 29 2 IrqErrEn
79 30 1 IrqDlyEn 79 30 1 IrqDlyEn
80 31 0 IrqCoalEn 80 31 0 IrqCoalEn
81 */ 81 */
82 #define CHNL_CTRL_IRQ_IOE (1 << 9) 82 #define CHNL_CTRL_IRQ_IOE (1 << 9)
83 #define CHNL_CTRL_IRQ_EN (1 << 7) 83 #define CHNL_CTRL_IRQ_EN (1 << 7)
84 #define CHNL_CTRL_IRQ_ERR_EN (1 << 2) 84 #define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
85 #define CHNL_CTRL_IRQ_DLY_EN (1 << 1) 85 #define CHNL_CTRL_IRQ_DLY_EN (1 << 1)
86 #define CHNL_CTRL_IRQ_COAL_EN (1 << 0) 86 #define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
87 #define TX_IRQ_REG 0x06 /* rw */ 87 #define TX_IRQ_REG 0x06 /* rw */
88 /* 88 /*
89 0:7 24:31 DltTmrValue 89 0:7 24:31 DltTmrValue
90 8:15 16:23 ClscCntrValue 90 8:15 16:23 ClscCntrValue
91 16:17 14:15 Reserved 91 16:17 14:15 Reserved
92 18:21 10:13 ClscCnt 92 18:21 10:13 ClscCnt
93 22:23 8:9 DlyCnt 93 22:23 8:9 DlyCnt
94 24:28 3:7 Reserved 94 24:28 3:7 Reserved
95 29 2 ErrIrq 95 29 2 ErrIrq
96 30 1 DlyIrq 96 30 1 DlyIrq
97 31 0 CoalIrq 97 31 0 CoalIrq
98 */ 98 */
99 #define TX_CHNL_STS 0x07 /* r */ 99 #define TX_CHNL_STS 0x07 /* r */
100 /* 100 /*
101 0:9 22:31 Reserved 101 0:9 22:31 Reserved
102 10 21 TailPErr 102 10 21 TailPErr
103 11 20 CmpErr 103 11 20 CmpErr
104 12 19 AddrErr 104 12 19 AddrErr
105 13 18 NxtPErr 105 13 18 NxtPErr
106 14 17 CurPErr 106 14 17 CurPErr
107 15 16 BsyWr 107 15 16 BsyWr
108 16:23 8:15 Reserved 108 16:23 8:15 Reserved
109 24 7 Error 109 24 7 Error
110 25 6 IOE 110 25 6 IOE
111 26 5 SOE 111 26 5 SOE
112 27 4 Cmplt 112 27 4 Cmplt
113 28 3 SOP 113 28 3 SOP
114 29 2 EOP 114 29 2 EOP
115 30 1 EngBusy 115 30 1 EngBusy
116 31 0 Reserved 116 31 0 Reserved
117 */ 117 */
118 118
119 #define RX_NXTDESC_PTR 0x08 /* r */ 119 #define RX_NXTDESC_PTR 0x08 /* r */
120 #define RX_CURBUF_ADDR 0x09 /* r */ 120 #define RX_CURBUF_ADDR 0x09 /* r */
121 #define RX_CURBUF_LENGTH 0x0a /* r */ 121 #define RX_CURBUF_LENGTH 0x0a /* r */
122 #define RX_CURDESC_PTR 0x0b /* rw */ 122 #define RX_CURDESC_PTR 0x0b /* rw */
123 #define RX_TAILDESC_PTR 0x0c /* rw */ 123 #define RX_TAILDESC_PTR 0x0c /* rw */
124 #define RX_CHNL_CTRL 0x0d /* rw */ 124 #define RX_CHNL_CTRL 0x0d /* rw */
125 /* 125 /*
126 0:7 24:31 IRQTimeout 126 0:7 24:31 IRQTimeout
127 8:15 16:23 IRQCount 127 8:15 16:23 IRQCount
128 16:20 11:15 Reserved 128 16:20 11:15 Reserved
129 21 10 0 129 21 10 0
130 22 9 UseIntOnEnd 130 22 9 UseIntOnEnd
131 23 8 LdIRQCnt 131 23 8 LdIRQCnt
132 24 7 IRQEn 132 24 7 IRQEn
133 25:28 3:6 Reserved 133 25:28 3:6 Reserved
134 29 2 IrqErrEn 134 29 2 IrqErrEn
135 30 1 IrqDlyEn 135 30 1 IrqDlyEn
136 31 0 IrqCoalEn 136 31 0 IrqCoalEn
137 */ 137 */
138 #define RX_IRQ_REG 0x0e /* rw */ 138 #define RX_IRQ_REG 0x0e /* rw */
139 #define IRQ_COAL (1 << 0) 139 #define IRQ_COAL (1 << 0)
140 #define IRQ_DLY (1 << 1) 140 #define IRQ_DLY (1 << 1)
141 #define IRQ_ERR (1 << 2) 141 #define IRQ_ERR (1 << 2)
142 #define IRQ_DMAERR (1 << 7) /* this is not documented ??? */ 142 #define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
143 /* 143 /*
144 0:7 24:31 DltTmrValue 144 0:7 24:31 DltTmrValue
145 8:15 16:23 ClscCntrValue 145 8:15 16:23 ClscCntrValue
146 16:17 14:15 Reserved 146 16:17 14:15 Reserved
147 18:21 10:13 ClscCnt 147 18:21 10:13 ClscCnt
148 22:23 8:9 DlyCnt 148 22:23 8:9 DlyCnt
149 24:28 3:7 Reserved 149 24:28 3:7 Reserved
150 */ 150 */
151 #define RX_CHNL_STS 0x0f /* r */ 151 #define RX_CHNL_STS 0x0f /* r */
152 #define CHNL_STS_ENGBUSY (1 << 1) 152 #define CHNL_STS_ENGBUSY (1 << 1)
153 #define CHNL_STS_EOP (1 << 2) 153 #define CHNL_STS_EOP (1 << 2)
154 #define CHNL_STS_SOP (1 << 3) 154 #define CHNL_STS_SOP (1 << 3)
155 #define CHNL_STS_CMPLT (1 << 4) 155 #define CHNL_STS_CMPLT (1 << 4)
156 #define CHNL_STS_SOE (1 << 5) 156 #define CHNL_STS_SOE (1 << 5)
157 #define CHNL_STS_IOE (1 << 6) 157 #define CHNL_STS_IOE (1 << 6)
158 #define CHNL_STS_ERR (1 << 7) 158 #define CHNL_STS_ERR (1 << 7)
159 159
160 #define CHNL_STS_BSYWR (1 << 16) 160 #define CHNL_STS_BSYWR (1 << 16)
161 #define CHNL_STS_CURPERR (1 << 17) 161 #define CHNL_STS_CURPERR (1 << 17)
162 #define CHNL_STS_NXTPERR (1 << 18) 162 #define CHNL_STS_NXTPERR (1 << 18)
163 #define CHNL_STS_ADDRERR (1 << 19) 163 #define CHNL_STS_ADDRERR (1 << 19)
164 #define CHNL_STS_CMPERR (1 << 20) 164 #define CHNL_STS_CMPERR (1 << 20)
165 #define CHNL_STS_TAILERR (1 << 21) 165 #define CHNL_STS_TAILERR (1 << 21)
166 /* 166 /*
167 0:9 22:31 Reserved 167 0:9 22:31 Reserved
168 10 21 TailPErr 168 10 21 TailPErr
169 11 20 CmpErr 169 11 20 CmpErr
170 12 19 AddrErr 170 12 19 AddrErr
171 13 18 NxtPErr 171 13 18 NxtPErr
172 14 17 CurPErr 172 14 17 CurPErr
173 15 16 BsyWr 173 15 16 BsyWr
174 16:23 8:15 Reserved 174 16:23 8:15 Reserved
175 24 7 Error 175 24 7 Error
176 25 6 IOE 176 25 6 IOE
177 26 5 SOE 177 26 5 SOE
178 27 4 Cmplt 178 27 4 Cmplt
179 28 3 SOP 179 28 3 SOP
180 29 2 EOP 180 29 2 EOP
181 30 1 EngBusy 181 30 1 EngBusy
182 31 0 Reserved 182 31 0 Reserved
183 */ 183 */
184 184
185 #define DMA_CONTROL_REG 0x10 /* rw */ 185 #define DMA_CONTROL_REG 0x10 /* rw */
186 #define DMA_CONTROL_RST (1 << 0) 186 #define DMA_CONTROL_RST (1 << 0)
187 #define DMA_TAIL_ENABLE (1 << 2) 187 #define DMA_TAIL_ENABLE (1 << 2)
188 188
189 /* XPS_LL_TEMAC direct registers definition */ 189 /* XPS_LL_TEMAC direct registers definition */
190 190
191 #define XTE_RAF0_OFFSET 0x00 191 #define XTE_RAF0_OFFSET 0x00
192 #define RAF0_RST (1 << 0) 192 #define RAF0_RST (1 << 0)
193 #define RAF0_MCSTREJ (1 << 1) 193 #define RAF0_MCSTREJ (1 << 1)
194 #define RAF0_BCSTREJ (1 << 2) 194 #define RAF0_BCSTREJ (1 << 2)
195 #define XTE_TPF0_OFFSET 0x04 195 #define XTE_TPF0_OFFSET 0x04
196 #define XTE_IFGP0_OFFSET 0x08 196 #define XTE_IFGP0_OFFSET 0x08
197 #define XTE_ISR0_OFFSET 0x0c 197 #define XTE_ISR0_OFFSET 0x0c
198 #define ISR0_HARDACSCMPLT (1 << 0) 198 #define ISR0_HARDACSCMPLT (1 << 0)
199 #define ISR0_AUTONEG (1 << 1) 199 #define ISR0_AUTONEG (1 << 1)
200 #define ISR0_RXCMPLT (1 << 2) 200 #define ISR0_RXCMPLT (1 << 2)
201 #define ISR0_RXREJ (1 << 3) 201 #define ISR0_RXREJ (1 << 3)
202 #define ISR0_RXFIFOOVR (1 << 4) 202 #define ISR0_RXFIFOOVR (1 << 4)
203 #define ISR0_TXCMPLT (1 << 5) 203 #define ISR0_TXCMPLT (1 << 5)
204 #define ISR0_RXDCMLCK (1 << 6) 204 #define ISR0_RXDCMLCK (1 << 6)
205 205
206 #define XTE_IPR0_OFFSET 0x10 206 #define XTE_IPR0_OFFSET 0x10
207 #define XTE_IER0_OFFSET 0x14 207 #define XTE_IER0_OFFSET 0x14
208 208
209 #define XTE_MSW0_OFFSET 0x20 209 #define XTE_MSW0_OFFSET 0x20
210 #define XTE_LSW0_OFFSET 0x24 210 #define XTE_LSW0_OFFSET 0x24
211 #define XTE_CTL0_OFFSET 0x28 211 #define XTE_CTL0_OFFSET 0x28
212 #define XTE_RDY0_OFFSET 0x2c 212 #define XTE_RDY0_OFFSET 0x2c
213 213
214 #define XTE_RSE_MIIM_RR_MASK 0x0002 214 #define XTE_RSE_MIIM_RR_MASK 0x0002
215 #define XTE_RSE_MIIM_WR_MASK 0x0004 215 #define XTE_RSE_MIIM_WR_MASK 0x0004
216 #define XTE_RSE_CFG_RR_MASK 0x0020 216 #define XTE_RSE_CFG_RR_MASK 0x0020
217 #define XTE_RSE_CFG_WR_MASK 0x0040 217 #define XTE_RSE_CFG_WR_MASK 0x0040
218 #define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000) 218 #define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000)
219 219
220 /* XPS_LL_TEMAC indirect registers offset definition */ 220 /* XPS_LL_TEMAC indirect registers offset definition */
221 221
222 #define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */ 222 #define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */
223 #define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */ 223 #define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */
224 #define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */ 224 #define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */
225 #define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */ 225 #define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */
226 #define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */ 226 #define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */
227 #define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */ 227 #define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */
228 #define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */ 228 #define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */
229 #define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */ 229 #define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */
230 #define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */ 230 #define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */
231 231
232 #define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */ 232 #define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */
233 #define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */ 233 #define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */
234 #define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */ 234 #define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */
235 #define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */ 235 #define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */
236 #define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */ 236 #define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */
237 #define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */ 237 #define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */
238 #define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */ 238 #define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */
239 239
240 #define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */ 240 #define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */
241 #define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */ 241 #define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */
242 #define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */ 242 #define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */
243 243
244 #define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */ 244 #define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */
245 #define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */ 245 #define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */
246 #define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */ 246 #define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */
247 #define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */ 247 #define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */
248 #define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */ 248 #define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */
249 #define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */ 249 #define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */
250 250
251 #define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */ 251 #define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */
252 #define XTE_MC_OFFSET 0x00000340 /* MDIO configuration */ 252 #define XTE_MC_OFFSET 0x00000340 /* MDIO configuration */
253 #define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */ 253 #define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */
254 #define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */ 254 #define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */
255 255
256 #define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */ 256 #define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */
257 #define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */ 257 #define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */
258 #define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */ 258 #define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */
259 #define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */ 259 #define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */
260 260
261 /* Interrupt Request status */ 261 /* Interrupt Request status */
262 #define XTE_TIS_OFFSET 0x000003A0 262 #define XTE_TIS_OFFSET 0x000003A0
263 #define TIS_FRIS (1 << 0) 263 #define TIS_FRIS (1 << 0)
264 #define TIS_MRIS (1 << 1) 264 #define TIS_MRIS (1 << 1)
265 #define TIS_MWIS (1 << 2) 265 #define TIS_MWIS (1 << 2)
266 #define TIS_ARIS (1 << 3) 266 #define TIS_ARIS (1 << 3)
267 #define TIS_AWIS (1 << 4) 267 #define TIS_AWIS (1 << 4)
268 #define TIS_CRIS (1 << 5) 268 #define TIS_CRIS (1 << 5)
269 #define TIS_CWIS (1 << 6) 269 #define TIS_CWIS (1 << 6)
270 270
271 #define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */ 271 #define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
272 272
273 /** MII Management Control register (MGTCR) */ 273 /** MII Management Control register (MGTCR) */
274 #define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */ 274 #define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
275 #define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */ 275 #define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
276 276
277 #define CNTLREG_WRITE_ENABLE_MASK 0x8000 277 #define CNTLREG_WRITE_ENABLE_MASK 0x8000
278 #define CNTLREG_EMAC1SEL_MASK 0x0400 278 #define CNTLREG_EMAC1SEL_MASK 0x0400
279 #define CNTLREG_ADDRESSCODE_MASK 0x03ff 279 #define CNTLREG_ADDRESSCODE_MASK 0x03ff
280 280
281 /* CDMAC descriptor status bit definitions */ 281 /* CDMAC descriptor status bit definitions */
282 282
283 #define STS_CTRL_APP0_ERR (1 << 31) 283 #define STS_CTRL_APP0_ERR (1 << 31)
284 #define STS_CTRL_APP0_IRQONEND (1 << 30) 284 #define STS_CTRL_APP0_IRQONEND (1 << 30)
285 /* undocumented */ 285 /* undocumented */
286 #define STS_CTRL_APP0_STOPONEND (1 << 29) 286 #define STS_CTRL_APP0_STOPONEND (1 << 29)
287 #define STS_CTRL_APP0_CMPLT (1 << 28) 287 #define STS_CTRL_APP0_CMPLT (1 << 28)
288 #define STS_CTRL_APP0_SOP (1 << 27) 288 #define STS_CTRL_APP0_SOP (1 << 27)
289 #define STS_CTRL_APP0_EOP (1 << 26) 289 #define STS_CTRL_APP0_EOP (1 << 26)
290 #define STS_CTRL_APP0_ENGBUSY (1 << 25) 290 #define STS_CTRL_APP0_ENGBUSY (1 << 25)
291 /* undocumented */ 291 /* undocumented */
292 #define STS_CTRL_APP0_ENGRST (1 << 24) 292 #define STS_CTRL_APP0_ENGRST (1 << 24)
293 293
294 #define TX_CONTROL_CALC_CSUM_MASK 1 294 #define TX_CONTROL_CALC_CSUM_MASK 1
295 295
296 #define MULTICAST_CAM_TABLE_NUM 4 296 #define MULTICAST_CAM_TABLE_NUM 4
297 297
298 /* TEMAC Synthesis features */
299 #define TEMAC_FEATURE_RX_CSUM (1 << 0)
300 #define TEMAC_FEATURE_TX_CSUM (1 << 1)
301
298 /* TX/RX CURDESC_PTR points to first descriptor */ 302 /* TX/RX CURDESC_PTR points to first descriptor */
299 /* TX/RX TAILDESC_PTR points to last descriptor in linked list */ 303 /* TX/RX TAILDESC_PTR points to last descriptor in linked list */
300 304
301 /** 305 /**
302 * struct cdmac_bd - LocalLink buffer descriptor format 306 * struct cdmac_bd - LocalLink buffer descriptor format
303 * 307 *
304 * app0 bits: 308 * app0 bits:
305 * 0 Error 309 * 0 Error
306 * 1 IrqOnEnd generate an interrupt at completion of DMA op 310 * 1 IrqOnEnd generate an interrupt at completion of DMA op
307 * 2 reserved 311 * 2 reserved
308 * 3 completed Current descriptor completed 312 * 3 completed Current descriptor completed
309 * 4 SOP TX - marks first desc/ RX marks first desc 313 * 4 SOP TX - marks first desc/ RX marks first desc
310 * 5 EOP TX marks last desc/RX marks last desc 314 * 5 EOP TX marks last desc/RX marks last desc
311 * 6 EngBusy DMA is processing 315 * 6 EngBusy DMA is processing
312 * 7 reserved 316 * 7 reserved
313 * 8:31 application specific 317 * 8:31 application specific
314 */ 318 */
315 struct cdmac_bd { 319 struct cdmac_bd {
316 u32 next; /* Physical address of next buffer descriptor */ 320 u32 next; /* Physical address of next buffer descriptor */
317 u32 phys; 321 u32 phys;
318 u32 len; 322 u32 len;
319 u32 app0; 323 u32 app0;
320 u32 app1; /* TX start << 16 | insert */ 324 u32 app1; /* TX start << 16 | insert */
321 u32 app2; /* TX csum */ 325 u32 app2; /* TX csum */
322 u32 app3; 326 u32 app3;
323 u32 app4; /* skb for TX length for RX */ 327 u32 app4; /* skb for TX length for RX */
324 }; 328 };
325 329
326 struct temac_local { 330 struct temac_local {
327 struct net_device *ndev; 331 struct net_device *ndev;
328 struct device *dev; 332 struct device *dev;
329 333
330 /* Connection to PHY device */ 334 /* Connection to PHY device */
331 struct phy_device *phy_dev; /* Pointer to PHY device */ 335 struct phy_device *phy_dev; /* Pointer to PHY device */
332 struct device_node *phy_node; 336 struct device_node *phy_node;
333 337
334 /* MDIO bus data */ 338 /* MDIO bus data */
335 struct mii_bus *mii_bus; /* MII bus reference */ 339 struct mii_bus *mii_bus; /* MII bus reference */
336 int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ 340 int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
337 341
338 /* IO registers, dma functions and IRQs */ 342 /* IO registers, dma functions and IRQs */
339 void __iomem *regs; 343 void __iomem *regs;
340 void __iomem *sdma_regs; 344 void __iomem *sdma_regs;
341 #ifdef CONFIG_PPC_DCR 345 #ifdef CONFIG_PPC_DCR
342 dcr_host_t sdma_dcrs; 346 dcr_host_t sdma_dcrs;
343 #endif 347 #endif
344 u32 (*dma_in)(struct temac_local *, int); 348 u32 (*dma_in)(struct temac_local *, int);
345 void (*dma_out)(struct temac_local *, int, u32); 349 void (*dma_out)(struct temac_local *, int, u32);
346 350
347 int tx_irq; 351 int tx_irq;
348 int rx_irq; 352 int rx_irq;
349 int emac_num; 353 int emac_num;
350 354
351 struct sk_buff **rx_skb; 355 struct sk_buff **rx_skb;
352 spinlock_t rx_lock; 356 spinlock_t rx_lock;
353 struct mutex indirect_mutex; 357 struct mutex indirect_mutex;
354 u32 options; /* Current options word */ 358 u32 options; /* Current options word */
355 int last_link; 359 int last_link;
360 unsigned int temac_features;
356 361
357 /* Buffer descriptors */ 362 /* Buffer descriptors */
358 struct cdmac_bd *tx_bd_v; 363 struct cdmac_bd *tx_bd_v;
359 dma_addr_t tx_bd_p; 364 dma_addr_t tx_bd_p;
360 struct cdmac_bd *rx_bd_v; 365 struct cdmac_bd *rx_bd_v;
361 dma_addr_t rx_bd_p; 366 dma_addr_t rx_bd_p;
362 int tx_bd_ci; 367 int tx_bd_ci;
363 int tx_bd_next; 368 int tx_bd_next;
364 int tx_bd_tail; 369 int tx_bd_tail;
365 int rx_bd_ci; 370 int rx_bd_ci;
366 }; 371 };
367 372
368 /* xilinx_temac.c */ 373 /* xilinx_temac.c */
369 u32 temac_ior(struct temac_local *lp, int offset); 374 u32 temac_ior(struct temac_local *lp, int offset);
370 void temac_iow(struct temac_local *lp, int offset, u32 value); 375 void temac_iow(struct temac_local *lp, int offset, u32 value);
371 int temac_indirect_busywait(struct temac_local *lp); 376 int temac_indirect_busywait(struct temac_local *lp);
372 u32 temac_indirect_in32(struct temac_local *lp, int reg); 377 u32 temac_indirect_in32(struct temac_local *lp, int reg);
373 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value); 378 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
374 379
375 380
376 /* xilinx_temac_mdio.c */ 381 /* xilinx_temac_mdio.c */
377 int temac_mdio_setup(struct temac_local *lp, struct device_node *np); 382 int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
378 void temac_mdio_teardown(struct temac_local *lp); 383 void temac_mdio_teardown(struct temac_local *lp);
379 384
380 #endif /* XILINX_LL_TEMAC_H */ 385 #endif /* XILINX_LL_TEMAC_H */
381 386
drivers/net/ll_temac_main.c
1 /* 1 /*
2 * Driver for Xilinx TEMAC Ethernet device 2 * Driver for Xilinx TEMAC Ethernet device
3 * 3 *
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 * 7 *
8 * This is a driver for the Xilinx ll_temac ipcore which is often used 8 * This is a driver for the Xilinx ll_temac ipcore which is often used
9 * in the Virtex and Spartan series of chips. 9 * in the Virtex and Spartan series of chips.
10 * 10 *
11 * Notes: 11 * Notes:
12 * - The ll_temac hardware uses indirect access for many of the TEMAC 12 * - The ll_temac hardware uses indirect access for many of the TEMAC
13 * registers, include the MDIO bus. However, indirect access to MDIO 13 * registers, include the MDIO bus. However, indirect access to MDIO
14 * registers take considerably more clock cycles than to TEMAC registers. 14 * registers take considerably more clock cycles than to TEMAC registers.
15 * MDIO accesses are long, so threads doing them should probably sleep 15 * MDIO accesses are long, so threads doing them should probably sleep
16 * rather than busywait. However, since only one indirect access can be 16 * rather than busywait. However, since only one indirect access can be
17 * in progress at any given time, that means that *all* indirect accesses 17 * in progress at any given time, that means that *all* indirect accesses
18 * could end up sleeping (to wait for an MDIO access to complete). 18 * could end up sleeping (to wait for an MDIO access to complete).
19 * Fortunately none of the indirect accesses are on the 'hot' path for tx 19 * Fortunately none of the indirect accesses are on the 'hot' path for tx
20 * or rx, so this should be okay. 20 * or rx, so this should be okay.
21 * 21 *
22 * TODO: 22 * TODO:
23 * - Factor out locallink DMA code into separate driver 23 * - Factor out locallink DMA code into separate driver
24 * - Fix multicast assignment. 24 * - Fix multicast assignment.
25 * - Fix support for hardware checksumming. 25 * - Fix support for hardware checksumming.
26 * - Testing. Lots and lots of testing. 26 * - Testing. Lots and lots of testing.
27 * 27 *
28 */ 28 */
29 29
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/etherdevice.h> 31 #include <linux/etherdevice.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/mii.h> 33 #include <linux/mii.h>
34 #include <linux/module.h> 34 #include <linux/module.h>
35 #include <linux/mutex.h> 35 #include <linux/mutex.h>
36 #include <linux/netdevice.h> 36 #include <linux/netdevice.h>
37 #include <linux/of.h> 37 #include <linux/of.h>
38 #include <linux/of_device.h> 38 #include <linux/of_device.h>
39 #include <linux/of_mdio.h> 39 #include <linux/of_mdio.h>
40 #include <linux/of_platform.h> 40 #include <linux/of_platform.h>
41 #include <linux/skbuff.h> 41 #include <linux/skbuff.h>
42 #include <linux/spinlock.h> 42 #include <linux/spinlock.h>
43 #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ 43 #include <linux/tcp.h> /* needed for sizeof(tcphdr) */
44 #include <linux/udp.h> /* needed for sizeof(udphdr) */ 44 #include <linux/udp.h> /* needed for sizeof(udphdr) */
45 #include <linux/phy.h> 45 #include <linux/phy.h>
46 #include <linux/in.h> 46 #include <linux/in.h>
47 #include <linux/io.h> 47 #include <linux/io.h>
48 #include <linux/ip.h> 48 #include <linux/ip.h>
49 #include <linux/slab.h> 49 #include <linux/slab.h>
50 50
51 #include "ll_temac.h" 51 #include "ll_temac.h"
52 52
53 #define TX_BD_NUM 64 53 #define TX_BD_NUM 64
54 #define RX_BD_NUM 128 54 #define RX_BD_NUM 128
55 55
56 /* --------------------------------------------------------------------- 56 /* ---------------------------------------------------------------------
57 * Low level register access functions 57 * Low level register access functions
58 */ 58 */
59 59
60 u32 temac_ior(struct temac_local *lp, int offset) 60 u32 temac_ior(struct temac_local *lp, int offset)
61 { 61 {
62 return in_be32((u32 *)(lp->regs + offset)); 62 return in_be32((u32 *)(lp->regs + offset));
63 } 63 }
64 64
65 void temac_iow(struct temac_local *lp, int offset, u32 value) 65 void temac_iow(struct temac_local *lp, int offset, u32 value)
66 { 66 {
67 out_be32((u32 *) (lp->regs + offset), value); 67 out_be32((u32 *) (lp->regs + offset), value);
68 } 68 }
69 69
70 int temac_indirect_busywait(struct temac_local *lp) 70 int temac_indirect_busywait(struct temac_local *lp)
71 { 71 {
72 long end = jiffies + 2; 72 long end = jiffies + 2;
73 73
74 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) { 74 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
75 if (end - jiffies <= 0) { 75 if (end - jiffies <= 0) {
76 WARN_ON(1); 76 WARN_ON(1);
77 return -ETIMEDOUT; 77 return -ETIMEDOUT;
78 } 78 }
79 msleep(1); 79 msleep(1);
80 } 80 }
81 return 0; 81 return 0;
82 } 82 }
83 83
84 /** 84 /**
85 * temac_indirect_in32 85 * temac_indirect_in32
86 * 86 *
87 * lp->indirect_mutex must be held when calling this function 87 * lp->indirect_mutex must be held when calling this function
88 */ 88 */
89 u32 temac_indirect_in32(struct temac_local *lp, int reg) 89 u32 temac_indirect_in32(struct temac_local *lp, int reg)
90 { 90 {
91 u32 val; 91 u32 val;
92 92
93 if (temac_indirect_busywait(lp)) 93 if (temac_indirect_busywait(lp))
94 return -ETIMEDOUT; 94 return -ETIMEDOUT;
95 temac_iow(lp, XTE_CTL0_OFFSET, reg); 95 temac_iow(lp, XTE_CTL0_OFFSET, reg);
96 if (temac_indirect_busywait(lp)) 96 if (temac_indirect_busywait(lp))
97 return -ETIMEDOUT; 97 return -ETIMEDOUT;
98 val = temac_ior(lp, XTE_LSW0_OFFSET); 98 val = temac_ior(lp, XTE_LSW0_OFFSET);
99 99
100 return val; 100 return val;
101 } 101 }
102 102
103 /** 103 /**
104 * temac_indirect_out32 104 * temac_indirect_out32
105 * 105 *
106 * lp->indirect_mutex must be held when calling this function 106 * lp->indirect_mutex must be held when calling this function
107 */ 107 */
108 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) 108 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
109 { 109 {
110 if (temac_indirect_busywait(lp)) 110 if (temac_indirect_busywait(lp))
111 return; 111 return;
112 temac_iow(lp, XTE_LSW0_OFFSET, value); 112 temac_iow(lp, XTE_LSW0_OFFSET, value);
113 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 113 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
114 } 114 }
115 115
116 /** 116 /**
117 * temac_dma_in32 - Memory mapped DMA read, this function expects a 117 * temac_dma_in32 - Memory mapped DMA read, this function expects a
118 * register input that is based on DCR word addresses which 118 * register input that is based on DCR word addresses which
119 * are then converted to memory mapped byte addresses 119 * are then converted to memory mapped byte addresses
120 */ 120 */
121 static u32 temac_dma_in32(struct temac_local *lp, int reg) 121 static u32 temac_dma_in32(struct temac_local *lp, int reg)
122 { 122 {
123 return in_be32((u32 *)(lp->sdma_regs + (reg << 2))); 123 return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
124 } 124 }
125 125
126 /** 126 /**
127 * temac_dma_out32 - Memory mapped DMA read, this function expects a 127 * temac_dma_out32 - Memory mapped DMA read, this function expects a
128 * register input that is based on DCR word addresses which 128 * register input that is based on DCR word addresses which
129 * are then converted to memory mapped byte addresses 129 * are then converted to memory mapped byte addresses
130 */ 130 */
131 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) 131 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
132 { 132 {
133 out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value); 133 out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
134 } 134 }
135 135
136 /* DMA register access functions can be DCR based or memory mapped. 136 /* DMA register access functions can be DCR based or memory mapped.
137 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both 137 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
138 * memory mapped. 138 * memory mapped.
139 */ 139 */
140 #ifdef CONFIG_PPC_DCR 140 #ifdef CONFIG_PPC_DCR
141 141
/**
 * temac_dma_dcr_in32 - DCR based DMA register read.
 *
 * Used on DCR-equipped PowerPC (440) parts where the SDMA engine is
 * reached through the DCR bus instead of a memory mapping.
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}
149 149
/**
 * temac_dma_dcr_out32 - DCR based DMA register write.
 *
 * Counterpart to temac_dma_dcr_in() for DCR-equipped parts.
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}
157 157
/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions.
 *
 * Returns 0 when a DCR resource was found in the device tree and the
 * DCR accessors were installed, -1 otherwise (caller falls back to the
 * memory-mapped accessors).
 *
 * NOTE(review): the dcr_map() result is not validated (no DCR_MAP_OK
 * check) — a failed mapping would go unnoticed here; confirm.
 */
static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
				struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		/* Route all DMA register accesses through the DCR bus */
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}
180 180
181 #else 181 #else
182 182
/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze.  Always reports failure so the caller
 * uses the memory-mapped DMA accessors instead.
 */
static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
				struct device_node *np)
{
	return -1;
}
192 192
193 #endif 193 #endif
194 194
195 /** 195 /**
196 * temac_dma_bd_init - Setup buffer descriptor rings 196 * temac_dma_bd_init - Setup buffer descriptor rings
197 */ 197 */
198 static int temac_dma_bd_init(struct net_device *ndev) 198 static int temac_dma_bd_init(struct net_device *ndev)
199 { 199 {
200 struct temac_local *lp = netdev_priv(ndev); 200 struct temac_local *lp = netdev_priv(ndev);
201 struct sk_buff *skb; 201 struct sk_buff *skb;
202 int i; 202 int i;
203 203
204 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); 204 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
205 /* allocate the tx and rx ring buffer descriptors. */ 205 /* allocate the tx and rx ring buffer descriptors. */
206 /* returns a virtual addres and a physical address. */ 206 /* returns a virtual addres and a physical address. */
207 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 207 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
208 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 208 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
209 &lp->tx_bd_p, GFP_KERNEL); 209 &lp->tx_bd_p, GFP_KERNEL);
210 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 210 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
211 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 211 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
212 &lp->rx_bd_p, GFP_KERNEL); 212 &lp->rx_bd_p, GFP_KERNEL);
213 213
214 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); 214 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
215 for (i = 0; i < TX_BD_NUM; i++) { 215 for (i = 0; i < TX_BD_NUM; i++) {
216 lp->tx_bd_v[i].next = lp->tx_bd_p + 216 lp->tx_bd_v[i].next = lp->tx_bd_p +
217 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); 217 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
218 } 218 }
219 219
220 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM); 220 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
221 for (i = 0; i < RX_BD_NUM; i++) { 221 for (i = 0; i < RX_BD_NUM; i++) {
222 lp->rx_bd_v[i].next = lp->rx_bd_p + 222 lp->rx_bd_v[i].next = lp->rx_bd_p +
223 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); 223 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
224 224
225 skb = netdev_alloc_skb_ip_align(ndev, 225 skb = netdev_alloc_skb_ip_align(ndev,
226 XTE_MAX_JUMBO_FRAME_SIZE); 226 XTE_MAX_JUMBO_FRAME_SIZE);
227 227
228 if (skb == 0) { 228 if (skb == 0) {
229 dev_err(&ndev->dev, "alloc_skb error %d\n", i); 229 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
230 return -1; 230 return -1;
231 } 231 }
232 lp->rx_skb[i] = skb; 232 lp->rx_skb[i] = skb;
233 /* returns physical address of skb->data */ 233 /* returns physical address of skb->data */
234 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 234 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
235 skb->data, 235 skb->data,
236 XTE_MAX_JUMBO_FRAME_SIZE, 236 XTE_MAX_JUMBO_FRAME_SIZE,
237 DMA_FROM_DEVICE); 237 DMA_FROM_DEVICE);
238 lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE; 238 lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
239 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND; 239 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
240 } 240 }
241 241
242 lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 | 242 lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
243 CHNL_CTRL_IRQ_EN | 243 CHNL_CTRL_IRQ_EN |
244 CHNL_CTRL_IRQ_DLY_EN | 244 CHNL_CTRL_IRQ_DLY_EN |
245 CHNL_CTRL_IRQ_COAL_EN); 245 CHNL_CTRL_IRQ_COAL_EN);
246 /* 0x10220483 */ 246 /* 0x10220483 */
247 /* 0x00100483 */ 247 /* 0x00100483 */
248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | 248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
249 CHNL_CTRL_IRQ_EN | 249 CHNL_CTRL_IRQ_EN |
250 CHNL_CTRL_IRQ_DLY_EN | 250 CHNL_CTRL_IRQ_DLY_EN |
251 CHNL_CTRL_IRQ_COAL_EN | 251 CHNL_CTRL_IRQ_COAL_EN |
252 CHNL_CTRL_IRQ_IOE); 252 CHNL_CTRL_IRQ_IOE);
253 /* 0xff010283 */ 253 /* 0xff010283 */
254 254
255 lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); 255 lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
256 lp->dma_out(lp, RX_TAILDESC_PTR, 256 lp->dma_out(lp, RX_TAILDESC_PTR,
257 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 257 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
258 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 258 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
259 259
260 return 0; 260 return 0;
261 } 261 }
262 262
263 /* --------------------------------------------------------------------- 263 /* ---------------------------------------------------------------------
264 * net_device_ops 264 * net_device_ops
265 */ 265 */
266 266
/*
 * Program the unicast address filter with @address (or keep the current
 * ndev->dev_addr if @address is NULL, generating a random address if the
 * current one is invalid).  Always returns 0.
 */
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* set up unicast MAC address filter set its mac address */
	mutex_lock(&lp->indirect_mutex);
	/* MAC bytes 0-3 go into UAW0 */
	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
			     (ndev->dev_addr[0]) |
			     (ndev->dev_addr[1] << 8) |
			     (ndev->dev_addr[2] << 16) |
			     (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1
	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
			     (ndev->dev_addr[4] & 0x000000ff) |
			     (ndev->dev_addr[5] << 8));
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
293 293
294 static int netdev_set_mac_address(struct net_device *ndev, void *p) 294 static int netdev_set_mac_address(struct net_device *ndev, void *p)
295 { 295 {
296 struct sockaddr *addr = p; 296 struct sockaddr *addr = p;
297 297
298 return temac_set_mac_address(ndev, addr->sa_data); 298 return temac_set_mac_address(ndev, addr->sa_data);
299 } 299 }
300 300
/*
 * Rebuild the hardware address filter to match ndev's current flags and
 * multicast list.  Three cases:
 *  - allmulti/promisc requested, or more multicast addresses than CAM
 *    entries: enable promiscuous mode (and reflect that in ndev->flags);
 *  - a non-empty multicast list that fits: load each address into a CAM
 *    entry (entry index is carried in bits 16+ of MAW1);
 *  - empty list: clear promiscuous mode and wipe CAM entry 0.
 */
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw, val;
	int i;

	mutex_lock(&lp->indirect_mutex);
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
			netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
		/*
		 *	We must make the kernel realise we had to move
		 *	into promisc mode or we start all out war on
		 *	the cable. If it was a promisc request the
		 *	flag is already set. If not we assert it.
		 */
		ndev->flags |= IFF_PROMISC;
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= MULTICAST_CAM_TABLE_NUM)
				break;
			/* MAC bytes 0-3 into MAW0, bytes 4-5 plus the CAM
			 * entry index into MAW1; the MAW1 write commits
			 * the entry. */
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
					     multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
					     multi_addr_lsw);
			i++;
		}
	} else {
		/* No multicast addresses: drop promiscuous filtering and
		 * clear CAM entry 0. */
		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
		temac_indirect_out32(lp, XTE_AFM_OFFSET,
				     val & ~XTE_AFM_EPPRM_MASK);
		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
	mutex_unlock(&lp->indirect_mutex);
}
348 348
/*
 * Table mapping XTE_OPTION_* flags onto TEMAC register bits.
 * temac_setoptions() walks this table: for each entry it clears m_or in
 * register reg, then sets m_or again if the corresponding option flag is
 * requested.  (The flg and m_and fields are currently unused.)
 * The table is terminated by a zeroed sentinel entry.
 */
struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or =XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or =XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or =XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or =XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or =XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or =XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or =XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or =XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received ) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or =XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or =XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver? */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or =XTE_RXC1_RXEN_MASK,
	},
	{}
};
428 428
429 /** 429 /**
430 * temac_setoptions 430 * temac_setoptions
431 */ 431 */
432 static u32 temac_setoptions(struct net_device *ndev, u32 options) 432 static u32 temac_setoptions(struct net_device *ndev, u32 options)
433 { 433 {
434 struct temac_local *lp = netdev_priv(ndev); 434 struct temac_local *lp = netdev_priv(ndev);
435 struct temac_option *tp = &temac_options[0]; 435 struct temac_option *tp = &temac_options[0];
436 int reg; 436 int reg;
437 437
438 mutex_lock(&lp->indirect_mutex); 438 mutex_lock(&lp->indirect_mutex);
439 while (tp->opt) { 439 while (tp->opt) {
440 reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or; 440 reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
441 if (options & tp->opt) 441 if (options & tp->opt)
442 reg |= tp->m_or; 442 reg |= tp->m_or;
443 temac_indirect_out32(lp, tp->reg, reg); 443 temac_indirect_out32(lp, tp->reg, reg);
444 tp++; 444 tp++;
445 } 445 }
446 lp->options |= options; 446 lp->options |= options;
447 mutex_unlock(&lp->indirect_mutex); 447 mutex_unlock(&lp->indirect_mutex);
448 448
449 return (0); 449 return (0);
450 } 450 }
451 451
/* Initilize temac: soft-reset the receiver, transmitter and Local Link
 * DMA engine (each with a 1000us polled timeout), rebuild the buffer
 * descriptor rings, clear the core config registers, and re-apply the
 * driver's stored options, MAC address and multicast filter. */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	/* Rebuild the descriptor rings now that the DMA engine is quiet */
	temac_dma_bd_init(ndev);

	/* Clear core configuration; flow control RX stays enabled */
	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);

	mutex_unlock(&lp->indirect_mutex);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled. */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_set_mac_address(ndev, NULL);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	ndev->trans_start = jiffies; /* prevent tx timeout */
}
532 532
/*
 * PHY state-change callback: when the (speed, duplex, link) tuple
 * differs from the last value seen, update the TEMAC link-speed field
 * and log the new PHY status.
 */
void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	u32 mii_speed;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
		/* Read-modify-write only the link speed bits of EMCFG */
		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
		lp->last_link = link_state;
		phy_print_status(phy);
	}
	mutex_unlock(&lp->indirect_mutex);
}
561 561
/*
 * TX completion handler: walk the TX ring from tx_bd_ci, reclaiming
 * every descriptor the hardware has marked complete — unmap the buffer,
 * free the skb stashed in app4 (only the last descriptor of a packet
 * carries it), zero the app words so the descriptor can be reused, and
 * bump the stats.  Finally wake the queue.
 */
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = cur_p->app0;

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/* Clear all app words; app0 == 0 marks the BD free for
		 * temac_check_tx_bd_space() */
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += cur_p->len;

		/* Advance the consumer index around the ring */
		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = cur_p->app0;
	}

	netif_wake_queue(ndev);
}
591 595
596 static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
597 {
598 struct cdmac_bd *cur_p;
599 int tail;
600
601 tail = lp->tx_bd_tail;
602 cur_p = &lp->tx_bd_v[tail];
603
604 do {
605 if (cur_p->app0)
606 return NETDEV_TX_BUSY;
607
608 tail++;
609 if (tail >= TX_BD_NUM)
610 tail = 0;
611
612 cur_p = &lp->tx_bd_v[tail];
613 num_frag--;
614 } while (num_frag >= 0);
615
616 return 0;
617 }
618
/*
 * ndo_start_xmit: queue @skb on the TX descriptor ring.
 *
 * If the ring lacks num_frag + 1 free descriptors the queue is stopped
 * and NETDEV_TX_BUSY is returned.  For CHECKSUM_PARTIAL skbs the
 * checksum-offload fields are programmed into app0/app1/app2 of the
 * head descriptor.  The head descriptor carries SOP and the skb pointer
 * (in app4, used at completion time to free it); each fragment gets its
 * own descriptor; the last descriptor gets EOP.  Writing TX_TAILDESC_PTR
 * kicks off the DMA transfer.
 */
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* app1: (offset where checksumming starts) << 16 |
		 * (offset where the computed checksum is stored) */
		unsigned int csum_start_off = skb_transport_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= 1; /* TX Checksum Enabled */
		cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= STS_CTRL_APP0_SOP;
	cur_p->len = skb_headlen(skb);
	/* NOTE(review): maps skb->len bytes but sets len to skb_headlen();
	 * for fragmented skbs these differ — confirm intended. */
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
				     DMA_TO_DEVICE);
	cur_p->app4 = (unsigned long)skb;

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     (void *)page_address(frag->page) +
						  frag->page_offset,
					     frag->size, DMA_TO_DEVICE);
		cur_p->len = frag->size;
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= STS_CTRL_APP0_EOP;

	/* Advance tail past the last descriptor of this packet */
	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	/* Kick off the transfer */
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}
671 683
672 684
673 static void ll_temac_recv(struct net_device *ndev) 685 static void ll_temac_recv(struct net_device *ndev)
674 { 686 {
675 struct temac_local *lp = netdev_priv(ndev); 687 struct temac_local *lp = netdev_priv(ndev);
676 struct sk_buff *skb, *new_skb; 688 struct sk_buff *skb, *new_skb;
677 unsigned int bdstat; 689 unsigned int bdstat;
678 struct cdmac_bd *cur_p; 690 struct cdmac_bd *cur_p;
679 dma_addr_t tail_p; 691 dma_addr_t tail_p;
680 int length; 692 int length;
681 unsigned long flags; 693 unsigned long flags;
682 694
683 spin_lock_irqsave(&lp->rx_lock, flags); 695 spin_lock_irqsave(&lp->rx_lock, flags);
684 696
685 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 697 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
686 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 698 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
687 699
688 bdstat = cur_p->app0; 700 bdstat = cur_p->app0;
689 while ((bdstat & STS_CTRL_APP0_CMPLT)) { 701 while ((bdstat & STS_CTRL_APP0_CMPLT)) {
690 702
691 skb = lp->rx_skb[lp->rx_bd_ci]; 703 skb = lp->rx_skb[lp->rx_bd_ci];
692 length = cur_p->app4 & 0x3FFF; 704 length = cur_p->app4 & 0x3FFF;
693 705
694 dma_unmap_single(ndev->dev.parent, cur_p->phys, length, 706 dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
695 DMA_FROM_DEVICE); 707 DMA_FROM_DEVICE);
696 708
697 skb_put(skb, length); 709 skb_put(skb, length);
698 skb->dev = ndev; 710 skb->dev = ndev;
699 skb->protocol = eth_type_trans(skb, ndev); 711 skb->protocol = eth_type_trans(skb, ndev);
700 skb->ip_summed = CHECKSUM_NONE; 712 skb->ip_summed = CHECKSUM_NONE;
701 713
714 /* if we're doing rx csum offload, set it up */
715 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
716 (skb->protocol == __constant_htons(ETH_P_IP)) &&
717 (skb->len > 64)) {
718
719 skb->csum = cur_p->app3 & 0xFFFF;
720 skb->ip_summed = CHECKSUM_COMPLETE;
721 }
722
702 netif_rx(skb); 723 netif_rx(skb);
703 724
704 ndev->stats.rx_packets++; 725 ndev->stats.rx_packets++;
705 ndev->stats.rx_bytes += length; 726 ndev->stats.rx_bytes += length;
706 727
707 new_skb = netdev_alloc_skb_ip_align(ndev, 728 new_skb = netdev_alloc_skb_ip_align(ndev,
708 XTE_MAX_JUMBO_FRAME_SIZE); 729 XTE_MAX_JUMBO_FRAME_SIZE);
709 730
710 if (new_skb == 0) { 731 if (new_skb == 0) {
711 dev_err(&ndev->dev, "no memory for new sk_buff\n"); 732 dev_err(&ndev->dev, "no memory for new sk_buff\n");
712 spin_unlock_irqrestore(&lp->rx_lock, flags); 733 spin_unlock_irqrestore(&lp->rx_lock, flags);
713 return; 734 return;
714 } 735 }
715 736
716 cur_p->app0 = STS_CTRL_APP0_IRQONEND; 737 cur_p->app0 = STS_CTRL_APP0_IRQONEND;
717 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, 738 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
718 XTE_MAX_JUMBO_FRAME_SIZE, 739 XTE_MAX_JUMBO_FRAME_SIZE,
719 DMA_FROM_DEVICE); 740 DMA_FROM_DEVICE);
720 cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE; 741 cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
721 lp->rx_skb[lp->rx_bd_ci] = new_skb; 742 lp->rx_skb[lp->rx_bd_ci] = new_skb;
722 743
723 lp->rx_bd_ci++; 744 lp->rx_bd_ci++;
724 if (lp->rx_bd_ci >= RX_BD_NUM) 745 if (lp->rx_bd_ci >= RX_BD_NUM)
725 lp->rx_bd_ci = 0; 746 lp->rx_bd_ci = 0;
726 747
727 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 748 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
728 bdstat = cur_p->app0; 749 bdstat = cur_p->app0;
729 } 750 }
730 lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); 751 lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
731 752
732 spin_unlock_irqrestore(&lp->rx_lock, flags); 753 spin_unlock_irqrestore(&lp->rx_lock, flags);
733 } 754 }
734 755
735 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) 756 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
736 { 757 {
737 struct net_device *ndev = _ndev; 758 struct net_device *ndev = _ndev;
738 struct temac_local *lp = netdev_priv(ndev); 759 struct temac_local *lp = netdev_priv(ndev);
739 unsigned int status; 760 unsigned int status;
740 761
741 status = lp->dma_in(lp, TX_IRQ_REG); 762 status = lp->dma_in(lp, TX_IRQ_REG);
742 lp->dma_out(lp, TX_IRQ_REG, status); 763 lp->dma_out(lp, TX_IRQ_REG, status);
743 764
744 if (status & (IRQ_COAL | IRQ_DLY)) 765 if (status & (IRQ_COAL | IRQ_DLY))
745 temac_start_xmit_done(lp->ndev); 766 temac_start_xmit_done(lp->ndev);
746 if (status & 0x080) 767 if (status & 0x080)
747 dev_err(&ndev->dev, "DMA error 0x%x\n", status); 768 dev_err(&ndev->dev, "DMA error 0x%x\n", status);
748 769
749 return IRQ_HANDLED; 770 return IRQ_HANDLED;
750 } 771 }
751 772
752 static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) 773 static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
753 { 774 {
754 struct net_device *ndev = _ndev; 775 struct net_device *ndev = _ndev;
755 struct temac_local *lp = netdev_priv(ndev); 776 struct temac_local *lp = netdev_priv(ndev);
756 unsigned int status; 777 unsigned int status;
757 778
758 /* Read and clear the status registers */ 779 /* Read and clear the status registers */
759 status = lp->dma_in(lp, RX_IRQ_REG); 780 status = lp->dma_in(lp, RX_IRQ_REG);
760 lp->dma_out(lp, RX_IRQ_REG, status); 781 lp->dma_out(lp, RX_IRQ_REG, status);
761 782
762 if (status & (IRQ_COAL | IRQ_DLY)) 783 if (status & (IRQ_COAL | IRQ_DLY))
763 ll_temac_recv(lp->ndev); 784 ll_temac_recv(lp->ndev);
764 785
765 return IRQ_HANDLED; 786 return IRQ_HANDLED;
766 } 787 }
767 788
768 static int temac_open(struct net_device *ndev) 789 static int temac_open(struct net_device *ndev)
769 { 790 {
770 struct temac_local *lp = netdev_priv(ndev); 791 struct temac_local *lp = netdev_priv(ndev);
771 int rc; 792 int rc;
772 793
773 dev_dbg(&ndev->dev, "temac_open()\n"); 794 dev_dbg(&ndev->dev, "temac_open()\n");
774 795
775 if (lp->phy_node) { 796 if (lp->phy_node) {
776 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, 797 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
777 temac_adjust_link, 0, 0); 798 temac_adjust_link, 0, 0);
778 if (!lp->phy_dev) { 799 if (!lp->phy_dev) {
779 dev_err(lp->dev, "of_phy_connect() failed\n"); 800 dev_err(lp->dev, "of_phy_connect() failed\n");
780 return -ENODEV; 801 return -ENODEV;
781 } 802 }
782 803
783 phy_start(lp->phy_dev); 804 phy_start(lp->phy_dev);
784 } 805 }
785 806
786 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); 807 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
787 if (rc) 808 if (rc)
788 goto err_tx_irq; 809 goto err_tx_irq;
789 rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev); 810 rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
790 if (rc) 811 if (rc)
791 goto err_rx_irq; 812 goto err_rx_irq;
792 813
793 temac_device_reset(ndev); 814 temac_device_reset(ndev);
794 return 0; 815 return 0;
795 816
796 err_rx_irq: 817 err_rx_irq:
797 free_irq(lp->tx_irq, ndev); 818 free_irq(lp->tx_irq, ndev);
798 err_tx_irq: 819 err_tx_irq:
799 if (lp->phy_dev) 820 if (lp->phy_dev)
800 phy_disconnect(lp->phy_dev); 821 phy_disconnect(lp->phy_dev);
801 lp->phy_dev = NULL; 822 lp->phy_dev = NULL;
802 dev_err(lp->dev, "request_irq() failed\n"); 823 dev_err(lp->dev, "request_irq() failed\n");
803 return rc; 824 return rc;
804 } 825 }
805 826
806 static int temac_stop(struct net_device *ndev) 827 static int temac_stop(struct net_device *ndev)
807 { 828 {
808 struct temac_local *lp = netdev_priv(ndev); 829 struct temac_local *lp = netdev_priv(ndev);
809 830
810 dev_dbg(&ndev->dev, "temac_close()\n"); 831 dev_dbg(&ndev->dev, "temac_close()\n");
811 832
812 free_irq(lp->tx_irq, ndev); 833 free_irq(lp->tx_irq, ndev);
813 free_irq(lp->rx_irq, ndev); 834 free_irq(lp->rx_irq, ndev);
814 835
815 if (lp->phy_dev) 836 if (lp->phy_dev)
816 phy_disconnect(lp->phy_dev); 837 phy_disconnect(lp->phy_dev);
817 lp->phy_dev = NULL; 838 lp->phy_dev = NULL;
818 839
819 return 0; 840 return 0;
820 } 841 }
821 842
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service both DMA interrupts with the lines masked.
 * The handlers take the net_device through their void* argument — the
 * original code passed lp here, which made netdev_priv() inside the
 * handlers return garbage. */
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->tx_irq, ndev);
	ll_temac_tx_irq(lp->rx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
838 859
/* net_device callbacks for this driver; multicast handling is not
 * implemented yet (see the commented-out entry below). */
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_mac_address = netdev_set_mac_address,
	//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
849 870
850 /* --------------------------------------------------------------------- 871 /* ---------------------------------------------------------------------
851 * SYSFS device attributes 872 * SYSFS device attributes
852 */ 873 */
853 static ssize_t temac_show_llink_regs(struct device *dev, 874 static ssize_t temac_show_llink_regs(struct device *dev,
854 struct device_attribute *attr, char *buf) 875 struct device_attribute *attr, char *buf)
855 { 876 {
856 struct net_device *ndev = dev_get_drvdata(dev); 877 struct net_device *ndev = dev_get_drvdata(dev);
857 struct temac_local *lp = netdev_priv(ndev); 878 struct temac_local *lp = netdev_priv(ndev);
858 int i, len = 0; 879 int i, len = 0;
859 880
860 for (i = 0; i < 0x11; i++) 881 for (i = 0; i < 0x11; i++)
861 len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i), 882 len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
862 (i % 8) == 7 ? "\n" : " "); 883 (i % 8) == 7 ? "\n" : " ");
863 len += sprintf(buf + len, "\n"); 884 len += sprintf(buf + len, "\n");
864 885
865 return len; 886 return len;
866 } 887 }
867 888
/* Expose the LocalLink DMA register dump read-only (0440) via sysfs */
static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,	/* sentinel */
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
878 899
879 static int __init 900 static int __init
880 temac_of_probe(struct of_device *op, const struct of_device_id *match) 901 temac_of_probe(struct of_device *op, const struct of_device_id *match)
881 { 902 {
882 struct device_node *np; 903 struct device_node *np;
883 struct temac_local *lp; 904 struct temac_local *lp;
884 struct net_device *ndev; 905 struct net_device *ndev;
885 const void *addr; 906 const void *addr;
907 __be32 *p;
886 int size, rc = 0; 908 int size, rc = 0;
887 909
888 /* Init network device structure */ 910 /* Init network device structure */
889 ndev = alloc_etherdev(sizeof(*lp)); 911 ndev = alloc_etherdev(sizeof(*lp));
890 if (!ndev) { 912 if (!ndev) {
891 dev_err(&op->dev, "could not allocate device.\n"); 913 dev_err(&op->dev, "could not allocate device.\n");
892 return -ENOMEM; 914 return -ENOMEM;
893 } 915 }
894 ether_setup(ndev); 916 ether_setup(ndev);
895 dev_set_drvdata(&op->dev, ndev); 917 dev_set_drvdata(&op->dev, ndev);
896 SET_NETDEV_DEV(ndev, &op->dev); 918 SET_NETDEV_DEV(ndev, &op->dev);
897 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 919 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
898 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 920 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
899 ndev->netdev_ops = &temac_netdev_ops; 921 ndev->netdev_ops = &temac_netdev_ops;
900 #if 0 922 #if 0
901 ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */ 923 ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
902 ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ 924 ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
903 ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ 925 ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
904 ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ 926 ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
905 ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */ 927 ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
906 ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */ 928 ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
907 ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */ 929 ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
908 ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ 930 ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
909 ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ 931 ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
910 ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ 932 ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
911 ndev->features |= NETIF_F_LRO; /* large receive offload */ 933 ndev->features |= NETIF_F_LRO; /* large receive offload */
912 #endif 934 #endif
913 935
914 /* setup temac private info structure */ 936 /* setup temac private info structure */
915 lp = netdev_priv(ndev); 937 lp = netdev_priv(ndev);
916 lp->ndev = ndev; 938 lp->ndev = ndev;
917 lp->dev = &op->dev; 939 lp->dev = &op->dev;
918 lp->options = XTE_OPTION_DEFAULTS; 940 lp->options = XTE_OPTION_DEFAULTS;
919 spin_lock_init(&lp->rx_lock); 941 spin_lock_init(&lp->rx_lock);
920 mutex_init(&lp->indirect_mutex); 942 mutex_init(&lp->indirect_mutex);
921 943
922 /* map device registers */ 944 /* map device registers */
923 lp->regs = of_iomap(op->node, 0); 945 lp->regs = of_iomap(op->node, 0);
924 if (!lp->regs) { 946 if (!lp->regs) {
925 dev_err(&op->dev, "could not map temac regs.\n"); 947 dev_err(&op->dev, "could not map temac regs.\n");
926 goto nodev; 948 goto nodev;
927 } 949 }
950
951 /* Setup checksum offload, but default to off if not specified */
952 lp->temac_features = 0;
953 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
954 if (p && be32_to_cpu(*p)) {
955 lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
956 /* Can checksum TCP/UDP over IPv4. */
957 ndev->features |= NETIF_F_IP_CSUM;
958 }
959 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
960 if (p && be32_to_cpu(*p))
961 lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
928 962
929 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 963 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
930 np = of_parse_phandle(op->node, "llink-connected", 0); 964 np = of_parse_phandle(op->node, "llink-connected", 0);
931 if (!np) { 965 if (!np) {
932 dev_err(&op->dev, "could not find DMA node\n"); 966 dev_err(&op->dev, "could not find DMA node\n");
933 goto nodev; 967 goto nodev;
934 } 968 }
935 969
936 /* Setup the DMA register accesses, could be DCR or memory mapped */ 970 /* Setup the DMA register accesses, could be DCR or memory mapped */
937 if (temac_dcr_setup(lp, op, np)) { 971 if (temac_dcr_setup(lp, op, np)) {
938 972
939 /* no DCR in the device tree, try non-DCR */ 973 /* no DCR in the device tree, try non-DCR */
940 lp->sdma_regs = of_iomap(np, 0); 974 lp->sdma_regs = of_iomap(np, 0);
941 if (lp->sdma_regs) { 975 if (lp->sdma_regs) {
942 lp->dma_in = temac_dma_in32; 976 lp->dma_in = temac_dma_in32;
943 lp->dma_out = temac_dma_out32; 977 lp->dma_out = temac_dma_out32;
944 dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); 978 dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
945 } else { 979 } else {
946 dev_err(&op->dev, "unable to map DMA registers\n"); 980 dev_err(&op->dev, "unable to map DMA registers\n");
947 goto nodev; 981 goto nodev;
948 } 982 }
949 } 983 }
950 984
951 lp->rx_irq = irq_of_parse_and_map(np, 0); 985 lp->rx_irq = irq_of_parse_and_map(np, 0);
952 lp->tx_irq = irq_of_parse_and_map(np, 1); 986 lp->tx_irq = irq_of_parse_and_map(np, 1);
953 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { 987 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
954 dev_err(&op->dev, "could not determine irqs\n"); 988 dev_err(&op->dev, "could not determine irqs\n");
955 rc = -ENOMEM; 989 rc = -ENOMEM;
956 goto nodev; 990 goto nodev;
957 } 991 }
958 992
959 of_node_put(np); /* Finished with the DMA node; drop the reference */ 993 of_node_put(np); /* Finished with the DMA node; drop the reference */
960 994
961 /* Retrieve the MAC address */ 995 /* Retrieve the MAC address */
962 addr = of_get_property(op->node, "local-mac-address", &size); 996 addr = of_get_property(op->node, "local-mac-address", &size);
963 if ((!addr) || (size != 6)) { 997 if ((!addr) || (size != 6)) {
964 dev_err(&op->dev, "could not find MAC address\n"); 998 dev_err(&op->dev, "could not find MAC address\n");
965 rc = -ENODEV; 999 rc = -ENODEV;
966 goto nodev; 1000 goto nodev;
967 } 1001 }
968 temac_set_mac_address(ndev, (void *)addr); 1002 temac_set_mac_address(ndev, (void *)addr);
969 1003
970 rc = temac_mdio_setup(lp, op->node); 1004 rc = temac_mdio_setup(lp, op->node);
971 if (rc) 1005 if (rc)
972 dev_warn(&op->dev, "error registering MDIO bus\n"); 1006 dev_warn(&op->dev, "error registering MDIO bus\n");
973 1007
974 lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0); 1008 lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
975 if (lp->phy_node) 1009 if (lp->phy_node)
976 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); 1010 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
977 1011
978 /* Add the device attributes */ 1012 /* Add the device attributes */
979 rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); 1013 rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
980 if (rc) { 1014 if (rc) {
981 dev_err(lp->dev, "Error creating sysfs files\n"); 1015 dev_err(lp->dev, "Error creating sysfs files\n");
982 goto nodev; 1016 goto nodev;
983 } 1017 }
984 1018
985 rc = register_netdev(lp->ndev); 1019 rc = register_netdev(lp->ndev);
986 if (rc) { 1020 if (rc) {
987 dev_err(lp->dev, "register_netdev() error (%i)\n", rc); 1021 dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
988 goto err_register_ndev; 1022 goto err_register_ndev;
989 } 1023 }
990 1024
991 return 0; 1025 return 0;
992 1026
993 err_register_ndev: 1027 err_register_ndev:
994 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); 1028 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
995 nodev: 1029 nodev:
996 free_netdev(ndev); 1030 free_netdev(ndev);
997 ndev = NULL; 1031 ndev = NULL;
998 return rc; 1032 return rc;
999 } 1033 }
1000 1034
/* Tear down one device instance: stop MDIO, unregister the interface,
 * remove the sysfs files, drop the PHY node reference, and free the
 * net_device. Mirrors temac_of_probe() in reverse order. */
static int __devexit temac_of_remove(struct of_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct temac_local *lp = netdev_priv(ndev);

	temac_mdio_teardown(lp);
	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	/* Release the reference taken by of_parse_phandle() in probe */
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;
	dev_set_drvdata(&op->dev, NULL);
	free_netdev(ndev);
	return 0;
}
1016 1050
/* Device-tree "compatible" strings this driver binds to */
static struct of_device_id temac_of_match[] __devinitdata = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, temac_of_match);

/* OF platform driver glue: probe/remove entry points */
static struct of_platform_driver temac_of_driver = {
	.match_table = temac_of_match,
	.probe = temac_of_probe,
	.remove = __devexit_p(temac_of_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx_temac",
	},
};
1035 1069