Commit ac399bc0f40a9d578bc31e0e5e31814dd1083e68
Committed by
Greg Kroah-Hartman
1 parent
cfbc6a9221
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
drivers:staging:et131x Fix some typo's in staging et131x.
The below patch fixes some comments with typos in them and makes a comment make more sense. Signed-off-by: Justin P. Mattock <justinmattock@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 2 changed files with 5 additions and 5 deletions Inline Diff
drivers/staging/et131x/et131x.c
1 | /* | 1 | /* |
2 | * Agere Systems Inc. | 2 | * Agere Systems Inc. |
3 | * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs | 3 | * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs |
4 | * | 4 | * |
5 | * Copyright © 2005 Agere Systems Inc. | 5 | * Copyright © 2005 Agere Systems Inc. |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * http://www.agere.com | 7 | * http://www.agere.com |
8 | * | 8 | * |
9 | * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> | 9 | * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> |
10 | * | 10 | * |
11 | *------------------------------------------------------------------------------ | 11 | *------------------------------------------------------------------------------ |
12 | * | 12 | * |
13 | * SOFTWARE LICENSE | 13 | * SOFTWARE LICENSE |
14 | * | 14 | * |
15 | * This software is provided subject to the following terms and conditions, | 15 | * This software is provided subject to the following terms and conditions, |
16 | * which you should read carefully before using the software. Using this | 16 | * which you should read carefully before using the software. Using this |
17 | * software indicates your acceptance of these terms and conditions. If you do | 17 | * software indicates your acceptance of these terms and conditions. If you do |
18 | * not agree with these terms and conditions, do not use the software. | 18 | * not agree with these terms and conditions, do not use the software. |
19 | * | 19 | * |
20 | * Copyright © 2005 Agere Systems Inc. | 20 | * Copyright © 2005 Agere Systems Inc. |
21 | * All rights reserved. | 21 | * All rights reserved. |
22 | * | 22 | * |
23 | * Redistribution and use in source or binary forms, with or without | 23 | * Redistribution and use in source or binary forms, with or without |
24 | * modifications, are permitted provided that the following conditions are met: | 24 | * modifications, are permitted provided that the following conditions are met: |
25 | * | 25 | * |
26 | * . Redistributions of source code must retain the above copyright notice, this | 26 | * . Redistributions of source code must retain the above copyright notice, this |
27 | * list of conditions and the following Disclaimer as comments in the code as | 27 | * list of conditions and the following Disclaimer as comments in the code as |
28 | * well as in the documentation and/or other materials provided with the | 28 | * well as in the documentation and/or other materials provided with the |
29 | * distribution. | 29 | * distribution. |
30 | * | 30 | * |
31 | * . Redistributions in binary form must reproduce the above copyright notice, | 31 | * . Redistributions in binary form must reproduce the above copyright notice, |
32 | * this list of conditions and the following Disclaimer in the documentation | 32 | * this list of conditions and the following Disclaimer in the documentation |
33 | * and/or other materials provided with the distribution. | 33 | * and/or other materials provided with the distribution. |
34 | * | 34 | * |
35 | * . Neither the name of Agere Systems Inc. nor the names of the contributors | 35 | * . Neither the name of Agere Systems Inc. nor the names of the contributors |
36 | * may be used to endorse or promote products derived from this software | 36 | * may be used to endorse or promote products derived from this software |
37 | * without specific prior written permission. | 37 | * without specific prior written permission. |
38 | * | 38 | * |
39 | * Disclaimer | 39 | * Disclaimer |
40 | * | 40 | * |
41 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, | 41 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
42 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF | 42 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF |
43 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY | 43 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY |
44 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN | 44 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN |
45 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY | 45 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY |
46 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | 46 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
47 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 47 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
48 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | 48 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
49 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT | 49 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT |
50 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | 50 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
51 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | 51 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
52 | * DAMAGE. | 52 | * DAMAGE. |
53 | * | 53 | * |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #include <linux/pci.h> | 56 | #include <linux/pci.h> |
57 | #include <linux/init.h> | 57 | #include <linux/init.h> |
58 | #include <linux/module.h> | 58 | #include <linux/module.h> |
59 | #include <linux/types.h> | 59 | #include <linux/types.h> |
60 | #include <linux/kernel.h> | 60 | #include <linux/kernel.h> |
61 | 61 | ||
62 | #include <linux/sched.h> | 62 | #include <linux/sched.h> |
63 | #include <linux/ptrace.h> | 63 | #include <linux/ptrace.h> |
64 | #include <linux/slab.h> | 64 | #include <linux/slab.h> |
65 | #include <linux/ctype.h> | 65 | #include <linux/ctype.h> |
66 | #include <linux/string.h> | 66 | #include <linux/string.h> |
67 | #include <linux/timer.h> | 67 | #include <linux/timer.h> |
68 | #include <linux/interrupt.h> | 68 | #include <linux/interrupt.h> |
69 | #include <linux/in.h> | 69 | #include <linux/in.h> |
70 | #include <linux/delay.h> | 70 | #include <linux/delay.h> |
71 | #include <linux/bitops.h> | 71 | #include <linux/bitops.h> |
72 | #include <linux/io.h> | 72 | #include <linux/io.h> |
73 | #include <asm/system.h> | 73 | #include <asm/system.h> |
74 | 74 | ||
75 | #include <linux/netdevice.h> | 75 | #include <linux/netdevice.h> |
76 | #include <linux/etherdevice.h> | 76 | #include <linux/etherdevice.h> |
77 | #include <linux/skbuff.h> | 77 | #include <linux/skbuff.h> |
78 | #include <linux/if_arp.h> | 78 | #include <linux/if_arp.h> |
79 | #include <linux/ioport.h> | 79 | #include <linux/ioport.h> |
80 | #include <linux/crc32.h> | 80 | #include <linux/crc32.h> |
81 | #include <linux/random.h> | 81 | #include <linux/random.h> |
82 | #include <linux/phy.h> | 82 | #include <linux/phy.h> |
83 | 83 | ||
84 | #include "et131x.h" | 84 | #include "et131x.h" |
85 | 85 | ||
86 | MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); | 86 | MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); |
87 | MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); | 87 | MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); |
88 | MODULE_LICENSE("Dual BSD/GPL"); | 88 | MODULE_LICENSE("Dual BSD/GPL"); |
89 | MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " | 89 | MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " |
90 | "for the ET1310 by Agere Systems"); | 90 | "for the ET1310 by Agere Systems"); |
91 | 91 | ||
/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS		1000
#define MAX_NUM_WRITE_RETRIES		2

/* MAC defines */
#define COUNTER_WRAP_16_BIT		0x10000
#define COUNTER_WRAP_12_BIT		0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE		0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET		0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE		0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE			0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW		0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE		60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST		128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT		(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD		0

/* MP_TCB flags */
#define fMP_DEST_MULTI			0x00000001
#define fMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO			1000

#define PARM_RX_NUM_BUFS_DEF		4
#define PARM_RX_TIME_INT_DEF		10
#define PARM_RX_MEM_END_DEF		0x2bc
#define PARM_TX_TIME_INT_DEF		40
#define PARM_TX_NUM_BUFS_DEF		4
#define PARM_DMA_CACHE_DEF		0

/* RX defines */
#define USE_FBR0			1
#define FBR_CHUNKS			32
#define MAX_DESC_PER_RING_RX		1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK		40
#define NIC_DEFAULT_NUM_RFD		1024
#define NUM_FBRS			2
#else
#define RFD_LOW_WATER_MARK		20
#define NIC_DEFAULT_NUM_RFD		256
#define NUM_FBRS			1
#endif

#define NIC_MIN_NUM_RFD			64
#define NUM_PACKETS_HANDLED		256

#define ALCATEL_MULTICAST_PKT		0x01000000
#define ALCATEL_BROADCAST_PKT		0x02000000
200 | /* typedefs for Free Buffer Descriptors */ | 200 | /* typedefs for Free Buffer Descriptors */ |
201 | struct fbr_desc { | 201 | struct fbr_desc { |
202 | u32 addr_lo; | 202 | u32 addr_lo; |
203 | u32 addr_hi; | 203 | u32 addr_hi; |
204 | u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ | 204 | u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ |
205 | }; | 205 | }; |
206 | 206 | ||
207 | /* Packet Status Ring Descriptors | 207 | /* Packet Status Ring Descriptors |
208 | * | 208 | * |
209 | * Word 0: | 209 | * Word 0: |
210 | * | 210 | * |
211 | * top 16 bits are from the Alcatel Status Word as enumerated in | 211 | * top 16 bits are from the Alcatel Status Word as enumerated in |
212 | * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) | 212 | * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) |
213 | * | 213 | * |
214 | * 0: hp hash pass | 214 | * 0: hp hash pass |
215 | * 1: ipa IP checksum assist | 215 | * 1: ipa IP checksum assist |
216 | * 2: ipp IP checksum pass | 216 | * 2: ipp IP checksum pass |
217 | * 3: tcpa TCP checksum assist | 217 | * 3: tcpa TCP checksum assist |
218 | * 4: tcpp TCP checksum pass | 218 | * 4: tcpp TCP checksum pass |
219 | * 5: wol WOL Event | 219 | * 5: wol WOL Event |
220 | * 6: rxmac_error RXMAC Error Indicator | 220 | * 6: rxmac_error RXMAC Error Indicator |
221 | * 7: drop Drop packet | 221 | * 7: drop Drop packet |
222 | * 8: ft Frame Truncated | 222 | * 8: ft Frame Truncated |
223 | * 9: jp Jumbo Packet | 223 | * 9: jp Jumbo Packet |
224 | * 10: vp VLAN Packet | 224 | * 10: vp VLAN Packet |
225 | * 11-15: unused | 225 | * 11-15: unused |
226 | * 16: asw_prev_pkt_dropped e.g. IFG too small on previous | 226 | * 16: asw_prev_pkt_dropped e.g. IFG too small on previous |
227 | * 17: asw_RX_DV_event short receive event detected | 227 | * 17: asw_RX_DV_event short receive event detected |
228 | * 18: asw_false_carrier_event bad carrier since last good packet | 228 | * 18: asw_false_carrier_event bad carrier since last good packet |
229 | * 19: asw_code_err one or more nibbles signalled as errors | 229 | * 19: asw_code_err one or more nibbles signalled as errors |
230 | * 20: asw_CRC_err CRC error | 230 | * 20: asw_CRC_err CRC error |
231 | * 21: asw_len_chk_err frame length field incorrect | 231 | * 21: asw_len_chk_err frame length field incorrect |
232 | * 22: asw_too_long frame length > 1518 bytes | 232 | * 22: asw_too_long frame length > 1518 bytes |
233 | * 23: asw_OK valid CRC + no code error | 233 | * 23: asw_OK valid CRC + no code error |
234 | * 24: asw_multicast has a multicast address | 234 | * 24: asw_multicast has a multicast address |
235 | * 25: asw_broadcast has a broadcast address | 235 | * 25: asw_broadcast has a broadcast address |
236 | * 26: asw_dribble_nibble spurious bits after EOP | 236 | * 26: asw_dribble_nibble spurious bits after EOP |
237 | * 27: asw_control_frame is a control frame | 237 | * 27: asw_control_frame is a control frame |
238 | * 28: asw_pause_frame is a pause frame | 238 | * 28: asw_pause_frame is a pause frame |
239 | * 29: asw_unsupported_op unsupported OP code | 239 | * 29: asw_unsupported_op unsupported OP code |
240 | * 30: asw_VLAN_tag VLAN tag detected | 240 | * 30: asw_VLAN_tag VLAN tag detected |
241 | * 31: asw_long_evt Rx long event | 241 | * 31: asw_long_evt Rx long event |
242 | * | 242 | * |
243 | * Word 1: | 243 | * Word 1: |
244 | * 0-15: length length in bytes | 244 | * 0-15: length length in bytes |
245 | * 16-25: bi Buffer Index | 245 | * 16-25: bi Buffer Index |
246 | * 26-27: ri Ring Index | 246 | * 26-27: ri Ring Index |
247 | * 28-31: reserved | 247 | * 28-31: reserved |
248 | */ | 248 | */ |
249 | 249 | ||
250 | struct pkt_stat_desc { | 250 | struct pkt_stat_desc { |
251 | u32 word0; | 251 | u32 word0; |
252 | u32 word1; | 252 | u32 word1; |
253 | }; | 253 | }; |
254 | 254 | ||
255 | /* Typedefs for the RX DMA status word */ | 255 | /* Typedefs for the RX DMA status word */ |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * rx status word 0 holds part of the status bits of the Rx DMA engine | 258 | * rx status word 0 holds part of the status bits of the Rx DMA engine |
259 | * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word | 259 | * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word |
260 | * which contains the Free Buffer ring 0 and 1 available offset. | 260 | * which contains the Free Buffer ring 0 and 1 available offset. |
261 | * | 261 | * |
262 | * bit 0-9 FBR1 offset | 262 | * bit 0-9 FBR1 offset |
263 | * bit 10 Wrap flag for FBR1 | 263 | * bit 10 Wrap flag for FBR1 |
264 | * bit 16-25 FBR0 offset | 264 | * bit 16-25 FBR0 offset |
265 | * bit 26 Wrap flag for FBR0 | 265 | * bit 26 Wrap flag for FBR0 |
266 | */ | 266 | */ |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine | 269 | * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine |
270 | * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word | 270 | * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word |
271 | * which contains the Packet Status Ring available offset. | 271 | * which contains the Packet Status Ring available offset. |
272 | * | 272 | * |
273 | * bit 0-15 reserved | 273 | * bit 0-15 reserved |
274 | * bit 16-27 PSRoffset | 274 | * bit 16-27 PSRoffset |
275 | * bit 28 PSRwrap | 275 | * bit 28 PSRwrap |
276 | * bit 29-31 unused | 276 | * bit 29-31 unused |
277 | */ | 277 | */ |
278 | 278 | ||
279 | /* | 279 | /* |
280 | * struct rx_status_block is a structure representing the status of the Rx | 280 | * struct rx_status_block is a structure representing the status of the Rx |
281 | * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 | 281 | * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 |
282 | */ | 282 | */ |
283 | struct rx_status_block { | 283 | struct rx_status_block { |
284 | u32 word0; | 284 | u32 word0; |
285 | u32 word1; | 285 | u32 word1; |
286 | }; | 286 | }; |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * Structure for look-up table holding free buffer ring pointers, addresses | 289 | * Structure for look-up table holding free buffer ring pointers, addresses |
290 | * and state. | 290 | * and state. |
291 | */ | 291 | */ |
292 | struct fbr_lookup { | 292 | struct fbr_lookup { |
293 | void *virt[MAX_DESC_PER_RING_RX]; | 293 | void *virt[MAX_DESC_PER_RING_RX]; |
294 | void *buffer1[MAX_DESC_PER_RING_RX]; | 294 | void *buffer1[MAX_DESC_PER_RING_RX]; |
295 | void *buffer2[MAX_DESC_PER_RING_RX]; | 295 | void *buffer2[MAX_DESC_PER_RING_RX]; |
296 | u32 bus_high[MAX_DESC_PER_RING_RX]; | 296 | u32 bus_high[MAX_DESC_PER_RING_RX]; |
297 | u32 bus_low[MAX_DESC_PER_RING_RX]; | 297 | u32 bus_low[MAX_DESC_PER_RING_RX]; |
298 | void *ring_virtaddr; | 298 | void *ring_virtaddr; |
299 | dma_addr_t ring_physaddr; | 299 | dma_addr_t ring_physaddr; |
300 | void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; | 300 | void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; |
301 | dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; | 301 | dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; |
302 | u64 real_physaddr; | 302 | u64 real_physaddr; |
303 | u64 offset; | 303 | u64 offset; |
304 | u32 local_full; | 304 | u32 local_full; |
305 | u32 num_entries; | 305 | u32 num_entries; |
306 | u32 buffsize; | 306 | u32 buffsize; |
307 | }; | 307 | }; |
308 | 308 | ||
309 | /* | 309 | /* |
310 | * struct rx_ring is the sructure representing the adaptor's local | 310 | * struct rx_ring is the sructure representing the adaptor's local |
311 | * reference(s) to the rings | 311 | * reference(s) to the rings |
312 | * | 312 | * |
313 | ****************************************************************************** | 313 | ****************************************************************************** |
314 | * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1 | 314 | * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1 |
315 | * and index 1 to refer to FRB0 | 315 | * and index 1 to refer to FRB0 |
316 | ****************************************************************************** | 316 | ****************************************************************************** |
317 | */ | 317 | */ |
318 | struct rx_ring { | 318 | struct rx_ring { |
319 | struct fbr_lookup *fbr[NUM_FBRS]; | 319 | struct fbr_lookup *fbr[NUM_FBRS]; |
320 | void *ps_ring_virtaddr; | 320 | void *ps_ring_virtaddr; |
321 | dma_addr_t ps_ring_physaddr; | 321 | dma_addr_t ps_ring_physaddr; |
322 | u32 local_psr_full; | 322 | u32 local_psr_full; |
323 | u32 psr_num_entries; | 323 | u32 psr_num_entries; |
324 | 324 | ||
325 | struct rx_status_block *rx_status_block; | 325 | struct rx_status_block *rx_status_block; |
326 | dma_addr_t rx_status_bus; | 326 | dma_addr_t rx_status_bus; |
327 | 327 | ||
328 | /* RECV */ | 328 | /* RECV */ |
329 | struct list_head recv_list; | 329 | struct list_head recv_list; |
330 | u32 num_ready_recv; | 330 | u32 num_ready_recv; |
331 | 331 | ||
332 | u32 num_rfd; | 332 | u32 num_rfd; |
333 | 333 | ||
334 | bool unfinished_receives; | 334 | bool unfinished_receives; |
335 | 335 | ||
336 | /* lookaside lists */ | 336 | /* lookaside lists */ |
337 | struct kmem_cache *recv_lookaside; | 337 | struct kmem_cache *recv_lookaside; |
338 | }; | 338 | }; |
339 | 339 | ||
340 | /* TX defines */ | 340 | /* TX defines */ |
341 | /* | 341 | /* |
342 | * word 2 of the control bits in the Tx Descriptor ring for the ET-1310 | 342 | * word 2 of the control bits in the Tx Descriptor ring for the ET-1310 |
343 | * | 343 | * |
344 | * 0-15: length of packet | 344 | * 0-15: length of packet |
345 | * 16-27: VLAN tag | 345 | * 16-27: VLAN tag |
346 | * 28: VLAN CFI | 346 | * 28: VLAN CFI |
347 | * 29-31: VLAN priority | 347 | * 29-31: VLAN priority |
348 | * | 348 | * |
349 | * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 | 349 | * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 |
350 | * | 350 | * |
351 | * 0: last packet in the sequence | 351 | * 0: last packet in the sequence |
352 | * 1: first packet in the sequence | 352 | * 1: first packet in the sequence |
353 | * 2: interrupt the processor when this pkt sent | 353 | * 2: interrupt the processor when this pkt sent |
354 | * 3: Control word - no packet data | 354 | * 3: Control word - no packet data |
355 | * 4: Issue half-duplex backpressure : XON/XOFF | 355 | * 4: Issue half-duplex backpressure : XON/XOFF |
356 | * 5: send pause frame | 356 | * 5: send pause frame |
357 | * 6: Tx frame has error | 357 | * 6: Tx frame has error |
358 | * 7: append CRC | 358 | * 7: append CRC |
359 | * 8: MAC override | 359 | * 8: MAC override |
360 | * 9: pad packet | 360 | * 9: pad packet |
361 | * 10: Packet is a Huge packet | 361 | * 10: Packet is a Huge packet |
362 | * 11: append VLAN tag | 362 | * 11: append VLAN tag |
363 | * 12: IP checksum assist | 363 | * 12: IP checksum assist |
364 | * 13: TCP checksum assist | 364 | * 13: TCP checksum assist |
365 | * 14: UDP checksum assist | 365 | * 14: UDP checksum assist |
366 | */ | 366 | */ |
367 | 367 | ||
368 | /* struct tx_desc represents each descriptor on the ring */ | 368 | /* struct tx_desc represents each descriptor on the ring */ |
369 | struct tx_desc { | 369 | struct tx_desc { |
370 | u32 addr_hi; | 370 | u32 addr_hi; |
371 | u32 addr_lo; | 371 | u32 addr_lo; |
372 | u32 len_vlan; /* control words how to xmit the */ | 372 | u32 len_vlan; /* control words how to xmit the */ |
373 | u32 flags; /* data (detailed above) */ | 373 | u32 flags; /* data (detailed above) */ |
374 | }; | 374 | }; |
375 | 375 | ||
376 | /* | 376 | /* |
377 | * The status of the Tx DMA engine it sits in free memory, and is pointed to | 377 | * The status of the Tx DMA engine it sits in free memory, and is pointed to |
378 | * by 0x101c / 0x1020. This is a DMA10 type | 378 | * by 0x101c / 0x1020. This is a DMA10 type |
379 | */ | 379 | */ |
380 | 380 | ||
381 | /* TCB (Transmit Control Block: Host Side) */ | 381 | /* TCB (Transmit Control Block: Host Side) */ |
382 | struct tcb { | 382 | struct tcb { |
383 | struct tcb *next; /* Next entry in ring */ | 383 | struct tcb *next; /* Next entry in ring */ |
384 | u32 flags; /* Our flags for the packet */ | 384 | u32 flags; /* Our flags for the packet */ |
385 | u32 count; /* Used to spot stuck/lost packets */ | 385 | u32 count; /* Used to spot stuck/lost packets */ |
386 | u32 stale; /* Used to spot stuck/lost packets */ | 386 | u32 stale; /* Used to spot stuck/lost packets */ |
387 | struct sk_buff *skb; /* Network skb we are tied to */ | 387 | struct sk_buff *skb; /* Network skb we are tied to */ |
388 | u32 index; /* Ring indexes */ | 388 | u32 index; /* Ring indexes */ |
389 | u32 index_start; | 389 | u32 index_start; |
390 | }; | 390 | }; |
391 | 391 | ||
392 | /* Structure representing our local reference(s) to the ring */ | 392 | /* Structure representing our local reference(s) to the ring */ |
393 | struct tx_ring { | 393 | struct tx_ring { |
394 | /* TCB (Transmit Control Block) memory and lists */ | 394 | /* TCB (Transmit Control Block) memory and lists */ |
395 | struct tcb *tcb_ring; | 395 | struct tcb *tcb_ring; |
396 | 396 | ||
397 | /* List of TCBs that are ready to be used */ | 397 | /* List of TCBs that are ready to be used */ |
398 | struct tcb *tcb_qhead; | 398 | struct tcb *tcb_qhead; |
399 | struct tcb *tcb_qtail; | 399 | struct tcb *tcb_qtail; |
400 | 400 | ||
401 | /* list of TCBs that are currently being sent. NOTE that access to all | 401 | /* list of TCBs that are currently being sent. NOTE that access to all |
402 | * three of these (including used) are controlled via the | 402 | * three of these (including used) are controlled via the |
403 | * TCBSendQLock. This lock should be secured prior to incementing / | 403 | * TCBSendQLock. This lock should be secured prior to incementing / |
404 | * decrementing used, or any queue manipulation on send_head / | 404 | * decrementing used, or any queue manipulation on send_head / |
405 | * tail | 405 | * tail |
406 | */ | 406 | */ |
407 | struct tcb *send_head; | 407 | struct tcb *send_head; |
408 | struct tcb *send_tail; | 408 | struct tcb *send_tail; |
409 | int used; | 409 | int used; |
410 | 410 | ||
411 | /* The actual descriptor ring */ | 411 | /* The actual descriptor ring */ |
412 | struct tx_desc *tx_desc_ring; | 412 | struct tx_desc *tx_desc_ring; |
413 | dma_addr_t tx_desc_ring_pa; | 413 | dma_addr_t tx_desc_ring_pa; |
414 | 414 | ||
415 | /* send_idx indicates where we last wrote to in the descriptor ring. */ | 415 | /* send_idx indicates where we last wrote to in the descriptor ring. */ |
416 | u32 send_idx; | 416 | u32 send_idx; |
417 | 417 | ||
418 | /* The location of the write-back status block */ | 418 | /* The location of the write-back status block */ |
419 | u32 *tx_status; | 419 | u32 *tx_status; |
420 | dma_addr_t tx_status_pa; | 420 | dma_addr_t tx_status_pa; |
421 | 421 | ||
422 | /* Packets since the last IRQ: used for interrupt coalescing */ | 422 | /* Packets since the last IRQ: used for interrupt coalescing */ |
423 | int since_irq; | 423 | int since_irq; |
424 | }; | 424 | }; |
425 | 425 | ||
/*
 * Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15
442 | 442 | ||
/* RFD (Receive Frame Descriptor)
 *
 * One entry per received frame; holds the skb carrying the frame data
 * plus enough bookkeeping to return the buffer to its free-buffer ring.
 */
struct rfd {
	struct list_head list_node;	/* linkage on the driver's rx lists */
	struct sk_buff *skb;		/* socket buffer holding the frame */
	u32 len;	/* total size of receive frame */
	u16 bufferindex;	/* index of the buffer used for this frame */
	u8 ringindex;	/* which ring the buffer came from - TODO confirm FBR0/FBR1 mapping */
};
451 | 451 | ||
/* Flow Control directions (used for 802.3x flow control configuration) */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3
457 | 457 | ||
/* Struct to define some device statistics kept by the driver */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32 unicast_pkts_rcvd;
	atomic_t unicast_pkts_xmtd;
	u32 multicast_pkts_rcvd;
	atomic_t multicast_pkts_xmtd;
	u32 broadcast_pkts_rcvd;
	atomic_t broadcast_pkts_xmtd;
	u32 rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32 tx_underflows;

	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	/* Rx Statistics. */
	u32 rx_overflows;

	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	/* NOTE(review): the two counters below look like ISR bookkeeping -
	 * confirm against the interrupt handler before relying on them.
	 */
	u32 synchronous_iterations;
	u32 interrupt_status;
};
496 | 496 | ||
/* The private adapter structure (per-device driver state) */
struct et131x_adapter {
	struct net_device *netdev;	/* net device we are bound to */
	struct pci_dev *pdev;		/* our PCI function */
	struct mii_bus *mii_bus;	/* MDIO bus used to reach the PHY */
	struct phy_device *phydev;	/* attached PHY device */
	struct work_struct task;	/* deferred work item */

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;	/* set/cleared by et131x_init_eeprom() */
	u8 eeprom_data[2];	/* LED-behaviour bytes read from EEPROM 0x70/0x71 */

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* Tracks putting the PHY into coma mode when booting with no cable
	 * plugged in (after 5 seconds).
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};
573 | 573 | ||
574 | static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) | 574 | static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) |
575 | { | 575 | { |
576 | u32 reg; | 576 | u32 reg; |
577 | int i; | 577 | int i; |
578 | 578 | ||
579 | /* | 579 | /* |
580 | * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and | 580 | * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and |
581 | * bits 7,1:0 both equal to 1, at least once after reset. | 581 | * bits 7,1:0 both equal to 1, at least once after reset. |
582 | * Subsequent operations need only to check that bits 1:0 are equal | 582 | * Subsequent operations need only to check that bits 1:0 are equal |
583 | * to 1 prior to starting a single byte read/write | 583 | * to 1 prior to starting a single byte read/write |
584 | */ | 584 | */ |
585 | 585 | ||
586 | for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { | 586 | for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { |
587 | /* Read registers grouped in DWORD1 */ | 587 | /* Read registers grouped in DWORD1 */ |
588 | if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) | 588 | if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) |
589 | return -EIO; | 589 | return -EIO; |
590 | 590 | ||
591 | /* I2C idle and Phy Queue Avail both true */ | 591 | /* I2C idle and Phy Queue Avail both true */ |
592 | if ((reg & 0x3000) == 0x3000) { | 592 | if ((reg & 0x3000) == 0x3000) { |
593 | if (status) | 593 | if (status) |
594 | *status = reg; | 594 | *status = reg; |
595 | return reg & 0xFF; | 595 | return reg & 0xFF; |
596 | } | 596 | } |
597 | } | 597 | } |
598 | return -ETIMEDOUT; | 598 | return -ETIMEDOUT; |
599 | } | 599 | } |
600 | 600 | ||
601 | 601 | ||
/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 on a successful write, -EIO if the write could not be
 * completed, or the non-zero LBCIF status byte if the interface never
 * became ready (propagated from eeprom_wait_ready()).
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0. Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bit 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		/* NOTE(review): returning 0 ("success") when the wait fails
		 * looks suspicious - confirm whether this should propagate
		 * the error instead.
		 */
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must
		 * be repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		/* Stop once the status field reads back clean, or after a
		 * bounded number of iterations so we cannot spin forever.
		 */
		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}
731 | 731 | ||
/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, -EIO on a config-space access or ACK
 * failure, or a non-zero value propagated from eeprom_wait_ready().
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register. The low status byte returned by eeprom_wait_ready()
	 * is that data byte.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
790 | 790 | ||
/**
 * et131x_init_eeprom - probe the EEPROM and cache the data the driver needs
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success, -EIO if the EEPROM status reports an unrecoverable
 * error. Sets adapter->has_eeprom and caches the two LED-behaviour bytes
 * (offsets 0x70/0x71) in adapter->eeprom_data.
 */
static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
				      &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some time conditions
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
		       "Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 *
			 * NOTE(review): the loop only writes 3 bytes (i < 3)
			 * although eedata holds 4 - confirm whether the
			 * trailing 0xFF was meant to be written as well.
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}
859 | 859 | ||
/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Builds the receive DMA CSR value from the configured free-buffer-ring
 * sizes, writes it, then checks the halt-status flag (bit 17, see
 * et131x_rx_dma_disable) to confirm the engine left the halt state.
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	/* Select the FBR1 buffer-size encoding in the CSR */
	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	/* Select the FBR0 buffer-size encoding in the CSR */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	/* Verify the engine actually left the halt state (bit 17 clear) */
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}
897 | 897 | ||
898 | /** | 898 | /** |
899 | * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 | 899 | * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 |
900 | * @adapter: pointer to our adapter structure | 900 | * @adapter: pointer to our adapter structure |
901 | */ | 901 | */ |
902 | static void et131x_rx_dma_disable(struct et131x_adapter *adapter) | 902 | static void et131x_rx_dma_disable(struct et131x_adapter *adapter) |
903 | { | 903 | { |
904 | u32 csr; | 904 | u32 csr; |
905 | /* Setup the receive dma configuration register */ | 905 | /* Setup the receive dma configuration register */ |
906 | writel(0x00002001, &adapter->regs->rxdma.csr); | 906 | writel(0x00002001, &adapter->regs->rxdma.csr); |
907 | csr = readl(&adapter->regs->rxdma.csr); | 907 | csr = readl(&adapter->regs->rxdma.csr); |
908 | if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ | 908 | if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ |
909 | udelay(5); | 909 | udelay(5); |
910 | csr = readl(&adapter->regs->rxdma.csr); | 910 | csr = readl(&adapter->regs->rxdma.csr); |
911 | if ((csr & 0x00020000) == 0) | 911 | if ((csr & 0x00020000) == 0) |
912 | dev_err(&adapter->pdev->dev, | 912 | dev_err(&adapter->pdev->dev, |
913 | "RX Dma failed to enter halt state. CSR 0x%08x\n", | 913 | "RX Dma failed to enter halt state. CSR 0x%08x\n", |
914 | csr); | 914 | csr); |
915 | } | 915 | } |
916 | } | 916 | } |
917 | 917 | ||
/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation: single-end-of-packet mode plus the default DMA
	 * cache setting in the cache field.
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}
932 | 932 | ||
933 | static inline void add_10bit(u32 *v, int n) | 933 | static inline void add_10bit(u32 *v, int n) |
934 | { | 934 | { |
935 | *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); | 935 | *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); |
936 | } | 936 | } |
937 | 937 | ||
938 | static inline void add_12bit(u32 *v, int n) | 938 | static inline void add_12bit(u32 *v, int n) |
939 | { | 939 | { |
940 | *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); | 940 | *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); |
941 | } | 941 | } |
942 | 942 | ||
/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 *
 * Holds the MAC in reset, programs the inter-packet gap, half-duplex
 * parameters, MII management clock, station address and maximum frame
 * length, then releases the MAC from reset.
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next let's configure the MAC Inter-packet gap register */
	ipg = 0x38005860;		/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;		/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next let's configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next let's configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next let's configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}
1002 | 1002 | ||
1003 | /** | 1003 | /** |
1004 | * et1310_config_mac_regs2 - Initialize the second part of MAC regs | 1004 | * et1310_config_mac_regs2 - Initialize the second part of MAC regs |
1005 | * @adapter: pointer to our adapter structure | 1005 | * @adapter: pointer to our adapter structure |
1006 | */ | 1006 | */ |
1007 | static void et1310_config_mac_regs2(struct et131x_adapter *adapter) | 1007 | static void et1310_config_mac_regs2(struct et131x_adapter *adapter) |
1008 | { | 1008 | { |
1009 | int32_t delay = 0; | 1009 | int32_t delay = 0; |
1010 | struct mac_regs __iomem *mac = &adapter->regs->mac; | 1010 | struct mac_regs __iomem *mac = &adapter->regs->mac; |
1011 | struct phy_device *phydev = adapter->phydev; | 1011 | struct phy_device *phydev = adapter->phydev; |
1012 | u32 cfg1; | 1012 | u32 cfg1; |
1013 | u32 cfg2; | 1013 | u32 cfg2; |
1014 | u32 ifctrl; | 1014 | u32 ifctrl; |
1015 | u32 ctl; | 1015 | u32 ctl; |
1016 | 1016 | ||
1017 | ctl = readl(&adapter->regs->txmac.ctl); | 1017 | ctl = readl(&adapter->regs->txmac.ctl); |
1018 | cfg1 = readl(&mac->cfg1); | 1018 | cfg1 = readl(&mac->cfg1); |
1019 | cfg2 = readl(&mac->cfg2); | 1019 | cfg2 = readl(&mac->cfg2); |
1020 | ifctrl = readl(&mac->if_ctrl); | 1020 | ifctrl = readl(&mac->if_ctrl); |
1021 | 1021 | ||
1022 | /* Set up the if mode bits */ | 1022 | /* Set up the if mode bits */ |
1023 | cfg2 &= ~0x300; | 1023 | cfg2 &= ~0x300; |
1024 | if (phydev && phydev->speed == SPEED_1000) { | 1024 | if (phydev && phydev->speed == SPEED_1000) { |
1025 | cfg2 |= 0x200; | 1025 | cfg2 |= 0x200; |
1026 | /* Phy mode bit */ | 1026 | /* Phy mode bit */ |
1027 | ifctrl &= ~(1 << 24); | 1027 | ifctrl &= ~(1 << 24); |
1028 | } else { | 1028 | } else { |
1029 | cfg2 |= 0x100; | 1029 | cfg2 |= 0x100; |
1030 | ifctrl |= (1 << 24); | 1030 | ifctrl |= (1 << 24); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | /* We need to enable Rx/Tx */ | 1033 | /* We need to enable Rx/Tx */ |
1034 | cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW; | 1034 | cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW; |
1035 | /* Initialize loop back to off */ | 1035 | /* Initialize loop back to off */ |
1036 | cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW); | 1036 | cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW); |
1037 | if (adapter->flowcontrol == FLOW_RXONLY || | 1037 | if (adapter->flowcontrol == FLOW_RXONLY || |
1038 | adapter->flowcontrol == FLOW_BOTH) | 1038 | adapter->flowcontrol == FLOW_BOTH) |
1039 | cfg1 |= CFG1_RX_FLOW; | 1039 | cfg1 |= CFG1_RX_FLOW; |
1040 | writel(cfg1, &mac->cfg1); | 1040 | writel(cfg1, &mac->cfg1); |
1041 | 1041 | ||
1042 | /* Now we need to initialize the MAC Configuration 2 register */ | 1042 | /* Now we need to initialize the MAC Configuration 2 register */ |
1043 | /* preamble 7, check length, huge frame off, pad crc, crc enable | 1043 | /* preamble 7, check length, huge frame off, pad crc, crc enable |
1044 | full duplex off */ | 1044 | full duplex off */ |
1045 | cfg2 |= 0x7016; | 1045 | cfg2 |= 0x7016; |
1046 | cfg2 &= ~0x0021; | 1046 | cfg2 &= ~0x0021; |
1047 | 1047 | ||
1048 | /* Turn on duplex if needed */ | 1048 | /* Turn on duplex if needed */ |
1049 | if (phydev && phydev->duplex == DUPLEX_FULL) | 1049 | if (phydev && phydev->duplex == DUPLEX_FULL) |
1050 | cfg2 |= 0x01; | 1050 | cfg2 |= 0x01; |
1051 | 1051 | ||
1052 | ifctrl &= ~(1 << 26); | 1052 | ifctrl &= ~(1 << 26); |
1053 | if (phydev && phydev->duplex == DUPLEX_HALF) | 1053 | if (phydev && phydev->duplex == DUPLEX_HALF) |
1054 | ifctrl |= (1<<26); /* Enable ghd */ | 1054 | ifctrl |= (1<<26); /* Enable ghd */ |
1055 | 1055 | ||
1056 | writel(ifctrl, &mac->if_ctrl); | 1056 | writel(ifctrl, &mac->if_ctrl); |
1057 | writel(cfg2, &mac->cfg2); | 1057 | writel(cfg2, &mac->cfg2); |
1058 | 1058 | ||
1059 | do { | 1059 | do { |
1060 | udelay(10); | 1060 | udelay(10); |
1061 | delay++; | 1061 | delay++; |
1062 | cfg1 = readl(&mac->cfg1); | 1062 | cfg1 = readl(&mac->cfg1); |
1063 | } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100); | 1063 | } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100); |
1064 | 1064 | ||
1065 | if (delay == 100) { | 1065 | if (delay == 100) { |
1066 | dev_warn(&adapter->pdev->dev, | 1066 | dev_warn(&adapter->pdev->dev, |
1067 | "Syncd bits did not respond correctly cfg1 word 0x%08x\n", | 1067 | "Syncd bits did not respond correctly cfg1 word 0x%08x\n", |
1068 | cfg1); | 1068 | cfg1); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | /* Enable txmac */ | 1071 | /* Enable txmac */ |
1072 | ctl |= 0x09; /* TX mac enable, FC disable */ | 1072 | ctl |= 0x09; /* TX mac enable, FC disable */ |
1073 | writel(ctl, &adapter->regs->txmac.ctl); | 1073 | writel(ctl, &adapter->regs->txmac.ctl); |
1074 | 1074 | ||
1075 | /* Ready to start the RXDMA/TXDMA engine */ | 1075 | /* Ready to start the RXDMA/TXDMA engine */ |
1076 | if (adapter->flags & fMP_ADAPTER_LOWER_POWER) { | 1076 | if (adapter->flags & fMP_ADAPTER_LOWER_POWER) { |
1077 | et131x_rx_dma_enable(adapter); | 1077 | et131x_rx_dma_enable(adapter); |
1078 | et131x_tx_dma_enable(adapter); | 1078 | et131x_tx_dma_enable(adapter); |
1079 | } | 1079 | } |
1080 | } | 1080 | } |
1081 | 1081 | ||
1082 | /** | 1082 | /** |
1083 | * et1310_in_phy_coma - check if the device is in phy coma | 1083 | * et1310_in_phy_coma - check if the device is in phy coma |
1084 | * @adapter: pointer to our adapter structure | 1084 | * @adapter: pointer to our adapter structure |
1085 | * | 1085 | * |
1086 | * Returns 0 if the device is not in phy coma, 1 if it is in phy coma | 1086 | * Returns 0 if the device is not in phy coma, 1 if it is in phy coma |
1087 | */ | 1087 | */ |
1088 | static int et1310_in_phy_coma(struct et131x_adapter *adapter) | 1088 | static int et1310_in_phy_coma(struct et131x_adapter *adapter) |
1089 | { | 1089 | { |
1090 | u32 pmcsr; | 1090 | u32 pmcsr; |
1091 | 1091 | ||
1092 | pmcsr = readl(&adapter->regs->global.pm_csr); | 1092 | pmcsr = readl(&adapter->regs->global.pm_csr); |
1093 | 1093 | ||
1094 | return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; | 1094 | return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) | 1097 | static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) |
1098 | { | 1098 | { |
1099 | struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; | 1099 | struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; |
1100 | u32 hash1 = 0; | 1100 | u32 hash1 = 0; |
1101 | u32 hash2 = 0; | 1101 | u32 hash2 = 0; |
1102 | u32 hash3 = 0; | 1102 | u32 hash3 = 0; |
1103 | u32 hash4 = 0; | 1103 | u32 hash4 = 0; |
1104 | u32 pm_csr; | 1104 | u32 pm_csr; |
1105 | 1105 | ||
1106 | /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision | 1106 | /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision |
1107 | * the multi-cast LIST. If it is NOT specified, (and "ALL" is not | 1107 | * the multi-cast LIST. If it is NOT specified, (and "ALL" is not |
1108 | * specified) then we should pass NO multi-cast addresses to the | 1108 | * specified) then we should pass NO multi-cast addresses to the |
1109 | * driver. | 1109 | * driver. |
1110 | */ | 1110 | */ |
1111 | if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { | 1111 | if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { |
1112 | int i; | 1112 | int i; |
1113 | 1113 | ||
1114 | /* Loop through our multicast array and set up the device */ | 1114 | /* Loop through our multicast array and set up the device */ |
1115 | for (i = 0; i < adapter->multicast_addr_count; i++) { | 1115 | for (i = 0; i < adapter->multicast_addr_count; i++) { |
1116 | u32 result; | 1116 | u32 result; |
1117 | 1117 | ||
1118 | result = ether_crc(6, adapter->multicast_list[i]); | 1118 | result = ether_crc(6, adapter->multicast_list[i]); |
1119 | 1119 | ||
1120 | result = (result & 0x3F800000) >> 23; | 1120 | result = (result & 0x3F800000) >> 23; |
1121 | 1121 | ||
1122 | if (result < 32) { | 1122 | if (result < 32) { |
1123 | hash1 |= (1 << result); | 1123 | hash1 |= (1 << result); |
1124 | } else if ((31 < result) && (result < 64)) { | 1124 | } else if ((31 < result) && (result < 64)) { |
1125 | result -= 32; | 1125 | result -= 32; |
1126 | hash2 |= (1 << result); | 1126 | hash2 |= (1 << result); |
1127 | } else if ((63 < result) && (result < 96)) { | 1127 | } else if ((63 < result) && (result < 96)) { |
1128 | result -= 64; | 1128 | result -= 64; |
1129 | hash3 |= (1 << result); | 1129 | hash3 |= (1 << result); |
1130 | } else { | 1130 | } else { |
1131 | result -= 96; | 1131 | result -= 96; |
1132 | hash4 |= (1 << result); | 1132 | hash4 |= (1 << result); |
1133 | } | 1133 | } |
1134 | } | 1134 | } |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | /* Write out the new hash to the device */ | 1137 | /* Write out the new hash to the device */ |
1138 | pm_csr = readl(&adapter->regs->global.pm_csr); | 1138 | pm_csr = readl(&adapter->regs->global.pm_csr); |
1139 | if (!et1310_in_phy_coma(adapter)) { | 1139 | if (!et1310_in_phy_coma(adapter)) { |
1140 | writel(hash1, &rxmac->multi_hash1); | 1140 | writel(hash1, &rxmac->multi_hash1); |
1141 | writel(hash2, &rxmac->multi_hash2); | 1141 | writel(hash2, &rxmac->multi_hash2); |
1142 | writel(hash3, &rxmac->multi_hash3); | 1142 | writel(hash3, &rxmac->multi_hash3); |
1143 | writel(hash4, &rxmac->multi_hash4); | 1143 | writel(hash4, &rxmac->multi_hash4); |
1144 | } | 1144 | } |
1145 | } | 1145 | } |
1146 | 1146 | ||
/* Program the rxmac unicast packet filter registers from adapter->addr */
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	/* NOTE(review): pm_csr is read but its value is never used - the
	 * coma check below re-reads the register itself.  Possibly a
	 * leftover read; confirm whether the read has side effects.
	 */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}
1186 | 1186 | ||
/* Configure the rxmac: WOL off, source address, packet filters, MCIF and
 * MIF control, then re-enable the rxmac.
 */
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Let's setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
		       adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering.  Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size).  In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16).  In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *	   errors during the reception of the packet.  Clear this
	 *	   bit in Gigabit, set it in 100Mbit.  This was derived
	 *	   experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *	   internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *	   field value in the packet does not match the actual data
	 *	   byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped.  For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}
1317 | 1317 | ||
1318 | static void et1310_config_txmac_regs(struct et131x_adapter *adapter) | 1318 | static void et1310_config_txmac_regs(struct et131x_adapter *adapter) |
1319 | { | 1319 | { |
1320 | struct txmac_regs __iomem *txmac = &adapter->regs->txmac; | 1320 | struct txmac_regs __iomem *txmac = &adapter->regs->txmac; |
1321 | 1321 | ||
1322 | /* We need to update the Control Frame Parameters | 1322 | /* We need to update the Control Frame Parameters |
1323 | * cfpt - control frame pause timer set to 64 (0x40) | 1323 | * cfpt - control frame pause timer set to 64 (0x40) |
1324 | * cfep - control frame extended pause timer set to 0x0 | 1324 | * cfep - control frame extended pause timer set to 0x0 |
1325 | */ | 1325 | */ |
1326 | if (adapter->flowcontrol == FLOW_NONE) | 1326 | if (adapter->flowcontrol == FLOW_NONE) |
1327 | writel(0, &txmac->cf_param); | 1327 | writel(0, &txmac->cf_param); |
1328 | else | 1328 | else |
1329 | writel(0x40, &txmac->cf_param); | 1329 | writel(0x40, &txmac->cf_param); |
1330 | } | 1330 | } |
1331 | 1331 | ||
/**
 * et1310_config_macstat_regs - reset the hardware MAC statistics block
 * @adapter: pointer to our private adapter structure
 *
 * Zeroes every MACSTAT counter register on the device, clears both carry
 * (overflow) registers, and finally unmasks the carry interrupts for the
 * counters we want to track in software.
 */
static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters.  It may become clear later
	 * that we do not need to track all counters.
	 *
	 * NOTE(review): the magic mask values select which counter bits
	 * raise a carry interrupt; the per-bit meanings are not visible
	 * here — confirm against the ET1310 MACSTAT register map before
	 * changing either constant.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
1396 | 1396 | ||
/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 * Note: *value is written unconditionally, so on timeout (-EIO) it holds
 * whatever stale data the MII management status register contained.
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
	       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Kick off the read cycle */
	writel(0x1, &mac->mii_mgmt_cmd);

	/* Busy-wait for the management interface to finish; polls every
	 * 50us, up to 50 iterations (~2.5ms) before giving up.
	 */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			    "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
			    mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}
1461 | 1461 | ||
1462 | static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) | 1462 | static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) |
1463 | { | 1463 | { |
1464 | struct phy_device *phydev = adapter->phydev; | 1464 | struct phy_device *phydev = adapter->phydev; |
1465 | 1465 | ||
1466 | if (!phydev) | 1466 | if (!phydev) |
1467 | return -EIO; | 1467 | return -EIO; |
1468 | 1468 | ||
1469 | return et131x_phy_mii_read(adapter, phydev->addr, reg, value); | 1469 | return et131x_phy_mii_read(adapter, phydev->addr, reg, value); |
1470 | } | 1470 | } |
1471 | 1471 | ||
/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h).
 * The write always targets the PHY bound to the adapter (adapter->phydev).
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	/* Busy-wait for the management interface to go idle; polls every
	 * 50us, up to 100 iterations (~5ms) before giving up.
	 */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
		    "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
			    mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
			    readl(&mac->mii_mgmt_cmd));

		/* Diagnostic read-back only; the result in tmp is discarded */
		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}
1546 | 1546 | ||
1547 | /* Still used from _mac for BIT_READ */ | 1547 | /* Still used from _mac for BIT_READ */ |
1548 | static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, | 1548 | static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, |
1549 | u16 action, u16 regnum, u16 bitnum, | 1549 | u16 action, u16 regnum, u16 bitnum, |
1550 | u8 *value) | 1550 | u8 *value) |
1551 | { | 1551 | { |
1552 | u16 reg; | 1552 | u16 reg; |
1553 | u16 mask = 0x0001 << bitnum; | 1553 | u16 mask = 0x0001 << bitnum; |
1554 | 1554 | ||
1555 | /* Read the requested register */ | 1555 | /* Read the requested register */ |
1556 | et131x_mii_read(adapter, regnum, ®); | 1556 | et131x_mii_read(adapter, regnum, ®); |
1557 | 1557 | ||
1558 | switch (action) { | 1558 | switch (action) { |
1559 | case TRUEPHY_BIT_READ: | 1559 | case TRUEPHY_BIT_READ: |
1560 | *value = (reg & mask) >> bitnum; | 1560 | *value = (reg & mask) >> bitnum; |
1561 | break; | 1561 | break; |
1562 | 1562 | ||
1563 | case TRUEPHY_BIT_SET: | 1563 | case TRUEPHY_BIT_SET: |
1564 | et131x_mii_write(adapter, regnum, reg | mask); | 1564 | et131x_mii_write(adapter, regnum, reg | mask); |
1565 | break; | 1565 | break; |
1566 | 1566 | ||
1567 | case TRUEPHY_BIT_CLEAR: | 1567 | case TRUEPHY_BIT_CLEAR: |
1568 | et131x_mii_write(adapter, regnum, reg & ~mask); | 1568 | et131x_mii_write(adapter, regnum, reg & ~mask); |
1569 | break; | 1569 | break; |
1570 | 1570 | ||
1571 | default: | 1571 | default: |
1572 | break; | 1572 | break; |
1573 | } | 1573 | } |
1574 | } | 1574 | } |
1575 | 1575 | ||
1576 | static void et1310_config_flow_control(struct et131x_adapter *adapter) | 1576 | static void et1310_config_flow_control(struct et131x_adapter *adapter) |
1577 | { | 1577 | { |
1578 | struct phy_device *phydev = adapter->phydev; | 1578 | struct phy_device *phydev = adapter->phydev; |
1579 | 1579 | ||
1580 | if (phydev->duplex == DUPLEX_HALF) { | 1580 | if (phydev->duplex == DUPLEX_HALF) { |
1581 | adapter->flowcontrol = FLOW_NONE; | 1581 | adapter->flowcontrol = FLOW_NONE; |
1582 | } else { | 1582 | } else { |
1583 | char remote_pause, remote_async_pause; | 1583 | char remote_pause, remote_async_pause; |
1584 | 1584 | ||
1585 | et1310_phy_access_mii_bit(adapter, | 1585 | et1310_phy_access_mii_bit(adapter, |
1586 | TRUEPHY_BIT_READ, 5, 10, &remote_pause); | 1586 | TRUEPHY_BIT_READ, 5, 10, &remote_pause); |
1587 | et1310_phy_access_mii_bit(adapter, | 1587 | et1310_phy_access_mii_bit(adapter, |
1588 | TRUEPHY_BIT_READ, 5, 11, | 1588 | TRUEPHY_BIT_READ, 5, 11, |
1589 | &remote_async_pause); | 1589 | &remote_async_pause); |
1590 | 1590 | ||
1591 | if ((remote_pause == TRUEPHY_BIT_SET) && | 1591 | if ((remote_pause == TRUEPHY_BIT_SET) && |
1592 | (remote_async_pause == TRUEPHY_BIT_SET)) { | 1592 | (remote_async_pause == TRUEPHY_BIT_SET)) { |
1593 | adapter->flowcontrol = adapter->wanted_flow; | 1593 | adapter->flowcontrol = adapter->wanted_flow; |
1594 | } else if ((remote_pause == TRUEPHY_BIT_SET) && | 1594 | } else if ((remote_pause == TRUEPHY_BIT_SET) && |
1595 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { | 1595 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { |
1596 | if (adapter->wanted_flow == FLOW_BOTH) | 1596 | if (adapter->wanted_flow == FLOW_BOTH) |
1597 | adapter->flowcontrol = FLOW_BOTH; | 1597 | adapter->flowcontrol = FLOW_BOTH; |
1598 | else | 1598 | else |
1599 | adapter->flowcontrol = FLOW_NONE; | 1599 | adapter->flowcontrol = FLOW_NONE; |
1600 | } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && | 1600 | } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && |
1601 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { | 1601 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { |
1602 | adapter->flowcontrol = FLOW_NONE; | 1602 | adapter->flowcontrol = FLOW_NONE; |
1603 | } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && | 1603 | } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && |
1604 | remote_async_pause == TRUEPHY_SET_BIT) */ | 1604 | remote_async_pause == TRUEPHY_SET_BIT) */ |
1605 | if (adapter->wanted_flow == FLOW_BOTH) | 1605 | if (adapter->wanted_flow == FLOW_BOTH) |
1606 | adapter->flowcontrol = FLOW_RXONLY; | 1606 | adapter->flowcontrol = FLOW_RXONLY; |
1607 | else | 1607 | else |
1608 | adapter->flowcontrol = FLOW_NONE; | 1608 | adapter->flowcontrol = FLOW_NONE; |
1609 | } | 1609 | } |
1610 | } | 1610 | } |
1611 | } | 1611 | } |
1612 | 1612 | ||
/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 *
 * Folds the current MACSTAT hardware counter values into the software
 * totals held in adapter->stats.  The hardware registers are only read
 * here, never cleared.
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred	       += readl(&macstat->tx_deferred);
	/* NOTE(review): "excessive" total accumulates the multiple-collisions
	 * register, and vice-versa there is no read of tx_excessive_collisions
	 * at all — looks mismatched; confirm against the MACSTAT register map
	 * before changing.
	 */
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	/* NOTE(review): underflows/max_pkt_errs are fed from the under/over
	 * size frame counters — presumably a deliberate remapping; verify.
	 */
	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs	       += readl(&macstat->rx_align_errs);
	/* NOTE(review): rx_crc_errs reads the code-errors register while
	 * rx_code_violations reads the FCS-errors register — the pairing
	 * looks swapped; confirm against the hardware documentation before
	 * "fixing", since the carry-interrupt handler uses the same mapping.
	 */
	stats->rx_crc_errs	       += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped       += readl(&macstat->rx_drops);
	stats->rx_overflows	       += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations      += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs	       += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs	       += readl(&macstat->rx_fragment_packets);
}
1640 | 1640 | ||
/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped.  Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s).  These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	/* Writing the value just read acknowledges (clears) exactly the
	 * carry bits we observed, without racing with new ones.
	 */
	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to do update the host copy of all the MAC_STAT counters.
	 * For each counter, check it's overflow bit.  If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter.  This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 *
	 * The wrap constant (12- vs 16-bit) reflects the width of each
	 * hardware counter.  The bit-to-counter mapping below must stay in
	 * sync with the masks programmed in et1310_config_macstat_regs().
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions	+= COUNTER_WRAP_12_BIT;
}
1698 | 1698 | ||
1699 | static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) | 1699 | static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) |
1700 | { | 1700 | { |
1701 | struct net_device *netdev = bus->priv; | 1701 | struct net_device *netdev = bus->priv; |
1702 | struct et131x_adapter *adapter = netdev_priv(netdev); | 1702 | struct et131x_adapter *adapter = netdev_priv(netdev); |
1703 | u16 value; | 1703 | u16 value; |
1704 | int ret; | 1704 | int ret; |
1705 | 1705 | ||
1706 | ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); | 1706 | ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); |
1707 | 1707 | ||
1708 | if (ret < 0) | 1708 | if (ret < 0) |
1709 | return ret; | 1709 | return ret; |
1710 | else | 1710 | else |
1711 | return value; | 1711 | return value; |
1712 | } | 1712 | } |
1713 | 1713 | ||
/* MDIO bus write callback.
 *
 * NOTE(review): @phy_addr is ignored — et131x_mii_write() always targets
 * the PHY bound to the adapter (adapter->phydev->addr).  Presumably only
 * one PHY ever sits on this bus; confirm before relying on @phy_addr.
 */
static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}
1721 | 1721 | ||
1722 | static int et131x_mdio_reset(struct mii_bus *bus) | 1722 | static int et131x_mdio_reset(struct mii_bus *bus) |
1723 | { | 1723 | { |
1724 | struct net_device *netdev = bus->priv; | 1724 | struct net_device *netdev = bus->priv; |
1725 | struct et131x_adapter *adapter = netdev_priv(netdev); | 1725 | struct et131x_adapter *adapter = netdev_priv(netdev); |
1726 | 1726 | ||
1727 | et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); | 1727 | et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); |
1728 | 1728 | ||
1729 | return 0; | 1729 | return 0; |
1730 | } | 1730 | } |
1731 | 1731 | ||
1732 | /** | 1732 | /** |
1733 | * et1310_phy_power_down - PHY power control | 1733 | * et1310_phy_power_down - PHY power control |
1734 | * @adapter: device to control | 1734 | * @adapter: device to control |
1735 | * @down: true for off/false for back on | 1735 | * @down: true for off/false for back on |
1736 | * | 1736 | * |
1737 | * one hundred, ten, one thousand megs | 1737 | * one hundred, ten, one thousand megs |
1738 | * How would you like to have your LAN accessed | 1738 | * How would you like to have your LAN accessed |
1739 | * Can't you see that this code processed | 1739 | * Can't you see that this code processed |
1740 | * Phy power, phy power.. | 1740 | * Phy power, phy power.. |
1741 | */ | 1741 | */ |
1742 | static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) | 1742 | static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) |
1743 | { | 1743 | { |
1744 | u16 data; | 1744 | u16 data; |
1745 | 1745 | ||
1746 | et131x_mii_read(adapter, MII_BMCR, &data); | 1746 | et131x_mii_read(adapter, MII_BMCR, &data); |
1747 | data &= ~BMCR_PDOWN; | 1747 | data &= ~BMCR_PDOWN; |
1748 | if (down) | 1748 | if (down) |
1749 | data |= BMCR_PDOWN; | 1749 | data |= BMCR_PDOWN; |
1750 | et131x_mii_write(adapter, MII_BMCR, data); | 1750 | et131x_mii_write(adapter, MII_BMCR, data); |
1751 | } | 1751 | } |
1752 | 1752 | ||
/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 *
 * Configures the PHY interrupt mask and, when the EEPROM allows it,
 * the LED behavior registers.
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 imr;
	u16 isr;
	u16 lcr2;

	/* NOTE(review): isr is read here but never used afterwards —
	 * presumably the read clears latched PHY interrupt status; verify.
	 */
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only.  Bad behavior when link status
	 * and auto neg are set, we run into a nested interrupt problem
	 *
	 * NOTE(review): ANDing three distinct mask constants together is
	 * suspicious — if they are disjoint bits the result is 0 and this
	 * |= is a no-op; '|' may have been intended.  Confirm the constant
	 * values before changing.
	 */
	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
			ET_PHY_INT_MASK_LINKSTAT &
			ET_PHY_INT_MASK_ENABLE);

	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		/* NOTE(review): same concern as above — '&' between two mask
		 * constants; check whether '|' was intended here as well.
		 */
		lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}
1798 | 1798 | ||
1799 | /** | 1799 | /** |
1800 | * et131x_configure_global_regs - configure JAGCore global regs | 1800 | * et131x_configure_global_regs - configure JAGCore global regs |
1801 | * @adapter: pointer to our adapter structure | 1801 | * @adapter: pointer to our adapter structure |
1802 | * | 1802 | * |
1803 | * Used to configure the global registers on the JAGCore | 1803 | * Used to configure the global registers on the JAGCore |
1804 | */ | 1804 | */ |
1805 | static void et131x_configure_global_regs(struct et131x_adapter *adapter) | 1805 | static void et131x_configure_global_regs(struct et131x_adapter *adapter) |
1806 | { | 1806 | { |
1807 | struct global_regs __iomem *regs = &adapter->regs->global; | 1807 | struct global_regs __iomem *regs = &adapter->regs->global; |
1808 | 1808 | ||
1809 | writel(0, ®s->rxq_start_addr); | 1809 | writel(0, ®s->rxq_start_addr); |
1810 | writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); | 1810 | writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); |
1811 | 1811 | ||
1812 | if (adapter->registry_jumbo_packet < 2048) { | 1812 | if (adapter->registry_jumbo_packet < 2048) { |
1813 | /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word | 1813 | /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word |
1814 | * block of RAM that the driver can split between Tx | 1814 | * block of RAM that the driver can split between Tx |
1815 | * and Rx as it desires. Our default is to split it | 1815 | * and Rx as it desires. Our default is to split it |
1816 | * 50/50: | 1816 | * 50/50: |
1817 | */ | 1817 | */ |
1818 | writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); | 1818 | writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); |
1819 | writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); | 1819 | writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); |
1820 | } else if (adapter->registry_jumbo_packet < 8192) { | 1820 | } else if (adapter->registry_jumbo_packet < 8192) { |
1821 | /* For jumbo packets > 2k but < 8k, split 50-50. */ | 1821 | /* For jumbo packets > 2k but < 8k, split 50-50. */ |
1822 | writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); | 1822 | writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); |
1823 | writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); | 1823 | writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); |
1824 | } else { | 1824 | } else { |
1825 | /* 9216 is the only packet size greater than 8k that | 1825 | /* 9216 is the only packet size greater than 8k that |
1826 | * is available. The Tx buffer has to be big enough | 1826 | * is available. The Tx buffer has to be big enough |
1827 | * for one whole packet on the Tx side. We'll make | 1827 | * for one whole packet on the Tx side. We'll make |
1828 | * the Tx 9408, and give the rest to Rx | 1828 | * the Tx 9408, and give the rest to Rx |
1829 | */ | 1829 | */ |
1830 | writel(0x01b3, ®s->rxq_end_addr); | 1830 | writel(0x01b3, ®s->rxq_end_addr); |
1831 | writel(0x01b4, ®s->txq_start_addr); | 1831 | writel(0x01b4, ®s->txq_start_addr); |
1832 | } | 1832 | } |
1833 | 1833 | ||
1834 | /* Initialize the loopback register. Disable all loopbacks. */ | 1834 | /* Initialize the loopback register. Disable all loopbacks. */ |
1835 | writel(0, ®s->loopback); | 1835 | writel(0, ®s->loopback); |
1836 | 1836 | ||
1837 | /* MSI Register */ | 1837 | /* MSI Register */ |
1838 | writel(0, ®s->msi_config); | 1838 | writel(0, ®s->msi_config); |
1839 | 1839 | ||
1840 | /* By default, disable the watchdog timer. It will be enabled when | 1840 | /* By default, disable the watchdog timer. It will be enabled when |
1841 | * a packet is queued. | 1841 | * a packet is queued. |
1842 | */ | 1842 | */ |
1843 | writel(0, ®s->watchdog_timer); | 1843 | writel(0, ®s->watchdog_timer); |
1844 | } | 1844 | } |
1845 | 1845 | ||
/**
 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
 * @adapter: pointer to our adapter structure
 *
 * Halts the Rx DMA engine, programs the status writeback area, the packet
 * status ring (PSR) and the free buffer ring(s), then sets the interrupt
 * coalescing parameters. The register write order below is part of the
 * hardware init sequence — do not reorder.
 */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address
	 *
	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Read back the descriptor count (low 12 bits) and set the PSR
	 * low-water mark as a percentage of it.
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	/* Hold rcv_lock while the software ring state is rewritten */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
	       &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
	writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
	writel(
	((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
	       &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
	writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
	writel(
	((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}
1960 | 1960 | ||
1961 | /** | 1961 | /** |
1962 | * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. | 1962 | * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. |
1963 | * @adapter: pointer to our private adapter structure | 1963 | * @adapter: pointer to our private adapter structure |
1964 | * | 1964 | * |
1965 | * Configure the transmit engine with the ring buffers we have created | 1965 | * Configure the transmit engine with the ring buffers we have created |
1966 | * and prepare it for use. | 1966 | * and prepare it for use. |
1967 | */ | 1967 | */ |
1968 | static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) | 1968 | static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) |
1969 | { | 1969 | { |
1970 | struct txdma_regs __iomem *txdma = &adapter->regs->txdma; | 1970 | struct txdma_regs __iomem *txdma = &adapter->regs->txdma; |
1971 | 1971 | ||
1972 | /* Load the hardware with the start of the transmit descriptor ring. */ | 1972 | /* Load the hardware with the start of the transmit descriptor ring. */ |
1973 | writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), | 1973 | writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), |
1974 | &txdma->pr_base_hi); | 1974 | &txdma->pr_base_hi); |
1975 | writel((u32) adapter->tx_ring.tx_desc_ring_pa, | 1975 | writel((u32) adapter->tx_ring.tx_desc_ring_pa, |
1976 | &txdma->pr_base_lo); | 1976 | &txdma->pr_base_lo); |
1977 | 1977 | ||
1978 | /* Initialise the transmit DMA engine */ | 1978 | /* Initialise the transmit DMA engine */ |
1979 | writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); | 1979 | writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); |
1980 | 1980 | ||
1981 | /* Load the completion writeback physical address */ | 1981 | /* Load the completion writeback physical address */ |
1982 | writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), | 1982 | writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), |
1983 | &txdma->dma_wb_base_hi); | 1983 | &txdma->dma_wb_base_hi); |
1984 | writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); | 1984 | writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); |
1985 | 1985 | ||
1986 | *adapter->tx_ring.tx_status = 0; | 1986 | *adapter->tx_ring.tx_status = 0; |
1987 | 1987 | ||
1988 | writel(0, &txdma->service_request); | 1988 | writel(0, &txdma->service_request); |
1989 | adapter->tx_ring.send_idx = 0; | 1989 | adapter->tx_ring.send_idx = 0; |
1990 | } | 1990 | } |
1991 | 1991 | ||
/**
 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
 * @adapter: pointer to our private adapter structure
 *
 * Runs the full hardware bring-up sequence: global regs, MAC, MMC,
 * Rx/Tx MAC, Rx/Tx DMA, MAC statistics, then powers up and initializes
 * the PHY. The call order follows the documented init sequence — do not
 * reorder.
 */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	/* Power up the PHY (argument 0 = "not down") before init */
	et1310_phy_power_down(adapter, 0);
	et131x_xcvr_init(adapter);
}
2020 | 2020 | ||
/**
 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
 * @adapter: pointer to our private adapter structure
 *
 * Disables the MAC core, pulses the global software reset, then clears
 * the MAC config register. The three-write sequence and its order are
 * required by the hardware; the magic values are the documented reset
 * patterns for these registers.
 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	/* Disable MAC Core */
	writel(0xc00f0000, &adapter->regs->mac.cfg1);

	/* Set everything to a reset value */
	writel(0x7F, &adapter->regs->global.sw_reset);
	writel(0x000f0000, &adapter->regs->mac.cfg1);
	writel(0x00000000, &adapter->regs->mac.cfg1);
}
2035 | 2035 | ||
2036 | /** | 2036 | /** |
2037 | * et131x_enable_interrupts - enable interrupt | 2037 | * et131x_enable_interrupts - enable interrupt |
2038 | * @adapter: et131x device | 2038 | * @adapter: et131x device |
2039 | * | 2039 | * |
2040 | * Enable the appropriate interrupts on the ET131x according to our | 2040 | * Enable the appropriate interrupts on the ET131x according to our |
2041 | * configuration | 2041 | * configuration |
2042 | */ | 2042 | */ |
2043 | static void et131x_enable_interrupts(struct et131x_adapter *adapter) | 2043 | static void et131x_enable_interrupts(struct et131x_adapter *adapter) |
2044 | { | 2044 | { |
2045 | u32 mask; | 2045 | u32 mask; |
2046 | 2046 | ||
2047 | /* Enable all global interrupts */ | 2047 | /* Enable all global interrupts */ |
2048 | if (adapter->flowcontrol == FLOW_TXONLY || | 2048 | if (adapter->flowcontrol == FLOW_TXONLY || |
2049 | adapter->flowcontrol == FLOW_BOTH) | 2049 | adapter->flowcontrol == FLOW_BOTH) |
2050 | mask = INT_MASK_ENABLE; | 2050 | mask = INT_MASK_ENABLE; |
2051 | else | 2051 | else |
2052 | mask = INT_MASK_ENABLE_NO_FLOW; | 2052 | mask = INT_MASK_ENABLE_NO_FLOW; |
2053 | 2053 | ||
2054 | writel(mask, &adapter->regs->global.int_mask); | 2054 | writel(mask, &adapter->regs->global.int_mask); |
2055 | } | 2055 | } |
2056 | 2056 | ||
/**
 * et131x_disable_interrupts - interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself by
 * writing the all-masked pattern to the global interrupt mask register.
 */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
2068 | 2068 | ||
2069 | /** | 2069 | /** |
2070 | * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 | 2070 | * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 |
2071 | * @adapter: pointer to our adapter structure | 2071 | * @adapter: pointer to our adapter structure |
2072 | */ | 2072 | */ |
2073 | static void et131x_tx_dma_disable(struct et131x_adapter *adapter) | 2073 | static void et131x_tx_dma_disable(struct et131x_adapter *adapter) |
2074 | { | 2074 | { |
2075 | /* Setup the tramsmit dma configuration register */ | 2075 | /* Setup the tramsmit dma configuration register */ |
2076 | writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT, | 2076 | writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT, |
2077 | &adapter->regs->txdma.csr); | 2077 | &adapter->regs->txdma.csr); |
2078 | } | 2078 | } |
2079 | 2079 | ||
2080 | /** | 2080 | /** |
2081 | * et131x_enable_txrx - Enable tx/rx queues | 2081 | * et131x_enable_txrx - Enable tx/rx queues |
2082 | * @netdev: device to be enabled | 2082 | * @netdev: device to be enabled |
2083 | */ | 2083 | */ |
2084 | static void et131x_enable_txrx(struct net_device *netdev) | 2084 | static void et131x_enable_txrx(struct net_device *netdev) |
2085 | { | 2085 | { |
2086 | struct et131x_adapter *adapter = netdev_priv(netdev); | 2086 | struct et131x_adapter *adapter = netdev_priv(netdev); |
2087 | 2087 | ||
2088 | /* Enable the Tx and Rx DMA engines (if not already enabled) */ | 2088 | /* Enable the Tx and Rx DMA engines (if not already enabled) */ |
2089 | et131x_rx_dma_enable(adapter); | 2089 | et131x_rx_dma_enable(adapter); |
2090 | et131x_tx_dma_enable(adapter); | 2090 | et131x_tx_dma_enable(adapter); |
2091 | 2091 | ||
2092 | /* Enable device interrupts */ | 2092 | /* Enable device interrupts */ |
2093 | if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE) | 2093 | if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE) |
2094 | et131x_enable_interrupts(adapter); | 2094 | et131x_enable_interrupts(adapter); |
2095 | 2095 | ||
2096 | /* We're ready to move some data, so start the queue */ | 2096 | /* We're ready to move some data, so start the queue */ |
2097 | netif_start_queue(netdev); | 2097 | netif_start_queue(netdev); |
2098 | } | 2098 | } |
2099 | 2099 | ||
/**
 * et131x_disable_txrx - Disable tx/rx queues
 * @netdev: device to be disabled
 *
 * Stops the transmit queue first so no new packets arrive, then halts
 * both DMA engines and masks device interrupts.
 */
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *priv = netdev_priv(netdev);

	/* Stop the stack from handing us packets before touching DMA */
	netif_stop_queue(netdev);

	/* Halt the Tx and Rx DMA engines */
	et131x_rx_dma_disable(priv);
	et131x_tx_dma_disable(priv);

	/* Mask all device interrupts */
	et131x_disable_interrupts(priv);
}
2118 | 2118 | ||
2119 | /** | 2119 | /** |
2120 | * et131x_init_send - Initialize send data structures | 2120 | * et131x_init_send - Initialize send data structures |
2121 | * @adapter: pointer to our private adapter structure | 2121 | * @adapter: pointer to our private adapter structure |
2122 | */ | 2122 | */ |
2123 | static void et131x_init_send(struct et131x_adapter *adapter) | 2123 | static void et131x_init_send(struct et131x_adapter *adapter) |
2124 | { | 2124 | { |
2125 | struct tcb *tcb; | 2125 | struct tcb *tcb; |
2126 | u32 ct; | 2126 | u32 ct; |
2127 | struct tx_ring *tx_ring; | 2127 | struct tx_ring *tx_ring; |
2128 | 2128 | ||
2129 | /* Setup some convenience pointers */ | 2129 | /* Setup some convenience pointers */ |
2130 | tx_ring = &adapter->tx_ring; | 2130 | tx_ring = &adapter->tx_ring; |
2131 | tcb = adapter->tx_ring.tcb_ring; | 2131 | tcb = adapter->tx_ring.tcb_ring; |
2132 | 2132 | ||
2133 | tx_ring->tcb_qhead = tcb; | 2133 | tx_ring->tcb_qhead = tcb; |
2134 | 2134 | ||
2135 | memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); | 2135 | memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); |
2136 | 2136 | ||
2137 | /* Go through and set up each TCB */ | 2137 | /* Go through and set up each TCB */ |
2138 | for (ct = 0; ct++ < NUM_TCB; tcb++) | 2138 | for (ct = 0; ct++ < NUM_TCB; tcb++) |
2139 | /* Set the link pointer in HW TCB to the next TCB in the | 2139 | /* Set the link pointer in HW TCB to the next TCB in the |
2140 | * chain | 2140 | * chain |
2141 | */ | 2141 | */ |
2142 | tcb->next = tcb + 1; | 2142 | tcb->next = tcb + 1; |
2143 | 2143 | ||
2144 | /* Set the tail pointer */ | 2144 | /* Set the tail pointer */ |
2145 | tcb--; | 2145 | tcb--; |
2146 | tx_ring->tcb_qtail = tcb; | 2146 | tx_ring->tcb_qtail = tcb; |
2147 | tcb->next = NULL; | 2147 | tcb->next = NULL; |
2148 | /* Curr send queue should now be empty */ | 2148 | /* Curr send queue should now be empty */ |
2149 | tx_ring->send_head = NULL; | 2149 | tx_ring->send_head = NULL; |
2150 | tx_ring->send_tail = NULL; | 2150 | tx_ring->send_tail = NULL; |
2151 | } | 2151 | } |
2152 | 2152 | ||
/**
 * et1310_enable_phy_coma - called when network cable is unplugged
 * @adapter: pointer to our adapter structure
 *
 * driver receive an phy status change interrupt while in D0 and check that
 * phy_status is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; Perform software reset JAGCore,
 *    re-initialize jagcore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	/* Snapshot the power-management CSR; it is modified in two ordered
	 * steps below (clock gate first, then coma mode).
	 */
	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	/*
	 * TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->pdown_speed = adapter->ai_force_speed;
	 * adapter->pdown_duplex = adapter->ai_force_duplex;
	 */

	/* Stop sending packets. The flag is set under send_hw_lock so the
	 * transmit path observes it consistently.
	 */
	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */

	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}
2207 | 2207 | ||
/**
 * et1310_disable_phy_coma - Disable the Phy Coma Mode
 * @adapter: pointer to our adapter structure
 *
 * Re-enables the JAGCore clock domains, takes the gigE PHY out of its
 * software coma state, then re-initializes the send structures and
 * soft-resets the device so normal operation can resume once
 * autonegotiation completes again.
 */
static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	/* TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->ai_force_speed = adapter->pdown_speed;
	 * adapter->ai_force_duplex = adapter->pdown_duplex;
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete. This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ?? */
	et131x_adapter_setup(adapter);

	/* Allow Tx to restart */
	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}
2249 | 2249 | ||
2250 | static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) | 2250 | static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) |
2251 | { | 2251 | { |
2252 | u32 tmp_free_buff_ring = *free_buff_ring; | 2252 | u32 tmp_free_buff_ring = *free_buff_ring; |
2253 | tmp_free_buff_ring++; | 2253 | tmp_free_buff_ring++; |
2254 | /* This works for all cases where limit < 1024. The 1023 case | 2254 | /* This works for all cases where limit < 1024. The 1023 case |
2255 | works because 1023++ is 1024 which means the if condition is not | 2255 | works because 1023++ is 1024 which means the if condition is not |
2256 | taken but the carry of the bit into the wrap bit toggles the wrap | 2256 | taken but the carry of the bit into the wrap bit toggles the wrap |
2257 | value correctly */ | 2257 | value correctly */ |
2258 | if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { | 2258 | if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { |
2259 | tmp_free_buff_ring &= ~ET_DMA10_MASK; | 2259 | tmp_free_buff_ring &= ~ET_DMA10_MASK; |
2260 | tmp_free_buff_ring ^= ET_DMA10_WRAP; | 2260 | tmp_free_buff_ring ^= ET_DMA10_WRAP; |
2261 | } | 2261 | } |
2262 | /* For the 1023 case */ | 2262 | /* For the 1023 case */ |
2263 | tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); | 2263 | tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); |
2264 | *free_buff_ring = tmp_free_buff_ring; | 2264 | *free_buff_ring = tmp_free_buff_ring; |
2265 | return tmp_free_buff_ring; | 2265 | return tmp_free_buff_ring; |
2266 | } | 2266 | } |
2267 | 2267 | ||
2268 | /** | 2268 | /** |
2269 | * et131x_align_allocated_memory - Align allocated memory on a given boundary | 2269 | * et131x_align_allocated_memory - Align allocated memory on a given boundary |
2270 | * @adapter: pointer to our adapter structure | 2270 | * @adapter: pointer to our adapter structure |
2271 | * @phys_addr: pointer to Physical address | 2271 | * @phys_addr: pointer to Physical address |
2272 | * @offset: pointer to the offset variable | 2272 | * @offset: pointer to the offset variable |
2273 | * @mask: correct mask | 2273 | * @mask: correct mask |
2274 | */ | 2274 | */ |
2275 | static void et131x_align_allocated_memory(struct et131x_adapter *adapter, | 2275 | static void et131x_align_allocated_memory(struct et131x_adapter *adapter, |
2276 | u64 *phys_addr, u64 *offset, | 2276 | u64 *phys_addr, u64 *offset, |
2277 | u64 mask) | 2277 | u64 mask) |
2278 | { | 2278 | { |
2279 | u64 new_addr = *phys_addr & ~mask; | 2279 | u64 new_addr = *phys_addr & ~mask; |
2280 | 2280 | ||
2281 | *offset = 0; | 2281 | *offset = 0; |
2282 | 2282 | ||
2283 | if (new_addr != *phys_addr) { | 2283 | if (new_addr != *phys_addr) { |
2284 | /* Move to next aligned block */ | 2284 | /* Move to next aligned block */ |
2285 | new_addr += mask + 1; | 2285 | new_addr += mask + 1; |
2286 | /* Return offset for adjusting virt addr */ | 2286 | /* Return offset for adjusting virt addr */ |
2287 | *offset = new_addr - *phys_addr; | 2287 | *offset = new_addr - *phys_addr; |
2288 | /* Return new physical address */ | 2288 | /* Return new physical address */ |
2289 | *phys_addr = new_addr; | 2289 | *phys_addr = new_addr; |
2290 | } | 2290 | } |
2291 | } | 2291 | } |
2292 | 2292 | ||
/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 *
 * NOTE(review): the fbr[] indexing is inverted relative to the hardware
 * naming throughout this function — fbr[0] is Free Buffer Ring 1 and
 * fbr[1] is Free Buffer Ring 0 (see the dev_err strings below).
 *
 * NOTE(review): on any -ENOMEM path this function returns without
 * unwinding earlier allocations — presumably the caller invokes
 * et131x_rx_dma_memory_free() on failure; verify against the caller.
 */
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u32 i, j;
	u32 bufsize;
	u32 pktstat_ringsize, fbr_chunksize;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Alloc memory for the lookup table.
	 * NOTE(review): these kmalloc results are not checked for NULL
	 * before being dereferenced below — TODO confirm/handle.
	 */
#ifdef USE_FBR0
	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
#endif
	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);

	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increases the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have less entries. Conversely, FBR1 will now be relied upon
	 * to carry more "normal" frames, thus it's entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets.
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when it's size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
#ifdef USE_FBR0
		rx_ring->fbr[1]->buffsize = 256;
		rx_ring->fbr[1]->num_entries = 512;
#endif
		rx_ring->fbr[0]->buffsize = 2048;
		rx_ring->fbr[0]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
#ifdef USE_FBR0
		rx_ring->fbr[1]->buffsize = 512;
		rx_ring->fbr[1]->num_entries = 1024;
#endif
		rx_ring->fbr[0]->buffsize = 4096;
		rx_ring->fbr[0]->num_entries = 512;
	} else {
#ifdef USE_FBR0
		rx_ring->fbr[1]->buffsize = 1024;
		rx_ring->fbr[1]->num_entries = 768;
#endif
		rx_ring->fbr[0]->buffsize = 16384;
		rx_ring->fbr[0]->num_entries = 128;
	}

	/* The Packet Status Ring needs one entry per free-buffer entry */
#ifdef USE_FBR0
	adapter->rx_ring.psr_num_entries =
				adapter->rx_ring.fbr[1]->num_entries +
				adapter->rx_ring.fbr[0]->num_entries;
#else
	adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
#endif

	/* Allocate an area of memory for Free Buffer Ring 1.
	 * 0xfff of slack is added so the ring can be 4K-aligned below.
	 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
								0xfff;
	rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
					bufsize,
					&rx_ring->fbr[0]->ring_physaddr,
					GFP_KERNEL);
	if (!rx_ring->fbr[0]->ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 1\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;

	/* Align Free Buffer Ring 1 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->fbr[0]->real_physaddr,
				      &rx_ring->fbr[0]->offset, 0x0FFF);

	/* Shift the virtual address by the same offset so virt and phys
	 * continue to refer to the same bytes.
	 */
	rx_ring->fbr[0]->ring_virtaddr =
			(void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
			rx_ring->fbr[0]->offset);

#ifdef USE_FBR0
	/* Allocate an area of memory for Free Buffer Ring 0 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
								0xfff;
	rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
					bufsize,
					&rx_ring->fbr[1]->ring_physaddr,
					GFP_KERNEL);
	if (!rx_ring->fbr[1]->ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 0\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;

	/* Align Free Buffer Ring 0 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->fbr[1]->real_physaddr,
				      &rx_ring->fbr[1]->offset, 0x0FFF);

	rx_ring->fbr[1]->ring_virtaddr =
			(void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
			rx_ring->fbr[1]->offset);
#endif

	/* Populate the FBR1 buffers, FBR_CHUNKS entries per DMA region */
	for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
		u64 fbr1_tmp_physaddr;
		u64 fbr1_offset;
		u32 fbr1_align;

		/* This code allocates an area of memory big enough for N
		 * free buffers + (buffer_size - 1) so that the buffers can
		 * be aligned on 4k boundaries. If each buffer were aligned
		 * to a buffer_size boundary, the effect would be to double
		 * the size of FBR0. By allocating N buffers at once, we
		 * reduce this overhead.
		 */
		if (rx_ring->fbr[0]->buffsize > 4096)
			fbr1_align = 4096;
		else
			fbr1_align = rx_ring->fbr[0]->buffsize;

		fbr_chunksize =
		    (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
		rx_ring->fbr[0]->mem_virtaddrs[i] =
		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
				       &rx_ring->fbr[0]->mem_physaddrs[i],
				       GFP_KERNEL);

		if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
			dev_err(&adapter->pdev->dev,
				"Could not alloc memory\n");
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];

		et131x_align_allocated_memory(adapter,
					      &fbr1_tmp_physaddr,
					      &fbr1_offset, (fbr1_align - 1));

		for (j = 0; j < FBR_CHUNKS; j++) {
			u32 index = (i * FBR_CHUNKS) + j;

			/* Save the Virtual address of this index for quick
			 * access later
			 */
			rx_ring->fbr[0]->virt[index] =
			    (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
			    (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;

			/* now store the physical address in the descriptor
			 * so the device can access it
			 */
			rx_ring->fbr[0]->bus_high[index] =
			    (u32) (fbr1_tmp_physaddr >> 32);
			rx_ring->fbr[0]->bus_low[index] =
			    (u32) fbr1_tmp_physaddr;

			fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;

			/* buffer2 points 4 bytes before buffer1; the reason
			 * for the -4 is not evident here — TODO confirm
			 * against the descriptor layout.
			 */
			rx_ring->fbr[0]->buffer1[index] =
			    rx_ring->fbr[0]->virt[index];
			rx_ring->fbr[0]->buffer2[index] =
			    rx_ring->fbr[0]->virt[index] - 4;
		}
	}

#ifdef USE_FBR0
	/* Same for FBR0 (if in use) */
	for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
		u64 fbr0_tmp_physaddr;
		u64 fbr0_offset;

		fbr_chunksize =
		    ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
		rx_ring->fbr[1]->mem_virtaddrs[i] =
		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
				       &rx_ring->fbr[1]->mem_physaddrs[i],
				       GFP_KERNEL);

		if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
			dev_err(&adapter->pdev->dev,
				"Could not alloc memory\n");
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];

		et131x_align_allocated_memory(adapter,
					      &fbr0_tmp_physaddr,
					      &fbr0_offset,
					      rx_ring->fbr[1]->buffsize - 1);

		for (j = 0; j < FBR_CHUNKS; j++) {
			u32 index = (i * FBR_CHUNKS) + j;

			rx_ring->fbr[1]->virt[index] =
			    (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
			    (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;

			rx_ring->fbr[1]->bus_high[index] =
			    (u32) (fbr0_tmp_physaddr >> 32);
			rx_ring->fbr[1]->bus_low[index] =
			    (u32) fbr0_tmp_physaddr;

			fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;

			rx_ring->fbr[1]->buffer1[index] =
			    rx_ring->fbr[1]->virt[index];
			rx_ring->fbr[1]->buffer2[index] =
			    rx_ring->fbr[1]->virt[index] - 4;
		}
	}
#endif

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	pktstat_ringsize =
	    sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						  pktstat_ringsize,
						  &rx_ring->ps_ring_physaddr,
						  GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "Packet Status Ring %lx\n",
	    (unsigned long) rx_ring->ps_ring_physaddr);

	/*
	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
					    sizeof(struct rx_status_block),
					    &rx_ring->rx_status_bus,
					    GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
	printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);

	/* Recv
	 * kmem_cache_create initializes a lookaside list. After successful
	 * creation, nonpaged fixed-size blocks can be allocated from and
	 * freed to the lookaside list.
	 * RFDs will be allocated from this pool.
	 * NOTE(review): the kmem_cache_create() result is not checked for
	 * NULL here — TODO confirm downstream users tolerate that.
	 */
	rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
						   sizeof(struct rfd),
						   0,
						   SLAB_CACHE_DMA |
						   SLAB_HWCACHE_ALIGN,
						   NULL);

	adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}
2601 | 2601 | ||
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Releases everything the RX DMA allocator set up: the RFD lookaside
 * objects, both Free Buffer Rings (packet buffers and descriptor FIFOs),
 * the Packet Status Ring, the status writeback block, the lookaside slab
 * cache itself and the FBR lookup tables.  Safe to call on a partially
 * initialised rx_ring: every free is guarded by a non-NULL/flag check.
 *
 * NOTE(review): the buffer/FIFO sizes passed to dma_free_coherent() must
 * mirror the sizes used at allocation time (alignment slack included) —
 * the allocator is outside this view, so the arithmetic here is kept
 * exactly as written.
 */
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktstat_ringsize;
	struct rfd *rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors.  At teardown every
	 * RFD should be back on the recv_list; warn if any are still out.
	 */
	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
				struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
	}

	/* Free Free Buffer Ring 1 (stored at fbr[0] — the fbr[] array
	 * index is reversed relative to the hardware ring number)
	 */
	if (rx_ring->fbr[0]->ring_virtaddr) {
		/* First the packet memory, freed in FBR_CHUNKS-sized
		 * coherent allocations
		 */
		for (index = 0; index <
		     (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
			if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
				u32 fbr1_align;

				/* Alignment slack matches the allocator:
				 * capped at one 4K page
				 */
				if (rx_ring->fbr[0]->buffsize > 4096)
					fbr1_align = 4096;
				else
					fbr1_align = rx_ring->fbr[0]->buffsize;

				bufsize =
				    (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
				    fbr1_align - 1;

				dma_free_coherent(&adapter->pdev->dev,
					bufsize,
					rx_ring->fbr[0]->mem_virtaddrs[index],
					rx_ring->fbr[0]->mem_physaddrs[index]);

				rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself: undo the alignment offset applied at
		 * allocation time to recover the original cpu address
		 */
		rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);

		/* 0xfff = 4K-alignment slack included at allocation */
		bufsize =
		    (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
		    0xfff;

		dma_free_coherent(&adapter->pdev->dev, bufsize,
				  rx_ring->fbr[0]->ring_virtaddr,
				  rx_ring->fbr[0]->ring_physaddr);

		rx_ring->fbr[0]->ring_virtaddr = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 (stored at fbr[1]) */
	if (rx_ring->fbr[1]->ring_virtaddr) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
			if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
				bufsize =
				    (rx_ring->fbr[1]->buffsize *
				     (FBR_CHUNKS + 1)) - 1;

				dma_free_coherent(&adapter->pdev->dev,
					bufsize,
					rx_ring->fbr[1]->mem_virtaddrs[index],
					rx_ring->fbr[1]->mem_physaddrs[index]);

				rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);

		bufsize =
		    (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
		    0xfff;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  rx_ring->fbr[1]->ring_virtaddr,
				  rx_ring->fbr[1]->ring_physaddr);

		rx_ring->fbr[1]->ring_virtaddr = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		pktstat_ringsize =
		    sizeof(struct pkt_stat_desc) *
		    adapter->rx_ring.psr_num_entries;

		dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Destroy the lookaside (RFD) pool; the flag records whether the
	 * cache was ever created
	 */
	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->recv_lookaside);
		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[1]);
#endif

	kfree(rx_ring->fbr[0]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}
2744 | 2744 | ||
2745 | /** | 2745 | /** |
2746 | * et131x_init_recv - Initialize receive data structures. | 2746 | * et131x_init_recv - Initialize receive data structures. |
2747 | * @adapter: pointer to our private adapter structure | 2747 | * @adapter: pointer to our private adapter structure |
2748 | * | 2748 | * |
2749 | * Returns 0 on success and errno on failure (as defined in errno.h) | 2749 | * Returns 0 on success and errno on failure (as defined in errno.h) |
2750 | */ | 2750 | */ |
2751 | static int et131x_init_recv(struct et131x_adapter *adapter) | 2751 | static int et131x_init_recv(struct et131x_adapter *adapter) |
2752 | { | 2752 | { |
2753 | int status = -ENOMEM; | 2753 | int status = -ENOMEM; |
2754 | struct rfd *rfd = NULL; | 2754 | struct rfd *rfd = NULL; |
2755 | u32 rfdct; | 2755 | u32 rfdct; |
2756 | u32 numrfd = 0; | 2756 | u32 numrfd = 0; |
2757 | struct rx_ring *rx_ring; | 2757 | struct rx_ring *rx_ring; |
2758 | 2758 | ||
2759 | /* Setup some convenience pointers */ | 2759 | /* Setup some convenience pointers */ |
2760 | rx_ring = &adapter->rx_ring; | 2760 | rx_ring = &adapter->rx_ring; |
2761 | 2761 | ||
2762 | /* Setup each RFD */ | 2762 | /* Setup each RFD */ |
2763 | for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { | 2763 | for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { |
2764 | rfd = kmem_cache_alloc(rx_ring->recv_lookaside, | 2764 | rfd = kmem_cache_alloc(rx_ring->recv_lookaside, |
2765 | GFP_ATOMIC | GFP_DMA); | 2765 | GFP_ATOMIC | GFP_DMA); |
2766 | 2766 | ||
2767 | if (!rfd) { | 2767 | if (!rfd) { |
2768 | dev_err(&adapter->pdev->dev, | 2768 | dev_err(&adapter->pdev->dev, |
2769 | "Couldn't alloc RFD out of kmem_cache\n"); | 2769 | "Couldn't alloc RFD out of kmem_cache\n"); |
2770 | status = -ENOMEM; | 2770 | status = -ENOMEM; |
2771 | continue; | 2771 | continue; |
2772 | } | 2772 | } |
2773 | 2773 | ||
2774 | rfd->skb = NULL; | 2774 | rfd->skb = NULL; |
2775 | 2775 | ||
2776 | /* Add this RFD to the recv_list */ | 2776 | /* Add this RFD to the recv_list */ |
2777 | list_add_tail(&rfd->list_node, &rx_ring->recv_list); | 2777 | list_add_tail(&rfd->list_node, &rx_ring->recv_list); |
2778 | 2778 | ||
2779 | /* Increment both the available RFD's, and the total RFD's. */ | 2779 | /* Increment both the available RFD's, and the total RFD's. */ |
2780 | rx_ring->num_ready_recv++; | 2780 | rx_ring->num_ready_recv++; |
2781 | numrfd++; | 2781 | numrfd++; |
2782 | } | 2782 | } |
2783 | 2783 | ||
2784 | if (numrfd > NIC_MIN_NUM_RFD) | 2784 | if (numrfd > NIC_MIN_NUM_RFD) |
2785 | status = 0; | 2785 | status = 0; |
2786 | 2786 | ||
2787 | rx_ring->num_rfd = numrfd; | 2787 | rx_ring->num_rfd = numrfd; |
2788 | 2788 | ||
2789 | if (status != 0) { | 2789 | if (status != 0) { |
2790 | kmem_cache_free(rx_ring->recv_lookaside, rfd); | 2790 | kmem_cache_free(rx_ring->recv_lookaside, rfd); |
2791 | dev_err(&adapter->pdev->dev, | 2791 | dev_err(&adapter->pdev->dev, |
2792 | "Allocation problems in et131x_init_recv\n"); | 2792 | "Allocation problems in et131x_init_recv\n"); |
2793 | } | 2793 | } |
2794 | return status; | 2794 | return status; |
2795 | } | 2795 | } |
2796 | 2796 | ||
2797 | /** | 2797 | /** |
2798 | * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. | 2798 | * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. |
2799 | * @adapter: pointer to our adapter structure | 2799 | * @adapter: pointer to our adapter structure |
2800 | */ | 2800 | */ |
2801 | static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) | 2801 | static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) |
2802 | { | 2802 | { |
2803 | struct phy_device *phydev = adapter->phydev; | 2803 | struct phy_device *phydev = adapter->phydev; |
2804 | 2804 | ||
2805 | if (!phydev) | 2805 | if (!phydev) |
2806 | return; | 2806 | return; |
2807 | 2807 | ||
2808 | /* For version B silicon, we do not use the RxDMA timer for 10 and 100 | 2808 | /* For version B silicon, we do not use the RxDMA timer for 10 and 100 |
2809 | * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. | 2809 | * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. |
2810 | */ | 2810 | */ |
2811 | if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { | 2811 | if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { |
2812 | writel(0, &adapter->regs->rxdma.max_pkt_time); | 2812 | writel(0, &adapter->regs->rxdma.max_pkt_time); |
2813 | writel(1, &adapter->regs->rxdma.num_pkt_done); | 2813 | writel(1, &adapter->regs->rxdma.num_pkt_done); |
2814 | } | 2814 | } |
2815 | } | 2815 | } |
2816 | 2816 | ||
/**
 * NICReturnRFD - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD
 *
 * Gives the RFD's buffer back to the hardware by writing its address and
 * index into the oldest Free Buffer Ring entry and bumping the ring's full
 * offset, then requeues the RFD itself on recv_list.  Note the fbr[]
 * array index is reversed relative to the hardware ring number: hardware
 * ring 1 is fbr[0] and ring 0 is fbr[1].
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
#endif
	    (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
		spin_lock_irqsave(&adapter->fbr_lock, flags);

		if (ring_index == 1) {
			/* local_full holds index + wrap bit; INDEX10 strips
			 * the wrap bit to address the descriptor
			 */
			struct fbr_desc *next = (struct fbr_desc *)
					(rx_local->fbr[0]->ring_virtaddr) +
					INDEX10(rx_local->fbr[0]->local_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
			next->word2 = buff_index;

			/* Advance local_full (with wrap) and tell the
			 * hardware about the newly available buffer
			 */
			writel(bump_free_buff_ring(
					&rx_local->fbr[0]->local_full,
					rx_local->fbr[0]->num_entries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->fbr[1]->ring_virtaddr +
					INDEX10(rx_local->fbr[1]->local_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
			next->word2 = buff_index;

			writel(bump_free_buff_ring(
					&rx_local->fbr[1]->local_full,
					rx_local->fbr[1]->num_entries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
2894 | 2894 | ||
/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @adapter: pointer to our adapter
 *
 * Returns rfd, a pointer to our MPRFD, or NULL when no packet is
 * available (or the packet could not be delivered).
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	u32 i;
	u8 *buf;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;	/* Get the useful bits */

	/* Check the PSR and wrap bits do not match; equality (index and
	 * wrap bit both the same) means the hardware has not advanced past
	 * our local copy, i.e. nothing new arrived.
	 */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		/* Looks like this ring is not updated yet */
		return NULL;

	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
			(rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is
	 * advanced, since we can no longer rely on the memory being
	 * accurate
	 */
	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	/* Indicate that we have used this PSR entry. */
	/* FIXME wrap 12 */
	add_12bit(&rx_local->local_psr_full, 1);
	if (
	  (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full,
	       &adapter->regs->rxdma.psr_full_offset);

#ifndef USE_FBR0
	/* Without FBR0 only hardware ring 1 is valid */
	if (ring_index != 1)
		return NULL;
#endif

	/* Sanity-check the ring/buffer indices against the configured ring
	 * sizes (fbr[] index is reversed: ring 0 -> fbr[1], ring 1 -> fbr[0])
	 */
#ifdef USE_FBR0
	if (ring_index > 1 ||
		(ring_index == 0 &&
		buff_index > rx_local->fbr[1]->num_entries - 1) ||
		(ring_index == 1 &&
		buff_index > rx_local->fbr[0]->num_entries - 1))
#else
	if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
#endif
	{
		/* Illegal buffer or ring index cannot be used by S/W*/
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates "
			"length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF,
			len, buff_index);
		return NULL;
	}

	/* Get and fill the RFD. */
	spin_lock_irqsave(&adapter->rcv_lock, flags);

	rfd = NULL;
	element = rx_local->recv_list.next;
	rfd = (struct rfd *) list_entry(element, struct rfd, list_node);

	if (rfd == NULL) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	/* In V1 silicon, there is a bug which screws up filtering of
	 * runt packets. Therefore runt packet filtering is disabled
	 * in the MAC and the packets are dropped here. They are
	 * also counted here.
	 */
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		len = 0;	/* len == 0 means drop: the RFD is recycled below */
	}

	if (len) {
		/* Determine if this is a multicast packet coming in */
		if ((word0 & ALCATEL_MULTICAST_PKT) &&
		    !(word0 & ALCATEL_BROADCAST_PKT)) {
			/* Promiscuous mode and Multicast mode are
			 * not mutually exclusive as was first
			 * thought. I guess Promiscuous is just
			 * considered a super-set of the other
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((adapter->packet_filter &
					ET131X_PACKET_TYPE_MULTICAST)
			    && !(adapter->packet_filter &
					ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(adapter->packet_filter &
					ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				/*
				 * Note - ring_index for fbr[] array is reversed
				 * 1 for FBR0 etc
				 */
				buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
						virt[buff_index];

				/* Loop through our list to see if the
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (i = 0; i < adapter->multicast_addr_count;
				     i++) {
					if (buf[0] ==
						adapter->multicast_list[i][0]
					    && buf[1] ==
						adapter->multicast_list[i][1]
					    && buf[2] ==
						adapter->multicast_list[i][2]
					    && buf[3] ==
						adapter->multicast_list[i][3]
					    && buf[4] ==
						adapter->multicast_list[i][4]
					    && buf[5] ==
						adapter->multicast_list[i][5]) {
						break;
					}
				}

				/* If our index is equal to the number
				 * of Multicast address we have, then
				 * this means we did not find this
				 * packet's matching address in our
				 * list. Set the len to zero,
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (i == adapter->multicast_addr_count)
					len = 0;
			}

			if (len > 0)
				adapter->stats.multicast_pkts_rcvd++;
		} else if (word0 & ALCATEL_BROADCAST_PKT)
			adapter->stats.broadcast_pkts_rcvd++;
		else
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			adapter->stats.unicast_pkts_rcvd++;
	}

	if (len > 0) {
		struct sk_buff *skb = NULL;

		/*rfd->len = len - 4; */
		rfd->len = len;

		skb = dev_alloc_skb(rfd->len + 2);
		if (!skb) {
			dev_err(&adapter->pdev->dev,
				"Couldn't alloc an SKB for Rx\n");
			/* NOTE(review): this early return skips
			 * nic_return_rfd(), so the RFD removed from
			 * recv_list above is never requeued — looks like
			 * an RFD leak on skb-allocation failure; confirm
			 * and recycle the RFD before returning.
			 */
			return NULL;
		}

		adapter->net_stats.rx_bytes += rfd->len;

		/*
		 * Note - ring_index for fbr[] array is reversed,
		 * 1 for FBR0 etc
		 */
		memcpy(skb_put(skb, rfd->len),
		       rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
		       rfd->len);

		skb->dev = adapter->netdev;
		skb->protocol = eth_type_trans(skb, adapter->netdev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx_ni(skb);
	} else {
		rfd->len = 0;
	}

	/* Recycle the buffer to the hardware and requeue the RFD */
	nic_return_rfd(adapter, rfd);
	return rfd;
}
3120 | 3120 | ||
3121 | /** | 3121 | /** |
3122 | * et131x_handle_recv_interrupt - Interrupt handler for receive processing | 3122 | * et131x_handle_recv_interrupt - Interrupt handler for receive processing |
3123 | * @adapter: pointer to our adapter | 3123 | * @adapter: pointer to our adapter |
3124 | * | 3124 | * |
3125 | * Assumption, Rcv spinlock has been acquired. | 3125 | * Assumption, Rcv spinlock has been acquired. |
3126 | */ | 3126 | */ |
3127 | static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) | 3127 | static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) |
3128 | { | 3128 | { |
3129 | struct rfd *rfd = NULL; | 3129 | struct rfd *rfd = NULL; |
3130 | u32 count = 0; | 3130 | u32 count = 0; |
3131 | bool done = true; | 3131 | bool done = true; |
3132 | 3132 | ||
3133 | /* Process up to available RFD's */ | 3133 | /* Process up to available RFD's */ |
3134 | while (count < NUM_PACKETS_HANDLED) { | 3134 | while (count < NUM_PACKETS_HANDLED) { |
3135 | if (list_empty(&adapter->rx_ring.recv_list)) { | 3135 | if (list_empty(&adapter->rx_ring.recv_list)) { |
3136 | WARN_ON(adapter->rx_ring.num_ready_recv != 0); | 3136 | WARN_ON(adapter->rx_ring.num_ready_recv != 0); |
3137 | done = false; | 3137 | done = false; |
3138 | break; | 3138 | break; |
3139 | } | 3139 | } |
3140 | 3140 | ||
3141 | rfd = nic_rx_pkts(adapter); | 3141 | rfd = nic_rx_pkts(adapter); |
3142 | 3142 | ||
3143 | if (rfd == NULL) | 3143 | if (rfd == NULL) |
3144 | break; | 3144 | break; |
3145 | 3145 | ||
3146 | /* Do not receive any packets until a filter has been set. | 3146 | /* Do not receive any packets until a filter has been set. |
3147 | * Do not receive any packets until we have link. | 3147 | * Do not receive any packets until we have link. |
3148 | * If length is zero, return the RFD in order to advance the | 3148 | * If length is zero, return the RFD in order to advance the |
3149 | * Free buffer ring. | 3149 | * Free buffer ring. |
3150 | */ | 3150 | */ |
3151 | if (!adapter->packet_filter || | 3151 | if (!adapter->packet_filter || |
3152 | !netif_carrier_ok(adapter->netdev) || | 3152 | !netif_carrier_ok(adapter->netdev) || |
3153 | rfd->len == 0) | 3153 | rfd->len == 0) |
3154 | continue; | 3154 | continue; |
3155 | 3155 | ||
3156 | /* Increment the number of packets we received */ | 3156 | /* Increment the number of packets we received */ |
3157 | adapter->net_stats.rx_packets++; | 3157 | adapter->net_stats.rx_packets++; |
3158 | 3158 | ||
3159 | /* Set the status on the packet, either resources or success */ | 3159 | /* Set the status on the packet, either resources or success */ |
3160 | if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { | 3160 | if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { |
3161 | dev_warn(&adapter->pdev->dev, | 3161 | dev_warn(&adapter->pdev->dev, |
3162 | "RFD's are running out\n"); | 3162 | "RFD's are running out\n"); |
3163 | } | 3163 | } |
3164 | count++; | 3164 | count++; |
3165 | } | 3165 | } |
3166 | 3166 | ||
3167 | if (count == NUM_PACKETS_HANDLED || !done) { | 3167 | if (count == NUM_PACKETS_HANDLED || !done) { |
3168 | adapter->rx_ring.unfinished_receives = true; | 3168 | adapter->rx_ring.unfinished_receives = true; |
3169 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, | 3169 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
3170 | &adapter->regs->global.watchdog_timer); | 3170 | &adapter->regs->global.watchdog_timer); |
3171 | } else | 3171 | } else |
3172 | /* Watchdog timer will disable itself if appropriate. */ | 3172 | /* Watchdog timer will disable itself if appropriate. */ |
3173 | adapter->rx_ring.unfinished_receives = false; | 3173 | adapter->rx_ring.unfinished_receives = false; |
3174 | } | 3174 | } |
3175 | 3175 | ||
3176 | /** | 3176 | /** |
3177 | * et131x_tx_dma_memory_alloc | 3177 | * et131x_tx_dma_memory_alloc |
3178 | * @adapter: pointer to our private adapter structure | 3178 | * @adapter: pointer to our private adapter structure |
3179 | * | 3179 | * |
3180 | * Returns 0 on success and errno on failure (as defined in errno.h). | 3180 | * Returns 0 on success and errno on failure (as defined in errno.h). |
3181 | * | 3181 | * |
3182 | * Allocates memory that will be visible both to the device and to the CPU. | 3182 | * Allocates memory that will be visible both to the device and to the CPU. |
3183 | * The OS will pass us packets, pointers to which we will insert in the Tx | 3183 | * The OS will pass us packets, pointers to which we will insert in the Tx |
3184 | * Descriptor queue. The device will read this queue to find the packets in | 3184 | * Descriptor queue. The device will read this queue to find the packets in |
3185 | * memory. The device will update the "status" in memory each time it xmits a | 3185 | * memory. The device will update the "status" in memory each time it xmits a |
3186 | * packet. | 3186 | * packet. |
3187 | */ | 3187 | */ |
3188 | static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) | 3188 | static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) |
3189 | { | 3189 | { |
3190 | int desc_size = 0; | 3190 | int desc_size = 0; |
3191 | struct tx_ring *tx_ring = &adapter->tx_ring; | 3191 | struct tx_ring *tx_ring = &adapter->tx_ring; |
3192 | 3192 | ||
3193 | /* Allocate memory for the TCB's (Transmit Control Block) */ | 3193 | /* Allocate memory for the TCB's (Transmit Control Block) */ |
3194 | adapter->tx_ring.tcb_ring = | 3194 | adapter->tx_ring.tcb_ring = |
3195 | kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); | 3195 | kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); |
3196 | if (!adapter->tx_ring.tcb_ring) { | 3196 | if (!adapter->tx_ring.tcb_ring) { |
3197 | dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); | 3197 | dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); |
3198 | return -ENOMEM; | 3198 | return -ENOMEM; |
3199 | } | 3199 | } |
3200 | 3200 | ||
3201 | /* Allocate enough memory for the Tx descriptor ring, and allocate | 3201 | /* Allocate enough memory for the Tx descriptor ring, and allocate |
3202 | * some extra so that the ring can be aligned on a 4k boundary. | 3202 | * some extra so that the ring can be aligned on a 4k boundary. |
3203 | */ | 3203 | */ |
3204 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; | 3204 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; |
3205 | tx_ring->tx_desc_ring = | 3205 | tx_ring->tx_desc_ring = |
3206 | (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, | 3206 | (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, |
3207 | desc_size, | 3207 | desc_size, |
3208 | &tx_ring->tx_desc_ring_pa, | 3208 | &tx_ring->tx_desc_ring_pa, |
3209 | GFP_KERNEL); | 3209 | GFP_KERNEL); |
3210 | if (!adapter->tx_ring.tx_desc_ring) { | 3210 | if (!adapter->tx_ring.tx_desc_ring) { |
3211 | dev_err(&adapter->pdev->dev, | 3211 | dev_err(&adapter->pdev->dev, |
3212 | "Cannot alloc memory for Tx Ring\n"); | 3212 | "Cannot alloc memory for Tx Ring\n"); |
3213 | return -ENOMEM; | 3213 | return -ENOMEM; |
3214 | } | 3214 | } |
3215 | 3215 | ||
3216 | /* Save physical address | 3216 | /* Save physical address |
3217 | * | 3217 | * |
3218 | * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, | 3218 | * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, |
3219 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses | 3219 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
3220 | * are ever returned, make sure the high part is retrieved here before | 3220 | * are ever returned, make sure the high part is retrieved here before |
3221 | * storing the adjusted address. | 3221 | * storing the adjusted address. |
3222 | */ | 3222 | */ |
3223 | /* Allocate memory for the Tx status block */ | 3223 | /* Allocate memory for the Tx status block */ |
3224 | tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, | 3224 | tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, |
3225 | sizeof(u32), | 3225 | sizeof(u32), |
3226 | &tx_ring->tx_status_pa, | 3226 | &tx_ring->tx_status_pa, |
3227 | GFP_KERNEL); | 3227 | GFP_KERNEL); |
3228 | if (!adapter->tx_ring.tx_status_pa) { | 3228 | if (!adapter->tx_ring.tx_status_pa) { |
3229 | dev_err(&adapter->pdev->dev, | 3229 | dev_err(&adapter->pdev->dev, |
3230 | "Cannot alloc memory for Tx status block\n"); | 3230 | "Cannot alloc memory for Tx status block\n"); |
3231 | return -ENOMEM; | 3231 | return -ENOMEM; |
3232 | } | 3232 | } |
3233 | return 0; | 3233 | return 0; |
3234 | } | 3234 | } |
3235 | 3235 | ||
3236 | /** | 3236 | /** |
3237 | * et131x_tx_dma_memory_free - Free all memory allocated within this module | 3237 | * et131x_tx_dma_memory_free - Free all memory allocated within this module |
3238 | * @adapter: pointer to our private adapter structure | 3238 | * @adapter: pointer to our private adapter structure |
3239 | * | 3239 | * |
3240 | * Returns 0 on success and errno on failure (as defined in errno.h). | 3240 | * Returns 0 on success and errno on failure (as defined in errno.h). |
3241 | */ | 3241 | */ |
3242 | static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) | 3242 | static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) |
3243 | { | 3243 | { |
3244 | int desc_size = 0; | 3244 | int desc_size = 0; |
3245 | 3245 | ||
3246 | if (adapter->tx_ring.tx_desc_ring) { | 3246 | if (adapter->tx_ring.tx_desc_ring) { |
3247 | /* Free memory relating to Tx rings here */ | 3247 | /* Free memory relating to Tx rings here */ |
3248 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) | 3248 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) |
3249 | + 4096 - 1; | 3249 | + 4096 - 1; |
3250 | dma_free_coherent(&adapter->pdev->dev, | 3250 | dma_free_coherent(&adapter->pdev->dev, |
3251 | desc_size, | 3251 | desc_size, |
3252 | adapter->tx_ring.tx_desc_ring, | 3252 | adapter->tx_ring.tx_desc_ring, |
3253 | adapter->tx_ring.tx_desc_ring_pa); | 3253 | adapter->tx_ring.tx_desc_ring_pa); |
3254 | adapter->tx_ring.tx_desc_ring = NULL; | 3254 | adapter->tx_ring.tx_desc_ring = NULL; |
3255 | } | 3255 | } |
3256 | 3256 | ||
3257 | /* Free memory for the Tx status block */ | 3257 | /* Free memory for the Tx status block */ |
3258 | if (adapter->tx_ring.tx_status) { | 3258 | if (adapter->tx_ring.tx_status) { |
3259 | dma_free_coherent(&adapter->pdev->dev, | 3259 | dma_free_coherent(&adapter->pdev->dev, |
3260 | sizeof(u32), | 3260 | sizeof(u32), |
3261 | adapter->tx_ring.tx_status, | 3261 | adapter->tx_ring.tx_status, |
3262 | adapter->tx_ring.tx_status_pa); | 3262 | adapter->tx_ring.tx_status_pa); |
3263 | 3263 | ||
3264 | adapter->tx_ring.tx_status = NULL; | 3264 | adapter->tx_ring.tx_status = NULL; |
3265 | } | 3265 | } |
3266 | /* Free the memory for the tcb structures */ | 3266 | /* Free the memory for the tcb structures */ |
3267 | kfree(adapter->tx_ring.tcb_ring); | 3267 | kfree(adapter->tx_ring.tcb_ring); |
3268 | } | 3268 | } |
3269 | 3269 | ||
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb describing the skb to transmit
 *
 * Builds up to 24 Tx descriptors on the stack for the skb held by @tcb,
 * DMA-maps the linear part and each page fragment, copies the descriptors
 * into the hardware Tx ring (handling ring wrap), queues the TCB on the
 * in-flight send list and kicks the device.
 *
 * Returns 0 on success, -EIO if the skb has too many fragments.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;
	struct phy_device *phydev = adapter->phydev;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * dma_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * dma_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    dma_map_single(&adapter->pdev->dev,
						   skb->data,
						   skb->len -
						   skb->data_len,
						   DMA_TO_DEVICE);
			} else {
				/* Jumbo linear area: first half of the data
				 * in this descriptor ... */
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* The dma_addr_t ->u32 truncation note above
				 * applies to every mapping in this function.
				 */
				desc[frag++].addr_lo =
				    dma_map_single(&adapter->pdev->dev,
						   skb->data,
						   ((skb->len -
						     skb->data_len) / 2),
						   DMA_TO_DEVICE);
				desc[frag].addr_hi = 0;

				/* ... and the second half in the next one. */
				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				desc[frag++].addr_lo =
				    dma_map_single(&adapter->pdev->dev,
						   skb->data +
						   ((skb->len -
						     skb->data_len) / 2),
						   ((skb->len -
						     skb->data_len) / 2),
						   DMA_TO_DEVICE);
			}
		} else {
			/* Page fragments: frags[] is offset by one because
			 * i == 0 is the skb linear area. */
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
			    frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * dma_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by dma_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo = skb_frag_dma_map(
							&adapter->pdev->dev,
							&frags[i - 1],
							0,
							frags[i - 1].size,
							DMA_TO_DEVICE);
		}
	}

	/* At gigabit speed interrupts are coalesced: only every
	 * PARM_TX_NUM_BUFS_DEF packets requests an IRQ (flag bit 0x4);
	 * every packet still sets the last-element bit (0x1). */
	if (phydev && phydev->speed == SPEED_1000) {
		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			adapter->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = adapter->tx_ring.send_idx;
	tcb->stale = 0;

	spin_lock_irqsave(&adapter->send_hw_lock, flags);

	/* Descriptors remaining before the ring's physical end; copy in at
	 * most that many, the rest wrap to the start of the ring. */
	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(adapter->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(adapter->tx_ring.tx_desc_ring +
	       INDEX10(adapter->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&adapter->tx_ring.send_idx, thiscopy);

	/* On wrap, clear the 10-bit index and toggle the wrap bit so the
	 * hardware can distinguish a full ring from an empty one. */
	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
	    INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(adapter->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&adapter->tx_ring.send_idx, remainder);
	}

	/* Record the index of this packet's last descriptor in the TCB
	 * (send_idx already points one past it, wrap bit included). */
	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
		if (adapter->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = adapter->tx_ring.send_idx - 1;

	spin_lock(&adapter->tcb_send_qlock);

	/* Append the TCB to the in-flight send list */
	if (adapter->tx_ring.send_tail)
		adapter->tx_ring.send_tail->next = tcb;
	else
		adapter->tx_ring.send_head = tcb;

	adapter->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	adapter->tx_ring.used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(adapter->tx_ring.send_idx,
	       &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	return 0;
}
3484 | 3484 | ||
3485 | /** | 3485 | /** |
3486 | * send_packet - Do the work to send a packet | 3486 | * send_packet - Do the work to send a packet |
3487 | * @skb: the packet(s) to send | 3487 | * @skb: the packet(s) to send |
3488 | * @adapter: a pointer to the device's private adapter structure | 3488 | * @adapter: a pointer to the device's private adapter structure |
3489 | * | 3489 | * |
3490 | * Return 0 in almost all cases; non-zero value in extreme hard failure only. | 3490 | * Return 0 in almost all cases; non-zero value in extreme hard failure only. |
3491 | * | 3491 | * |
3492 | * Assumption: Send spinlock has been acquired | 3492 | * Assumption: Send spinlock has been acquired |
3493 | */ | 3493 | */ |
3494 | static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) | 3494 | static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) |
3495 | { | 3495 | { |
3496 | int status; | 3496 | int status; |
3497 | struct tcb *tcb = NULL; | 3497 | struct tcb *tcb = NULL; |
3498 | u16 *shbufva; | 3498 | u16 *shbufva; |
3499 | unsigned long flags; | 3499 | unsigned long flags; |
3500 | 3500 | ||
3501 | /* All packets must have at least a MAC address and a protocol type */ | 3501 | /* All packets must have at least a MAC address and a protocol type */ |
3502 | if (skb->len < ETH_HLEN) | 3502 | if (skb->len < ETH_HLEN) |
3503 | return -EIO; | 3503 | return -EIO; |
3504 | 3504 | ||
3505 | /* Get a TCB for this packet */ | 3505 | /* Get a TCB for this packet */ |
3506 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); | 3506 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
3507 | 3507 | ||
3508 | tcb = adapter->tx_ring.tcb_qhead; | 3508 | tcb = adapter->tx_ring.tcb_qhead; |
3509 | 3509 | ||
3510 | if (tcb == NULL) { | 3510 | if (tcb == NULL) { |
3511 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); | 3511 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
3512 | return -ENOMEM; | 3512 | return -ENOMEM; |
3513 | } | 3513 | } |
3514 | 3514 | ||
3515 | adapter->tx_ring.tcb_qhead = tcb->next; | 3515 | adapter->tx_ring.tcb_qhead = tcb->next; |
3516 | 3516 | ||
3517 | if (adapter->tx_ring.tcb_qhead == NULL) | 3517 | if (adapter->tx_ring.tcb_qhead == NULL) |
3518 | adapter->tx_ring.tcb_qtail = NULL; | 3518 | adapter->tx_ring.tcb_qtail = NULL; |
3519 | 3519 | ||
3520 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); | 3520 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
3521 | 3521 | ||
3522 | tcb->skb = skb; | 3522 | tcb->skb = skb; |
3523 | 3523 | ||
3524 | if (skb->data != NULL && skb->len - skb->data_len >= 6) { | 3524 | if (skb->data != NULL && skb->len - skb->data_len >= 6) { |
3525 | shbufva = (u16 *) skb->data; | 3525 | shbufva = (u16 *) skb->data; |
3526 | 3526 | ||
3527 | if ((shbufva[0] == 0xffff) && | 3527 | if ((shbufva[0] == 0xffff) && |
3528 | (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { | 3528 | (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { |
3529 | tcb->flags |= fMP_DEST_BROAD; | 3529 | tcb->flags |= fMP_DEST_BROAD; |
3530 | } else if ((shbufva[0] & 0x3) == 0x0001) { | 3530 | } else if ((shbufva[0] & 0x3) == 0x0001) { |
3531 | tcb->flags |= fMP_DEST_MULTI; | 3531 | tcb->flags |= fMP_DEST_MULTI; |
3532 | } | 3532 | } |
3533 | } | 3533 | } |
3534 | 3534 | ||
3535 | tcb->next = NULL; | 3535 | tcb->next = NULL; |
3536 | 3536 | ||
3537 | /* Call the NIC specific send handler. */ | 3537 | /* Call the NIC specific send handler. */ |
3538 | status = nic_send_packet(adapter, tcb); | 3538 | status = nic_send_packet(adapter, tcb); |
3539 | 3539 | ||
3540 | if (status != 0) { | 3540 | if (status != 0) { |
3541 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); | 3541 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
3542 | 3542 | ||
3543 | if (adapter->tx_ring.tcb_qtail) | 3543 | if (adapter->tx_ring.tcb_qtail) |
3544 | adapter->tx_ring.tcb_qtail->next = tcb; | 3544 | adapter->tx_ring.tcb_qtail->next = tcb; |
3545 | else | 3545 | else |
3546 | /* Apparently ready Q is empty. */ | 3546 | /* Apparently ready Q is empty. */ |
3547 | adapter->tx_ring.tcb_qhead = tcb; | 3547 | adapter->tx_ring.tcb_qhead = tcb; |
3548 | 3548 | ||
3549 | adapter->tx_ring.tcb_qtail = tcb; | 3549 | adapter->tx_ring.tcb_qtail = tcb; |
3550 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); | 3550 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
3551 | return status; | 3551 | return status; |
3552 | } | 3552 | } |
3553 | WARN_ON(adapter->tx_ring.used > NUM_TCB); | 3553 | WARN_ON(adapter->tx_ring.used > NUM_TCB); |
3554 | return 0; | 3554 | return 0; |
3555 | } | 3555 | } |
3556 | 3556 | ||
3557 | /** | 3557 | /** |
3558 | * et131x_send_packets - This function is called by the OS to send packets | 3558 | * et131x_send_packets - This function is called by the OS to send packets |
3559 | * @skb: the packet(s) to send | 3559 | * @skb: the packet(s) to send |
3560 | * @netdev:device on which to TX the above packet(s) | 3560 | * @netdev:device on which to TX the above packet(s) |
3561 | * | 3561 | * |
3562 | * Return 0 in almost all cases; non-zero value in extreme hard failure only | 3562 | * Return 0 in almost all cases; non-zero value in extreme hard failure only |
3563 | */ | 3563 | */ |
3564 | static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) | 3564 | static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) |
3565 | { | 3565 | { |
3566 | int status = 0; | 3566 | int status = 0; |
3567 | struct et131x_adapter *adapter = netdev_priv(netdev); | 3567 | struct et131x_adapter *adapter = netdev_priv(netdev); |
3568 | 3568 | ||
3569 | /* Send these packets | 3569 | /* Send these packets |
3570 | * | 3570 | * |
3571 | * NOTE: The Linux Tx entry point is only given one packet at a time | 3571 | * NOTE: The Linux Tx entry point is only given one packet at a time |
3572 | * to Tx, so the PacketCount and it's array used makes no sense here | 3572 | * to Tx, so the PacketCount and it's array used makes no sense here |
3573 | */ | 3573 | */ |
3574 | 3574 | ||
3575 | /* TCB is not available */ | 3575 | /* TCB is not available */ |
3576 | if (adapter->tx_ring.used >= NUM_TCB) { | 3576 | if (adapter->tx_ring.used >= NUM_TCB) { |
3577 | /* NOTE: If there's an error on send, no need to queue the | 3577 | /* NOTE: If there's an error on send, no need to queue the |
3578 | * packet under Linux; if we just send an error up to the | 3578 | * packet under Linux; if we just send an error up to the |
3579 | * netif layer, it will resend the skb to us. | 3579 | * netif layer, it will resend the skb to us. |
3580 | */ | 3580 | */ |
3581 | status = -ENOMEM; | 3581 | status = -ENOMEM; |
3582 | } else { | 3582 | } else { |
3583 | /* We need to see if the link is up; if it's not, make the | 3583 | /* We need to see if the link is up; if it's not, make the |
3584 | * netif layer think we're good and drop the packet | 3584 | * netif layer think we're good and drop the packet |
3585 | */ | 3585 | */ |
3586 | if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) || | 3586 | if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) || |
3587 | !netif_carrier_ok(netdev)) { | 3587 | !netif_carrier_ok(netdev)) { |
3588 | dev_kfree_skb_any(skb); | 3588 | dev_kfree_skb_any(skb); |
3589 | skb = NULL; | 3589 | skb = NULL; |
3590 | 3590 | ||
3591 | adapter->net_stats.tx_dropped++; | 3591 | adapter->net_stats.tx_dropped++; |
3592 | } else { | 3592 | } else { |
3593 | status = send_packet(skb, adapter); | 3593 | status = send_packet(skb, adapter); |
3594 | if (status != 0 && status != -ENOMEM) { | 3594 | if (status != 0 && status != -ENOMEM) { |
3595 | /* On any other error, make netif think we're | 3595 | /* On any other error, make netif think we're |
3596 | * OK and drop the packet | 3596 | * OK and drop the packet |
3597 | */ | 3597 | */ |
3598 | dev_kfree_skb_any(skb); | 3598 | dev_kfree_skb_any(skb); |
3599 | skb = NULL; | 3599 | skb = NULL; |
3600 | adapter->net_stats.tx_dropped++; | 3600 | adapter->net_stats.tx_dropped++; |
3601 | } | 3601 | } |
3602 | } | 3602 | } |
3603 | } | 3603 | } |
3604 | return status; | 3604 | return status; |
3605 | } | 3605 | } |
3606 | 3606 | ||
/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;

	/* Account the completed packet against the right stat counter,
	 * based on the destination type recorded when it was queued.
	 */
	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)
			       (adapter->tx_ring.tx_desc_ring +
			       INDEX10(tcb->index_start));

			dma_unmap_single(&adapter->pdev->dev,
					 desc->addr_lo,
					 desc->len_vlan, DMA_TO_DEVICE);

			/* Advance index_start, wrapping at the end of the
			 * ring and toggling the 10-bit wrap indicator.
			 */
			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				  INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	/* Wipe the TCB so it can be reused from a clean state. */
	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	WARN_ON(adapter->tx_ring.used < 0);
}
3675 | 3675 | ||
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;	/* safety bound: never recycle more than NUM_TCB */

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		/* Unlink this TCB from the in-flight list before
		 * recycling it.
		 */
		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		/* free_send_packet() acquires the ready-queue lock itself,
		 * so drop the send-queue lock around the call and reacquire
		 * it before touching the list again.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	/* NOTE(review): fires when the bound above was reached, i.e. the
	 * send list appeared to hold at least NUM_TCB entries — confirm
	 * that reaching exactly NUM_TCB is indeed the error condition.
	 */
	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}
3719 | 3719 | ||
/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	/* Hardware completion point: a 10-bit descriptor index plus a
	 * wrap indicator bit (ET_DMA10_WRAP).
	 */
	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		/* free_send_packet() takes the ready-queue lock itself, so
		 * release the send-queue lock around the call.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	/* Second pass: descriptors on the same wrap as the completion
	 * indicator, strictly below the serviced index.
	 */
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
3783 | 3783 | ||
3784 | static int et131x_get_settings(struct net_device *netdev, | 3784 | static int et131x_get_settings(struct net_device *netdev, |
3785 | struct ethtool_cmd *cmd) | 3785 | struct ethtool_cmd *cmd) |
3786 | { | 3786 | { |
3787 | struct et131x_adapter *adapter = netdev_priv(netdev); | 3787 | struct et131x_adapter *adapter = netdev_priv(netdev); |
3788 | 3788 | ||
3789 | return phy_ethtool_gset(adapter->phydev, cmd); | 3789 | return phy_ethtool_gset(adapter->phydev, cmd); |
3790 | } | 3790 | } |
3791 | 3791 | ||
3792 | static int et131x_set_settings(struct net_device *netdev, | 3792 | static int et131x_set_settings(struct net_device *netdev, |
3793 | struct ethtool_cmd *cmd) | 3793 | struct ethtool_cmd *cmd) |
3794 | { | 3794 | { |
3795 | struct et131x_adapter *adapter = netdev_priv(netdev); | 3795 | struct et131x_adapter *adapter = netdev_priv(netdev); |
3796 | 3796 | ||
3797 | return phy_ethtool_sset(adapter->phydev, cmd); | 3797 | return phy_ethtool_sset(adapter->phydev, cmd); |
3798 | } | 3798 | } |
3799 | 3799 | ||
3800 | static int et131x_get_regs_len(struct net_device *netdev) | 3800 | static int et131x_get_regs_len(struct net_device *netdev) |
3801 | { | 3801 | { |
3802 | #define ET131X_REGS_LEN 256 | 3802 | #define ET131X_REGS_LEN 256 |
3803 | return ET131X_REGS_LEN * sizeof(u32); | 3803 | return ET131X_REGS_LEN * sizeof(u32); |
3804 | } | 3804 | } |
3805 | 3805 | ||
/* ethtool .get_regs: fill @regs_data with a snapshot of PHY, global,
 * TXDMA and RXDMA registers. Slot order is fixed (sequential num++),
 * so entries must not be reordered without updating consumers of the
 * dump. Unused slots are zeroed by the leading memset.
 *
 * NOTE(review): each et131x_mii_read() result is written through a
 * (u16 *) cast into the low half of a u32 slot — presumably relies on
 * little-endian layout; confirm before running on big-endian.
 */
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	/* Encode dump format, PCI revision and device id for consumers */
	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	/* interrupt_status is taken from the cached copy, not the
	 * hardware register
	 */
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}
3928 | 3928 | ||
3929 | #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ | 3929 | #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ |
3930 | static void et131x_get_drvinfo(struct net_device *netdev, | 3930 | static void et131x_get_drvinfo(struct net_device *netdev, |
3931 | struct ethtool_drvinfo *info) | 3931 | struct ethtool_drvinfo *info) |
3932 | { | 3932 | { |
3933 | struct et131x_adapter *adapter = netdev_priv(netdev); | 3933 | struct et131x_adapter *adapter = netdev_priv(netdev); |
3934 | 3934 | ||
3935 | strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); | 3935 | strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); |
3936 | strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); | 3936 | strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); |
3937 | strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); | 3937 | strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); |
3938 | } | 3938 | } |
3939 | 3939 | ||
/* ethtool operations supported by this driver; hooked up to the net
 * device in et131x_set_ethtool_ops() below. Only basic link, settings
 * and register-dump queries are implemented. */
static struct ethtool_ops et131x_ethtool_ops = {
	.get_settings = et131x_get_settings,
	.set_settings = et131x_set_settings,
	.get_drvinfo = et131x_get_drvinfo,
	.get_regs_len = et131x_get_regs_len,
	.get_regs = et131x_get_regs,
	.get_link = ethtool_op_get_link,
};
3948 | 3948 | ||
/* Attach this driver's ethtool operations to @netdev. */
static void et131x_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}
3953 | 3953 | ||
3954 | /** | 3954 | /** |
3955 | * et131x_hwaddr_init - set up the MAC Address on the ET1310 | 3955 | * et131x_hwaddr_init - set up the MAC Address on the ET1310 |
3956 | * @adapter: pointer to our private adapter structure | 3956 | * @adapter: pointer to our private adapter structure |
3957 | */ | 3957 | */ |
3958 | static void et131x_hwaddr_init(struct et131x_adapter *adapter) | 3958 | static void et131x_hwaddr_init(struct et131x_adapter *adapter) |
3959 | { | 3959 | { |
3960 | /* If have our default mac from init and no mac address from | 3960 | /* If have our default mac from init and no mac address from |
3961 | * EEPROM then we need to generate the last octet and set it on the | 3961 | * EEPROM then we need to generate the last octet and set it on the |
3962 | * device | 3962 | * device |
3963 | */ | 3963 | */ |
3964 | if (adapter->rom_addr[0] == 0x00 && | 3964 | if (adapter->rom_addr[0] == 0x00 && |
3965 | adapter->rom_addr[1] == 0x00 && | 3965 | adapter->rom_addr[1] == 0x00 && |
3966 | adapter->rom_addr[2] == 0x00 && | 3966 | adapter->rom_addr[2] == 0x00 && |
3967 | adapter->rom_addr[3] == 0x00 && | 3967 | adapter->rom_addr[3] == 0x00 && |
3968 | adapter->rom_addr[4] == 0x00 && | 3968 | adapter->rom_addr[4] == 0x00 && |
3969 | adapter->rom_addr[5] == 0x00) { | 3969 | adapter->rom_addr[5] == 0x00) { |
3970 | /* | 3970 | /* |
3971 | * We need to randomly generate the last octet so we | 3971 | * We need to randomly generate the last octet so we |
3972 | * decrease our chances of setting the mac address to | 3972 | * decrease our chances of setting the mac address to |
3973 | * same as another one of our cards in the system | 3973 | * same as another one of our cards in the system |
3974 | */ | 3974 | */ |
3975 | get_random_bytes(&adapter->addr[5], 1); | 3975 | get_random_bytes(&adapter->addr[5], 1); |
3976 | /* | 3976 | /* |
3977 | * We have the default value in the register we are | 3977 | * We have the default value in the register we are |
3978 | * working with so we need to copy the current | 3978 | * working with so we need to copy the current |
3979 | * address into the permanent address | 3979 | * address into the permanent address |
3980 | */ | 3980 | */ |
3981 | memcpy(adapter->rom_addr, | 3981 | memcpy(adapter->rom_addr, |
3982 | adapter->addr, ETH_ALEN); | 3982 | adapter->addr, ETH_ALEN); |
3983 | } else { | 3983 | } else { |
3984 | /* We do not have an override address, so set the | 3984 | /* We do not have an override address, so set the |
3985 | * current address to the permanent address and add | 3985 | * current address to the permanent address and add |
3986 | * it to the device | 3986 | * it to the device |
3987 | */ | 3987 | */ |
3988 | memcpy(adapter->addr, | 3988 | memcpy(adapter->addr, |
3989 | adapter->rom_addr, ETH_ALEN); | 3989 | adapter->rom_addr, ETH_ALEN); |
3990 | } | 3990 | } |
3991 | } | 3991 | } |
3992 | 3992 | ||
/**
 * et131x_pci_init - initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Perform the initial setup of PCI registers and if possible initialise
 * the MAC address. At this point the I/O registers have yet to be mapped.
 *
 * Returns 0 on success, a negative errno from et131x_init_eeprom(), or
 * -EIO if the PCIe capability is missing or any config-space access fails.
 */
static int et131x_pci_init(struct et131x_adapter *adapter,
						struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);
	u16 max_payload;
	u16 ctl;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	/* Without a PCIe capability block we cannot tune the link, bail */
	if (!cap) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Let's set up the PORT LOGIC Register. First we need to know what
	 * the max_payload_size is
	 */
	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
		dev_err(&pdev->dev,
		    "Could not read PCI config space for Max Payload Size\n");
		goto err_out;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload &= 0x07;	/* payload size field is the low 3 bits */

	/* Timer values are only tabulated for 128- and 256-byte payloads;
	 * larger sizes keep the device defaults.
	 */
	if (max_payload < 2) {
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					       acknak[max_payload])) {
			dev_err(&pdev->dev,
			     "Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					       replay[max_payload])) {
			dev_err(&pdev->dev,
			     "Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
		       "Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for Max read size\n");
		goto err_out;
	}

	/* 0x04 in the READRQ field selects a 2048-byte max read request */
	ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | ( 0x04 << 12);

	if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
		dev_err(&pdev->dev,
		      "Could not write PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
out:
	return rc;
err_out:
	rc = -EIO;
	goto out;
}
4094 | 4094 | ||
4095 | /** | 4095 | /** |
4096 | * et131x_error_timer_handler | 4096 | * et131x_error_timer_handler |
4097 | * @data: timer-specific variable; here a pointer to our adapter structure | 4097 | * @data: timer-specific variable; here a pointer to our adapter structure |
4098 | * | 4098 | * |
4099 | * The routine called when the error timer expires, to track the number of | 4099 | * The routine called when the error timer expires, to track the number of |
4100 | * recurring errors. | 4100 | * recurring errors. |
4101 | */ | 4101 | */ |
4102 | static void et131x_error_timer_handler(unsigned long data) | 4102 | static void et131x_error_timer_handler(unsigned long data) |
4103 | { | 4103 | { |
4104 | struct et131x_adapter *adapter = (struct et131x_adapter *) data; | 4104 | struct et131x_adapter *adapter = (struct et131x_adapter *) data; |
4105 | struct phy_device *phydev = adapter->phydev; | 4105 | struct phy_device *phydev = adapter->phydev; |
4106 | 4106 | ||
4107 | if (et1310_in_phy_coma(adapter)) { | 4107 | if (et1310_in_phy_coma(adapter)) { |
4108 | /* Bring the device immediately out of coma, to | 4108 | /* Bring the device immediately out of coma, to |
4109 | * prevent it from sleeping indefinitely, this | 4109 | * prevent it from sleeping indefinitely, this |
4110 | * mechanism could be improved! */ | 4110 | * mechanism could be improved! */ |
4111 | et1310_disable_phy_coma(adapter); | 4111 | et1310_disable_phy_coma(adapter); |
4112 | adapter->boot_coma = 20; | 4112 | adapter->boot_coma = 20; |
4113 | } else { | 4113 | } else { |
4114 | et1310_update_macstat_host_counters(adapter); | 4114 | et1310_update_macstat_host_counters(adapter); |
4115 | } | 4115 | } |
4116 | 4116 | ||
4117 | if (!phydev->link && adapter->boot_coma < 11) | 4117 | if (!phydev->link && adapter->boot_coma < 11) |
4118 | adapter->boot_coma++; | 4118 | adapter->boot_coma++; |
4119 | 4119 | ||
4120 | if (adapter->boot_coma == 10) { | 4120 | if (adapter->boot_coma == 10) { |
4121 | if (!phydev->link) { | 4121 | if (!phydev->link) { |
4122 | if (!et1310_in_phy_coma(adapter)) { | 4122 | if (!et1310_in_phy_coma(adapter)) { |
4123 | /* NOTE - This was originally a 'sync with | 4123 | /* NOTE - This was originally a 'sync with |
4124 | * interrupt'. How to do that under Linux? | 4124 | * interrupt'. How to do that under Linux? |
4125 | */ | 4125 | */ |
4126 | et131x_enable_interrupts(adapter); | 4126 | et131x_enable_interrupts(adapter); |
4127 | et1310_enable_phy_coma(adapter); | 4127 | et1310_enable_phy_coma(adapter); |
4128 | } | 4128 | } |
4129 | } | 4129 | } |
4130 | } | 4130 | } |
4131 | 4131 | ||
4132 | /* This is a periodic timer, so reschedule */ | 4132 | /* This is a periodic timer, so reschedule */ |
4133 | mod_timer(&adapter->error_timer, jiffies + | 4133 | mod_timer(&adapter->error_timer, jiffies + |
4134 | TX_ERROR_PERIOD * HZ / 1000); | 4134 | TX_ERROR_PERIOD * HZ / 1000); |
4135 | } | 4135 | } |
4136 | 4136 | ||
4137 | /** | 4137 | /** |
4138 | * et131x_adapter_memory_alloc | 4138 | * et131x_adapter_memory_alloc |
4139 | * @adapter: pointer to our private adapter structure | 4139 | * @adapter: pointer to our private adapter structure |
4140 | * | 4140 | * |
4141 | * Returns 0 on success, errno on failure (as defined in errno.h). | 4141 | * Returns 0 on success, errno on failure (as defined in errno.h). |
4142 | * | 4142 | * |
4143 | * Allocate all the memory blocks for send, receive and others. | 4143 | * Allocate all the memory blocks for send, receive and others. |
4144 | */ | 4144 | */ |
4145 | static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) | 4145 | static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) |
4146 | { | 4146 | { |
4147 | int status; | 4147 | int status; |
4148 | 4148 | ||
4149 | /* Allocate memory for the Tx Ring */ | 4149 | /* Allocate memory for the Tx Ring */ |
4150 | status = et131x_tx_dma_memory_alloc(adapter); | 4150 | status = et131x_tx_dma_memory_alloc(adapter); |
4151 | if (status != 0) { | 4151 | if (status != 0) { |
4152 | dev_err(&adapter->pdev->dev, | 4152 | dev_err(&adapter->pdev->dev, |
4153 | "et131x_tx_dma_memory_alloc FAILED\n"); | 4153 | "et131x_tx_dma_memory_alloc FAILED\n"); |
4154 | return status; | 4154 | return status; |
4155 | } | 4155 | } |
4156 | /* Receive buffer memory allocation */ | 4156 | /* Receive buffer memory allocation */ |
4157 | status = et131x_rx_dma_memory_alloc(adapter); | 4157 | status = et131x_rx_dma_memory_alloc(adapter); |
4158 | if (status != 0) { | 4158 | if (status != 0) { |
4159 | dev_err(&adapter->pdev->dev, | 4159 | dev_err(&adapter->pdev->dev, |
4160 | "et131x_rx_dma_memory_alloc FAILED\n"); | 4160 | "et131x_rx_dma_memory_alloc FAILED\n"); |
4161 | et131x_tx_dma_memory_free(adapter); | 4161 | et131x_tx_dma_memory_free(adapter); |
4162 | return status; | 4162 | return status; |
4163 | } | 4163 | } |
4164 | 4164 | ||
4165 | /* Init receive data structures */ | 4165 | /* Init receive data structures */ |
4166 | status = et131x_init_recv(adapter); | 4166 | status = et131x_init_recv(adapter); |
4167 | if (status != 0) { | 4167 | if (status != 0) { |
4168 | dev_err(&adapter->pdev->dev, | 4168 | dev_err(&adapter->pdev->dev, |
4169 | "et131x_init_recv FAILED\n"); | 4169 | "et131x_init_recv FAILED\n"); |
4170 | et131x_tx_dma_memory_free(adapter); | 4170 | et131x_tx_dma_memory_free(adapter); |
4171 | et131x_rx_dma_memory_free(adapter); | 4171 | et131x_rx_dma_memory_free(adapter); |
4172 | } | 4172 | } |
4173 | return status; | 4173 | return status; |
4174 | } | 4174 | } |
4175 | 4175 | ||
/**
 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
 * @adapter: pointer to our private adapter structure
 *
 * Counterpart of et131x_adapter_memory_alloc(); releases the Tx ring
 * first, then the Rx rings.
 */
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	/* Free DMA memory */
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}
4186 | 4186 | ||
4187 | static void et131x_adjust_link(struct net_device *netdev) | 4187 | static void et131x_adjust_link(struct net_device *netdev) |
4188 | { | 4188 | { |
4189 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4189 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4190 | struct phy_device *phydev = adapter->phydev; | 4190 | struct phy_device *phydev = adapter->phydev; |
4191 | 4191 | ||
4192 | if (netif_carrier_ok(netdev)) { | 4192 | if (netif_carrier_ok(netdev)) { |
4193 | adapter->boot_coma = 20; | 4193 | adapter->boot_coma = 20; |
4194 | 4194 | ||
4195 | if (phydev && phydev->speed == SPEED_10) { | 4195 | if (phydev && phydev->speed == SPEED_10) { |
4196 | /* | 4196 | /* |
4197 | * NOTE - Is there a way to query this without | 4197 | * NOTE - Is there a way to query this without |
4198 | * TruePHY? | 4198 | * TruePHY? |
4199 | * && TRU_QueryCoreType(adapter->hTruePhy, 0)== | 4199 | * && TRU_QueryCoreType(adapter->hTruePhy, 0)== |
4200 | * EMI_TRUEPHY_A13O) { | 4200 | * EMI_TRUEPHY_A13O) { |
4201 | */ | 4201 | */ |
4202 | u16 register18; | 4202 | u16 register18; |
4203 | 4203 | ||
4204 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, | 4204 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
4205 | ®ister18); | 4205 | ®ister18); |
4206 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, | 4206 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
4207 | register18 | 0x4); | 4207 | register18 | 0x4); |
4208 | et131x_mii_write(adapter, PHY_INDEX_REG, | 4208 | et131x_mii_write(adapter, PHY_INDEX_REG, |
4209 | register18 | 0x8402); | 4209 | register18 | 0x8402); |
4210 | et131x_mii_write(adapter, PHY_DATA_REG, | 4210 | et131x_mii_write(adapter, PHY_DATA_REG, |
4211 | register18 | 511); | 4211 | register18 | 511); |
4212 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, | 4212 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
4213 | register18); | 4213 | register18); |
4214 | } | 4214 | } |
4215 | 4215 | ||
4216 | et1310_config_flow_control(adapter); | 4216 | et1310_config_flow_control(adapter); |
4217 | 4217 | ||
4218 | if (phydev && phydev->speed == SPEED_1000 && | 4218 | if (phydev && phydev->speed == SPEED_1000 && |
4219 | adapter->registry_jumbo_packet > 2048) { | 4219 | adapter->registry_jumbo_packet > 2048) { |
4220 | u16 reg; | 4220 | u16 reg; |
4221 | 4221 | ||
4222 | et131x_mii_read(adapter, PHY_CONFIG, ®); | 4222 | et131x_mii_read(adapter, PHY_CONFIG, ®); |
4223 | reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; | 4223 | reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; |
4224 | reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; | 4224 | reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; |
4225 | et131x_mii_write(adapter, PHY_CONFIG, reg); | 4225 | et131x_mii_write(adapter, PHY_CONFIG, reg); |
4226 | } | 4226 | } |
4227 | 4227 | ||
4228 | et131x_set_rx_dma_timer(adapter); | 4228 | et131x_set_rx_dma_timer(adapter); |
4229 | et1310_config_mac_regs2(adapter); | 4229 | et1310_config_mac_regs2(adapter); |
4230 | } | 4230 | } |
4231 | 4231 | ||
4232 | if (phydev && phydev->link != adapter->link) { | 4232 | if (phydev && phydev->link != adapter->link) { |
4233 | /* | 4233 | /* |
4234 | * Check to see if we are in coma mode and if | 4234 | * Check to see if we are in coma mode and if |
4235 | * so, disable it because we will not be able | 4235 | * so, disable it because we will not be able |
4236 | * to read PHY values until we are out. | 4236 | * to read PHY values until we are out. |
4237 | */ | 4237 | */ |
4238 | if (et1310_in_phy_coma(adapter)) | 4238 | if (et1310_in_phy_coma(adapter)) |
4239 | et1310_disable_phy_coma(adapter); | 4239 | et1310_disable_phy_coma(adapter); |
4240 | 4240 | ||
4241 | if (phydev->link) { | 4241 | if (phydev->link) { |
4242 | adapter->boot_coma = 20; | 4242 | adapter->boot_coma = 20; |
4243 | } else { | 4243 | } else { |
4244 | dev_warn(&adapter->pdev->dev, | 4244 | dev_warn(&adapter->pdev->dev, |
4245 | "Link down - cable problem ?\n"); | 4245 | "Link down - cable problem ?\n"); |
4246 | adapter->boot_coma = 0; | 4246 | adapter->boot_coma = 0; |
4247 | 4247 | ||
4248 | if (phydev->speed == SPEED_10) { | 4248 | if (phydev->speed == SPEED_10) { |
4249 | /* NOTE - Is there a way to query this without | 4249 | /* NOTE - Is there a way to query this without |
4250 | * TruePHY? | 4250 | * TruePHY? |
4251 | * && TRU_QueryCoreType(adapter->hTruePhy, 0) == | 4251 | * && TRU_QueryCoreType(adapter->hTruePhy, 0) == |
4252 | * EMI_TRUEPHY_A13O) | 4252 | * EMI_TRUEPHY_A13O) |
4253 | */ | 4253 | */ |
4254 | u16 register18; | 4254 | u16 register18; |
4255 | 4255 | ||
4256 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, | 4256 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
4257 | ®ister18); | 4257 | ®ister18); |
4258 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, | 4258 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
4259 | register18 | 0x4); | 4259 | register18 | 0x4); |
4260 | et131x_mii_write(adapter, PHY_INDEX_REG, | 4260 | et131x_mii_write(adapter, PHY_INDEX_REG, |
4261 | register18 | 0x8402); | 4261 | register18 | 0x8402); |
4262 | et131x_mii_write(adapter, PHY_DATA_REG, | 4262 | et131x_mii_write(adapter, PHY_DATA_REG, |
4263 | register18 | 511); | 4263 | register18 | 511); |
4264 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, | 4264 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
4265 | register18); | 4265 | register18); |
4266 | } | 4266 | } |
4267 | 4267 | ||
4268 | /* Free the packets being actively sent & stopped */ | 4268 | /* Free the packets being actively sent & stopped */ |
4269 | et131x_free_busy_send_packets(adapter); | 4269 | et131x_free_busy_send_packets(adapter); |
4270 | 4270 | ||
4271 | /* Re-initialize the send structures */ | 4271 | /* Re-initialize the send structures */ |
4272 | et131x_init_send(adapter); | 4272 | et131x_init_send(adapter); |
4273 | 4273 | ||
4274 | /* | 4274 | /* |
4275 | * Bring the device back to the state it was during | 4275 | * Bring the device back to the state it was during |
4276 | * init prior to autonegotiation being complete. This | 4276 | * init prior to autonegotiation being complete. This |
4277 | * way, when we get the auto-neg complete interrupt, | 4277 | * way, when we get the auto-neg complete interrupt, |
4278 | * we can complete init by calling config_mac_regs2. | 4278 | * we can complete init by calling config_mac_regs2. |
4279 | */ | 4279 | */ |
4280 | et131x_soft_reset(adapter); | 4280 | et131x_soft_reset(adapter); |
4281 | 4281 | ||
4282 | /* Setup ET1310 as per the documentation */ | 4282 | /* Setup ET1310 as per the documentation */ |
4283 | et131x_adapter_setup(adapter); | 4283 | et131x_adapter_setup(adapter); |
4284 | 4284 | ||
4285 | /* perform reset of tx/rx */ | 4285 | /* perform reset of tx/rx */ |
4286 | et131x_disable_txrx(netdev); | 4286 | et131x_disable_txrx(netdev); |
4287 | et131x_enable_txrx(netdev); | 4287 | et131x_enable_txrx(netdev); |
4288 | } | 4288 | } |
4289 | 4289 | ||
4290 | adapter->link = phydev->link; | 4290 | adapter->link = phydev->link; |
4291 | 4291 | ||
4292 | phy_print_status(phydev); | 4292 | phy_print_status(phydev); |
4293 | } | 4293 | } |
4294 | } | 4294 | } |
4295 | 4295 | ||
4296 | static int et131x_mii_probe(struct net_device *netdev) | 4296 | static int et131x_mii_probe(struct net_device *netdev) |
4297 | { | 4297 | { |
4298 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4298 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4299 | struct phy_device *phydev = NULL; | 4299 | struct phy_device *phydev = NULL; |
4300 | 4300 | ||
4301 | phydev = phy_find_first(adapter->mii_bus); | 4301 | phydev = phy_find_first(adapter->mii_bus); |
4302 | if (!phydev) { | 4302 | if (!phydev) { |
4303 | dev_err(&adapter->pdev->dev, "no PHY found\n"); | 4303 | dev_err(&adapter->pdev->dev, "no PHY found\n"); |
4304 | return -ENODEV; | 4304 | return -ENODEV; |
4305 | } | 4305 | } |
4306 | 4306 | ||
4307 | phydev = phy_connect(netdev, dev_name(&phydev->dev), | 4307 | phydev = phy_connect(netdev, dev_name(&phydev->dev), |
4308 | &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); | 4308 | &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); |
4309 | 4309 | ||
4310 | if (IS_ERR(phydev)) { | 4310 | if (IS_ERR(phydev)) { |
4311 | dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); | 4311 | dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); |
4312 | return PTR_ERR(phydev); | 4312 | return PTR_ERR(phydev); |
4313 | } | 4313 | } |
4314 | 4314 | ||
4315 | phydev->supported &= (SUPPORTED_10baseT_Half | 4315 | phydev->supported &= (SUPPORTED_10baseT_Half |
4316 | | SUPPORTED_10baseT_Full | 4316 | | SUPPORTED_10baseT_Full |
4317 | | SUPPORTED_100baseT_Half | 4317 | | SUPPORTED_100baseT_Half |
4318 | | SUPPORTED_100baseT_Full | 4318 | | SUPPORTED_100baseT_Full |
4319 | | SUPPORTED_Autoneg | 4319 | | SUPPORTED_Autoneg |
4320 | | SUPPORTED_MII | 4320 | | SUPPORTED_MII |
4321 | | SUPPORTED_TP); | 4321 | | SUPPORTED_TP); |
4322 | 4322 | ||
4323 | if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) | 4323 | if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) |
4324 | phydev->supported |= SUPPORTED_1000baseT_Full; | 4324 | phydev->supported |= SUPPORTED_1000baseT_Full; |
4325 | 4325 | ||
4326 | phydev->advertising = phydev->supported; | 4326 | phydev->advertising = phydev->supported; |
4327 | adapter->phydev = phydev; | 4327 | adapter->phydev = phydev; |
4328 | 4328 | ||
4329 | dev_info(&adapter->pdev->dev, "attached PHY driver [%s] " | 4329 | dev_info(&adapter->pdev->dev, "attached PHY driver [%s] " |
4330 | "(mii_bus:phy_addr=%s)\n", | 4330 | "(mii_bus:phy_addr=%s)\n", |
4331 | phydev->drv->name, dev_name(&phydev->dev)); | 4331 | phydev->drv->name, dev_name(&phydev->dev)); |
4332 | 4332 | ||
4333 | return 0; | 4333 | return 0; |
4334 | } | 4334 | } |
4335 | 4335 | ||
4336 | /** | 4336 | /** |
4337 | * et131x_adapter_init | 4337 | * et131x_adapter_init |
4338 | * @adapter: pointer to the private adapter struct | 4338 | * @adapter: pointer to the private adapter struct |
4339 | * @pdev: pointer to the PCI device | 4339 | * @pdev: pointer to the PCI device |
4340 | * | 4340 | * |
4341 | * Initialize the data structures for the et131x_adapter object and link | 4341 | * Initialize the data structures for the et131x_adapter object and link |
4342 | * them together with the platform provided device structures. | 4342 | * them together with the platform provided device structures. |
4343 | */ | 4343 | */ |
4344 | static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, | 4344 | static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, |
4345 | struct pci_dev *pdev) | 4345 | struct pci_dev *pdev) |
4346 | { | 4346 | { |
4347 | static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; | 4347 | static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; |
4348 | 4348 | ||
4349 | struct et131x_adapter *adapter; | 4349 | struct et131x_adapter *adapter; |
4350 | 4350 | ||
4351 | /* Allocate private adapter struct and copy in relevant information */ | 4351 | /* Allocate private adapter struct and copy in relevant information */ |
4352 | adapter = netdev_priv(netdev); | 4352 | adapter = netdev_priv(netdev); |
4353 | adapter->pdev = pci_dev_get(pdev); | 4353 | adapter->pdev = pci_dev_get(pdev); |
4354 | adapter->netdev = netdev; | 4354 | adapter->netdev = netdev; |
4355 | 4355 | ||
4356 | /* Initialize spinlocks here */ | 4356 | /* Initialize spinlocks here */ |
4357 | spin_lock_init(&adapter->lock); | 4357 | spin_lock_init(&adapter->lock); |
4358 | spin_lock_init(&adapter->tcb_send_qlock); | 4358 | spin_lock_init(&adapter->tcb_send_qlock); |
4359 | spin_lock_init(&adapter->tcb_ready_qlock); | 4359 | spin_lock_init(&adapter->tcb_ready_qlock); |
4360 | spin_lock_init(&adapter->send_hw_lock); | 4360 | spin_lock_init(&adapter->send_hw_lock); |
4361 | spin_lock_init(&adapter->rcv_lock); | 4361 | spin_lock_init(&adapter->rcv_lock); |
4362 | spin_lock_init(&adapter->rcv_pend_lock); | 4362 | spin_lock_init(&adapter->rcv_pend_lock); |
4363 | spin_lock_init(&adapter->fbr_lock); | 4363 | spin_lock_init(&adapter->fbr_lock); |
4364 | spin_lock_init(&adapter->phy_lock); | 4364 | spin_lock_init(&adapter->phy_lock); |
4365 | 4365 | ||
4366 | adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ | 4366 | adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ |
4367 | 4367 | ||
4368 | /* Set the MAC address to a default */ | 4368 | /* Set the MAC address to a default */ |
4369 | memcpy(adapter->addr, default_mac, ETH_ALEN); | 4369 | memcpy(adapter->addr, default_mac, ETH_ALEN); |
4370 | 4370 | ||
4371 | return adapter; | 4371 | return adapter; |
4372 | } | 4372 | } |
4373 | 4373 | ||
/**
 * et131x_pci_remove
 * @pdev: a pointer to the device's pci_dev structure
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem detects that a PCI device which matches the information
 * contained in the pci_device_id table has been removed.
 *
 * Teardown happens in strict reverse order of probe: unregister the net
 * device first so no new I/O can start, detach the PHY and MDIO bus,
 * release DMA memory and the register mapping, then hand the PCI device
 * back to the core.
 */
static void __devexit et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	phy_disconnect(adapter->phydev);
	mdiobus_unregister(adapter->mii_bus);
	kfree(adapter->mii_bus->irq);	/* irq table allocated at probe time */
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);	/* drop the reference taken in adapter_init */

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
4401 | 4401 | ||
/**
 * et131x_up - Bring up a device for use.
 * @netdev: device to be opened
 *
 * Enables the Tx/Rx DMA paths before starting the PHY state machine so
 * traffic can flow as soon as the link comes up.
 */
static void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}
4413 | 4413 | ||
4414 | /** | 4414 | /** |
4415 | * et131x_down - Bring down the device | 4415 | * et131x_down - Bring down the device |
4416 | * @netdev: device to be broght down | 4416 | * @netdev: device to be brought down |
4417 | */ | 4417 | */ |
4418 | static void et131x_down(struct net_device *netdev) | 4418 | static void et131x_down(struct net_device *netdev) |
4419 | { | 4419 | { |
4420 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4420 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4421 | 4421 | ||
4422 | /* Save the timestamp for the TX watchdog, prevent a timeout */ | 4422 | /* Save the timestamp for the TX watchdog, prevent a timeout */ |
4423 | netdev->trans_start = jiffies; | 4423 | netdev->trans_start = jiffies; |
4424 | 4424 | ||
4425 | phy_stop(adapter->phydev); | 4425 | phy_stop(adapter->phydev); |
4426 | et131x_disable_txrx(netdev); | 4426 | et131x_disable_txrx(netdev); |
4427 | } | 4427 | } |
4428 | 4428 | ||
4429 | #ifdef CONFIG_PM_SLEEP | 4429 | #ifdef CONFIG_PM_SLEEP |
4430 | static int et131x_suspend(struct device *dev) | 4430 | static int et131x_suspend(struct device *dev) |
4431 | { | 4431 | { |
4432 | struct pci_dev *pdev = to_pci_dev(dev); | 4432 | struct pci_dev *pdev = to_pci_dev(dev); |
4433 | struct net_device *netdev = pci_get_drvdata(pdev); | 4433 | struct net_device *netdev = pci_get_drvdata(pdev); |
4434 | 4434 | ||
4435 | if (netif_running(netdev)) { | 4435 | if (netif_running(netdev)) { |
4436 | netif_device_detach(netdev); | 4436 | netif_device_detach(netdev); |
4437 | et131x_down(netdev); | 4437 | et131x_down(netdev); |
4438 | pci_save_state(pdev); | 4438 | pci_save_state(pdev); |
4439 | } | 4439 | } |
4440 | 4440 | ||
4441 | return 0; | 4441 | return 0; |
4442 | } | 4442 | } |
4443 | 4443 | ||
4444 | static int et131x_resume(struct device *dev) | 4444 | static int et131x_resume(struct device *dev) |
4445 | { | 4445 | { |
4446 | struct pci_dev *pdev = to_pci_dev(dev); | 4446 | struct pci_dev *pdev = to_pci_dev(dev); |
4447 | struct net_device *netdev = pci_get_drvdata(pdev); | 4447 | struct net_device *netdev = pci_get_drvdata(pdev); |
4448 | 4448 | ||
4449 | if (netif_running(netdev)) { | 4449 | if (netif_running(netdev)) { |
4450 | pci_restore_state(pdev); | 4450 | pci_restore_state(pdev); |
4451 | et131x_up(netdev); | 4451 | et131x_up(netdev); |
4452 | netif_device_attach(netdev); | 4452 | netif_device_attach(netdev); |
4453 | } | 4453 | } |
4454 | 4454 | ||
4455 | return 0; | 4455 | return 0; |
4456 | } | 4456 | } |
4457 | 4457 | ||
4458 | static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); | 4458 | static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); |
4459 | #define ET131X_PM_OPS (&et131x_pm_ops) | 4459 | #define ET131X_PM_OPS (&et131x_pm_ops) |
4460 | #else | 4460 | #else |
4461 | #define ET131X_PM_OPS NULL | 4461 | #define ET131X_PM_OPS NULL |
4462 | #endif | 4462 | #endif |
4463 | 4463 | ||
4464 | /** | 4464 | /** |
4465 | * et131x_isr - The Interrupt Service Routine for the driver. | 4465 | * et131x_isr - The Interrupt Service Routine for the driver. |
4466 | * @irq: the IRQ on which the interrupt was received. | 4466 | * @irq: the IRQ on which the interrupt was received. |
4467 | * @dev_id: device-specific info (here a pointer to a net_device struct) | 4467 | * @dev_id: device-specific info (here a pointer to a net_device struct) |
4468 | * | 4468 | * |
4469 | * Returns a value indicating if the interrupt was handled. | 4469 | * Returns a value indicating if the interrupt was handled. |
4470 | */ | 4470 | */ |
4471 | irqreturn_t et131x_isr(int irq, void *dev_id) | 4471 | irqreturn_t et131x_isr(int irq, void *dev_id) |
4472 | { | 4472 | { |
4473 | bool handled = true; | 4473 | bool handled = true; |
4474 | struct net_device *netdev = (struct net_device *)dev_id; | 4474 | struct net_device *netdev = (struct net_device *)dev_id; |
4475 | struct et131x_adapter *adapter = NULL; | 4475 | struct et131x_adapter *adapter = NULL; |
4476 | u32 status; | 4476 | u32 status; |
4477 | 4477 | ||
4478 | if (!netif_device_present(netdev)) { | 4478 | if (!netif_device_present(netdev)) { |
4479 | handled = false; | 4479 | handled = false; |
4480 | goto out; | 4480 | goto out; |
4481 | } | 4481 | } |
4482 | 4482 | ||
4483 | adapter = netdev_priv(netdev); | 4483 | adapter = netdev_priv(netdev); |
4484 | 4484 | ||
4485 | /* If the adapter is in low power state, then it should not | 4485 | /* If the adapter is in low power state, then it should not |
4486 | * recognize any interrupt | 4486 | * recognize any interrupt |
4487 | */ | 4487 | */ |
4488 | 4488 | ||
4489 | /* Disable Device Interrupts */ | 4489 | /* Disable Device Interrupts */ |
4490 | et131x_disable_interrupts(adapter); | 4490 | et131x_disable_interrupts(adapter); |
4491 | 4491 | ||
4492 | /* Get a copy of the value in the interrupt status register | 4492 | /* Get a copy of the value in the interrupt status register |
4493 | * so we can process the interrupting section | 4493 | * so we can process the interrupting section |
4494 | */ | 4494 | */ |
4495 | status = readl(&adapter->regs->global.int_status); | 4495 | status = readl(&adapter->regs->global.int_status); |
4496 | 4496 | ||
4497 | if (adapter->flowcontrol == FLOW_TXONLY || | 4497 | if (adapter->flowcontrol == FLOW_TXONLY || |
4498 | adapter->flowcontrol == FLOW_BOTH) { | 4498 | adapter->flowcontrol == FLOW_BOTH) { |
4499 | status &= ~INT_MASK_ENABLE; | 4499 | status &= ~INT_MASK_ENABLE; |
4500 | } else { | 4500 | } else { |
4501 | status &= ~INT_MASK_ENABLE_NO_FLOW; | 4501 | status &= ~INT_MASK_ENABLE_NO_FLOW; |
4502 | } | 4502 | } |
4503 | 4503 | ||
4504 | /* Make sure this is our interrupt */ | 4504 | /* Make sure this is our interrupt */ |
4505 | if (!status) { | 4505 | if (!status) { |
4506 | handled = false; | 4506 | handled = false; |
4507 | et131x_enable_interrupts(adapter); | 4507 | et131x_enable_interrupts(adapter); |
4508 | goto out; | 4508 | goto out; |
4509 | } | 4509 | } |
4510 | 4510 | ||
4511 | /* This is our interrupt, so process accordingly */ | 4511 | /* This is our interrupt, so process accordingly */ |
4512 | 4512 | ||
4513 | if (status & ET_INTR_WATCHDOG) { | 4513 | if (status & ET_INTR_WATCHDOG) { |
4514 | struct tcb *tcb = adapter->tx_ring.send_head; | 4514 | struct tcb *tcb = adapter->tx_ring.send_head; |
4515 | 4515 | ||
4516 | if (tcb) | 4516 | if (tcb) |
4517 | if (++tcb->stale > 1) | 4517 | if (++tcb->stale > 1) |
4518 | status |= ET_INTR_TXDMA_ISR; | 4518 | status |= ET_INTR_TXDMA_ISR; |
4519 | 4519 | ||
4520 | if (adapter->rx_ring.unfinished_receives) | 4520 | if (adapter->rx_ring.unfinished_receives) |
4521 | status |= ET_INTR_RXDMA_XFR_DONE; | 4521 | status |= ET_INTR_RXDMA_XFR_DONE; |
4522 | else if (tcb == NULL) | 4522 | else if (tcb == NULL) |
4523 | writel(0, &adapter->regs->global.watchdog_timer); | 4523 | writel(0, &adapter->regs->global.watchdog_timer); |
4524 | 4524 | ||
4525 | status &= ~ET_INTR_WATCHDOG; | 4525 | status &= ~ET_INTR_WATCHDOG; |
4526 | } | 4526 | } |
4527 | 4527 | ||
4528 | if (status == 0) { | 4528 | if (status == 0) { |
4529 | /* This interrupt has in some way been "handled" by | 4529 | /* This interrupt has in some way been "handled" by |
4530 | * the ISR. Either it was a spurious Rx interrupt, or | 4530 | * the ISR. Either it was a spurious Rx interrupt, or |
4531 | * it was a Tx interrupt that has been filtered by | 4531 | * it was a Tx interrupt that has been filtered by |
4532 | * the ISR. | 4532 | * the ISR. |
4533 | */ | 4533 | */ |
4534 | et131x_enable_interrupts(adapter); | 4534 | et131x_enable_interrupts(adapter); |
4535 | goto out; | 4535 | goto out; |
4536 | } | 4536 | } |
4537 | 4537 | ||
4538 | /* We need to save the interrupt status value for use in our | 4538 | /* We need to save the interrupt status value for use in our |
4539 | * DPC. We will clear the software copy of that in that | 4539 | * DPC. We will clear the software copy of that in that |
4540 | * routine. | 4540 | * routine. |
4541 | */ | 4541 | */ |
4542 | adapter->stats.interrupt_status = status; | 4542 | adapter->stats.interrupt_status = status; |
4543 | 4543 | ||
4544 | /* Schedule the ISR handler as a bottom-half task in the | 4544 | /* Schedule the ISR handler as a bottom-half task in the |
4545 | * kernel's tq_immediate queue, and mark the queue for | 4545 | * kernel's tq_immediate queue, and mark the queue for |
4546 | * execution | 4546 | * execution |
4547 | */ | 4547 | */ |
4548 | schedule_work(&adapter->task); | 4548 | schedule_work(&adapter->task); |
4549 | out: | 4549 | out: |
4550 | return IRQ_RETVAL(handled); | 4550 | return IRQ_RETVAL(handled); |
4551 | } | 4551 | } |
4552 | 4552 | ||
4553 | /** | 4553 | /** |
4554 | * et131x_isr_handler - The ISR handler | 4554 | * et131x_isr_handler - The ISR handler |
4555 | * @p_adapter: a pointer to the device's private adapter structure | 4555 | * @p_adapter: a pointer to the device's private adapter structure |
4556 | * | 4556 | * |
4557 | * scheduled to run in a deferred context by the ISR. This is where the ISR's | 4557 | * scheduled to run in a deferred context by the ISR. This is where the ISR's |
4558 | * work actually gets done. | 4558 | * work actually gets done. |
4559 | */ | 4559 | */ |
4560 | static void et131x_isr_handler(struct work_struct *work) | 4560 | static void et131x_isr_handler(struct work_struct *work) |
4561 | { | 4561 | { |
4562 | struct et131x_adapter *adapter = | 4562 | struct et131x_adapter *adapter = |
4563 | container_of(work, struct et131x_adapter, task); | 4563 | container_of(work, struct et131x_adapter, task); |
4564 | u32 status = adapter->stats.interrupt_status; | 4564 | u32 status = adapter->stats.interrupt_status; |
4565 | struct address_map __iomem *iomem = adapter->regs; | 4565 | struct address_map __iomem *iomem = adapter->regs; |
4566 | 4566 | ||
4567 | /* | 4567 | /* |
4568 | * These first two are by far the most common. Once handled, we clear | 4568 | * These first two are by far the most common. Once handled, we clear |
4569 | * their two bits in the status word. If the word is now zero, we | 4569 | * their two bits in the status word. If the word is now zero, we |
4570 | * exit. | 4570 | * exit. |
4571 | */ | 4571 | */ |
4572 | /* Handle all the completed Transmit interrupts */ | 4572 | /* Handle all the completed Transmit interrupts */ |
4573 | if (status & ET_INTR_TXDMA_ISR) | 4573 | if (status & ET_INTR_TXDMA_ISR) |
4574 | et131x_handle_send_interrupt(adapter); | 4574 | et131x_handle_send_interrupt(adapter); |
4575 | 4575 | ||
4576 | /* Handle all the completed Receives interrupts */ | 4576 | /* Handle all the completed Receives interrupts */ |
4577 | if (status & ET_INTR_RXDMA_XFR_DONE) | 4577 | if (status & ET_INTR_RXDMA_XFR_DONE) |
4578 | et131x_handle_recv_interrupt(adapter); | 4578 | et131x_handle_recv_interrupt(adapter); |
4579 | 4579 | ||
4580 | status &= 0xffffffd7; | 4580 | status &= 0xffffffd7; |
4581 | 4581 | ||
4582 | if (status) { | 4582 | if (status) { |
4583 | /* Handle the TXDMA Error interrupt */ | 4583 | /* Handle the TXDMA Error interrupt */ |
4584 | if (status & ET_INTR_TXDMA_ERR) { | 4584 | if (status & ET_INTR_TXDMA_ERR) { |
4585 | u32 txdma_err; | 4585 | u32 txdma_err; |
4586 | 4586 | ||
4587 | /* Following read also clears the register (COR) */ | 4587 | /* Following read also clears the register (COR) */ |
4588 | txdma_err = readl(&iomem->txdma.tx_dma_error); | 4588 | txdma_err = readl(&iomem->txdma.tx_dma_error); |
4589 | 4589 | ||
4590 | dev_warn(&adapter->pdev->dev, | 4590 | dev_warn(&adapter->pdev->dev, |
4591 | "TXDMA_ERR interrupt, error = %d\n", | 4591 | "TXDMA_ERR interrupt, error = %d\n", |
4592 | txdma_err); | 4592 | txdma_err); |
4593 | } | 4593 | } |
4594 | 4594 | ||
4595 | /* Handle Free Buffer Ring 0 and 1 Low interrupt */ | 4595 | /* Handle Free Buffer Ring 0 and 1 Low interrupt */ |
4596 | if (status & | 4596 | if (status & |
4597 | (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { | 4597 | (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { |
4598 | /* | 4598 | /* |
4599 | * This indicates the number of unused buffers in | 4599 | * This indicates the number of unused buffers in |
4600 | * RXDMA free buffer ring 0 is <= the limit you | 4600 | * RXDMA free buffer ring 0 is <= the limit you |
4601 | * programmed. Free buffer resources need to be | 4601 | * programmed. Free buffer resources need to be |
4602 | * returned. Free buffers are consumed as packets | 4602 | * returned. Free buffers are consumed as packets |
4603 | * are passed from the network to the host. The host | 4603 | * are passed from the network to the host. The host |
4604 | * becomes aware of the packets from the contents of | 4604 | * becomes aware of the packets from the contents of |
4605 | * the packet status ring. This ring is queried when | 4605 | * the packet status ring. This ring is queried when |
4606 | * the packet done interrupt occurs. Packets are then | 4606 | * the packet done interrupt occurs. Packets are then |
4607 | * passed to the OS. When the OS is done with the | 4607 | * passed to the OS. When the OS is done with the |
4608 | * packets the resources can be returned to the | 4608 | * packets the resources can be returned to the |
4609 | * ET1310 for re-use. This interrupt is one method of | 4609 | * ET1310 for re-use. This interrupt is one method of |
4610 | * returning resources. | 4610 | * returning resources. |
4611 | */ | 4611 | */ |
4612 | 4612 | ||
4613 | /* If the user has flow control on, then we will | 4613 | /* If the user has flow control on, then we will |
4614 | * send a pause packet, otherwise just exit | 4614 | * send a pause packet, otherwise just exit |
4615 | */ | 4615 | */ |
4616 | if (adapter->flowcontrol == FLOW_TXONLY || | 4616 | if (adapter->flowcontrol == FLOW_TXONLY || |
4617 | adapter->flowcontrol == FLOW_BOTH) { | 4617 | adapter->flowcontrol == FLOW_BOTH) { |
4618 | u32 pm_csr; | 4618 | u32 pm_csr; |
4619 | 4619 | ||
4620 | /* Tell the device to send a pause packet via | 4620 | /* Tell the device to send a pause packet via |
4621 | * the back pressure register (bp req and | 4621 | * the back pressure register (bp req and |
4622 | * bp xon/xoff) | 4622 | * bp xon/xoff) |
4623 | */ | 4623 | */ |
4624 | pm_csr = readl(&iomem->global.pm_csr); | 4624 | pm_csr = readl(&iomem->global.pm_csr); |
4625 | if (!et1310_in_phy_coma(adapter)) | 4625 | if (!et1310_in_phy_coma(adapter)) |
4626 | writel(3, &iomem->txmac.bp_ctrl); | 4626 | writel(3, &iomem->txmac.bp_ctrl); |
4627 | } | 4627 | } |
4628 | } | 4628 | } |
4629 | 4629 | ||
4630 | /* Handle Packet Status Ring Low Interrupt */ | 4630 | /* Handle Packet Status Ring Low Interrupt */ |
4631 | if (status & ET_INTR_RXDMA_STAT_LOW) { | 4631 | if (status & ET_INTR_RXDMA_STAT_LOW) { |
4632 | 4632 | ||
4633 | /* | 4633 | /* |
4634 | * Same idea as with the two Free Buffer Rings. | 4634 | * Same idea as with the two Free Buffer Rings. |
4635 | * Packets going from the network to the host each | 4635 | * Packets going from the network to the host each |
4636 | * consume a free buffer resource and a packet status | 4636 | * consume a free buffer resource and a packet status |
4642 | 							 * resource. These resources are passed to the OS. | 4637 | 							 * resource. These resources are passed to the OS. |
4638 | * When the OS is done with the resources, they need | 4638 | * When the OS is done with the resources, they need |
4639 | * to be returned to the ET1310. This is one method | 4639 | * to be returned to the ET1310. This is one method |
4640 | * of returning the resources. | 4640 | * of returning the resources. |
4641 | */ | 4641 | */ |
4642 | } | 4642 | } |
4643 | 4643 | ||
4644 | /* Handle RXDMA Error Interrupt */ | 4644 | /* Handle RXDMA Error Interrupt */ |
4645 | if (status & ET_INTR_RXDMA_ERR) { | 4645 | if (status & ET_INTR_RXDMA_ERR) { |
4646 | /* | 4646 | /* |
4647 | * The rxdma_error interrupt is sent when a time-out | 4647 | * The rxdma_error interrupt is sent when a time-out |
4648 | * on a request issued by the JAGCore has occurred or | 4648 | * on a request issued by the JAGCore has occurred or |
4649 | * a completion is returned with an un-successful | 4649 | * a completion is returned with an un-successful |
4650 | * status. In both cases the request is considered | 4650 | * status. In both cases the request is considered |
4651 | * complete. The JAGCore will automatically re-try the | 4651 | * complete. The JAGCore will automatically re-try the |
4652 | * request in question. Normally information on events | 4652 | * request in question. Normally information on events |
4653 | * like these are sent to the host using the "Advanced | 4653 | * like these are sent to the host using the "Advanced |
4654 | * Error Reporting" capability. This interrupt is | 4654 | * Error Reporting" capability. This interrupt is |
4655 | * another way of getting similar information. The | 4655 | * another way of getting similar information. The |
4656 | * only thing required is to clear the interrupt by | 4656 | * only thing required is to clear the interrupt by |
4657 | * reading the ISR in the global resources. The | 4657 | * reading the ISR in the global resources. The |
4658 | * JAGCore will do a re-try on the request. Normally | 4658 | * JAGCore will do a re-try on the request. Normally |
4659 | * you should never see this interrupt. If you start | 4659 | * you should never see this interrupt. If you start |
4660 | * to see this interrupt occurring frequently then | 4660 | * to see this interrupt occurring frequently then |
4661 | * something bad has occurred. A reset might be the | 4661 | * something bad has occurred. A reset might be the |
4662 | * thing to do. | 4662 | * thing to do. |
4663 | */ | 4663 | */ |
4664 | /* TRAP();*/ | 4664 | /* TRAP();*/ |
4665 | 4665 | ||
4666 | dev_warn(&adapter->pdev->dev, | 4666 | dev_warn(&adapter->pdev->dev, |
4667 | "RxDMA_ERR interrupt, error %x\n", | 4667 | "RxDMA_ERR interrupt, error %x\n", |
4668 | readl(&iomem->txmac.tx_test)); | 4668 | readl(&iomem->txmac.tx_test)); |
4669 | } | 4669 | } |
4670 | 4670 | ||
4671 | /* Handle the Wake on LAN Event */ | 4671 | /* Handle the Wake on LAN Event */ |
4672 | if (status & ET_INTR_WOL) { | 4672 | if (status & ET_INTR_WOL) { |
4673 | /* | 4673 | /* |
4674 | * This is a secondary interrupt for wake on LAN. | 4674 | * This is a secondary interrupt for wake on LAN. |
4675 | * The driver should never see this, if it does, | 4675 | * The driver should never see this, if it does, |
4676 | * something serious is wrong. We will TRAP the | 4676 | * something serious is wrong. We will TRAP the |
4677 | * message when we are in DBG mode, otherwise we | 4677 | * message when we are in DBG mode, otherwise we |
4678 | * will ignore it. | 4678 | * will ignore it. |
4679 | */ | 4679 | */ |
4680 | dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); | 4680 | dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); |
4681 | } | 4681 | } |
4682 | 4682 | ||
4683 | /* Let's move on to the TxMac */ | 4683 | /* Let's move on to the TxMac */ |
4684 | if (status & ET_INTR_TXMAC) { | 4684 | if (status & ET_INTR_TXMAC) { |
4685 | u32 err = readl(&iomem->txmac.err); | 4685 | u32 err = readl(&iomem->txmac.err); |
4686 | 4686 | ||
4687 | /* | 4687 | /* |
4688 | * When any of the errors occur and TXMAC generates | 4688 | * When any of the errors occur and TXMAC generates |
4689 | * an interrupt to report these errors, it usually | 4689 | * an interrupt to report these errors, it usually |
4690 | * means that TXMAC has detected an error in the data | 4690 | * means that TXMAC has detected an error in the data |
4691 | * stream retrieved from the on-chip Tx Q. All of | 4691 | * stream retrieved from the on-chip Tx Q. All of |
4692 | * these errors are catastrophic and TXMAC won't be | 4692 | * these errors are catastrophic and TXMAC won't be |
4693 | * able to recover data when these errors occur. In | 4693 | * able to recover data when these errors occur. In |
4694 | * a nutshell, the whole Tx path will have to be reset | 4694 | * a nutshell, the whole Tx path will have to be reset |
4695 | * and re-configured afterwards. | 4695 | * and re-configured afterwards. |
4696 | */ | 4696 | */ |
4697 | dev_warn(&adapter->pdev->dev, | 4697 | dev_warn(&adapter->pdev->dev, |
4698 | "TXMAC interrupt, error 0x%08x\n", | 4698 | "TXMAC interrupt, error 0x%08x\n", |
4699 | err); | 4699 | err); |
4700 | 4700 | ||
4701 | /* If we are debugging, we want to see this error, | 4701 | /* If we are debugging, we want to see this error, |
4702 | * otherwise we just want the device to be reset and | 4702 | * otherwise we just want the device to be reset and |
4703 | * continue | 4703 | * continue |
4704 | */ | 4704 | */ |
4705 | } | 4705 | } |
4706 | 4706 | ||
4707 | /* Handle RXMAC Interrupt */ | 4707 | /* Handle RXMAC Interrupt */ |
4708 | if (status & ET_INTR_RXMAC) { | 4708 | if (status & ET_INTR_RXMAC) { |
4709 | /* | 4709 | /* |
4710 | * These interrupts are catastrophic to the device, | 4710 | * These interrupts are catastrophic to the device, |
4711 | * what we need to do is disable the interrupts and | 4711 | * what we need to do is disable the interrupts and |
4712 | * set the flag to cause us to reset so we can solve | 4712 | * set the flag to cause us to reset so we can solve |
4713 | * this issue. | 4713 | * this issue. |
4714 | */ | 4714 | */ |
4715 | /* MP_SET_FLAG( adapter, | 4715 | /* MP_SET_FLAG( adapter, |
4716 | fMP_ADAPTER_HARDWARE_ERROR); */ | 4716 | fMP_ADAPTER_HARDWARE_ERROR); */ |
4717 | 4717 | ||
4718 | dev_warn(&adapter->pdev->dev, | 4718 | dev_warn(&adapter->pdev->dev, |
4719 | "RXMAC interrupt, error 0x%08x. Requesting reset\n", | 4719 | "RXMAC interrupt, error 0x%08x. Requesting reset\n", |
4720 | readl(&iomem->rxmac.err_reg)); | 4720 | readl(&iomem->rxmac.err_reg)); |
4721 | 4721 | ||
4722 | dev_warn(&adapter->pdev->dev, | 4722 | dev_warn(&adapter->pdev->dev, |
4723 | "Enable 0x%08x, Diag 0x%08x\n", | 4723 | "Enable 0x%08x, Diag 0x%08x\n", |
4724 | readl(&iomem->rxmac.ctrl), | 4724 | readl(&iomem->rxmac.ctrl), |
4725 | readl(&iomem->rxmac.rxq_diag)); | 4725 | readl(&iomem->rxmac.rxq_diag)); |
4726 | 4726 | ||
4727 | /* | 4727 | /* |
4728 | * If we are debugging, we want to see this error, | 4728 | * If we are debugging, we want to see this error, |
4729 | * otherwise we just want the device to be reset and | 4729 | * otherwise we just want the device to be reset and |
4730 | * continue | 4730 | * continue |
4731 | */ | 4731 | */ |
4732 | } | 4732 | } |
4733 | 4733 | ||
4734 | /* Handle MAC_STAT Interrupt */ | 4734 | /* Handle MAC_STAT Interrupt */ |
4735 | if (status & ET_INTR_MAC_STAT) { | 4735 | if (status & ET_INTR_MAC_STAT) { |
4736 | /* | 4736 | /* |
4737 | * This means at least one of the un-masked counters | 4737 | * This means at least one of the un-masked counters |
4738 | * in the MAC_STAT block has rolled over. Use this | 4738 | * in the MAC_STAT block has rolled over. Use this |
4739 | * to maintain the top, software managed bits of the | 4739 | * to maintain the top, software managed bits of the |
4740 | * counter(s). | 4740 | * counter(s). |
4741 | */ | 4741 | */ |
4742 | et1310_handle_macstat_interrupt(adapter); | 4742 | et1310_handle_macstat_interrupt(adapter); |
4743 | } | 4743 | } |
4744 | 4744 | ||
4745 | /* Handle SLV Timeout Interrupt */ | 4745 | /* Handle SLV Timeout Interrupt */ |
4746 | if (status & ET_INTR_SLV_TIMEOUT) { | 4746 | if (status & ET_INTR_SLV_TIMEOUT) { |
4747 | /* | 4747 | /* |
4748 | * This means a timeout has occurred on a read or | 4748 | * This means a timeout has occurred on a read or |
4749 | * write request to one of the JAGCore registers. The | 4749 | * write request to one of the JAGCore registers. The |
4750 | * Global Resources block has terminated the request | 4750 | * Global Resources block has terminated the request |
4751 | * and on a read request, returned a "fake" value. | 4751 | * and on a read request, returned a "fake" value. |
4752 | * The most likely reasons are: Bad Address or the | 4752 | * The most likely reasons are: Bad Address or the |
4753 | * addressed module is in a power-down state and | 4753 | * addressed module is in a power-down state and |
4754 | * can't respond. | 4754 | * can't respond. |
4755 | */ | 4755 | */ |
4756 | } | 4756 | } |
4757 | } | 4757 | } |
4758 | et131x_enable_interrupts(adapter); | 4758 | et131x_enable_interrupts(adapter); |
4759 | } | 4759 | } |
4760 | 4760 | ||
4761 | /** | 4761 | /** |
4762 | * et131x_stats - Return the current device statistics. | 4762 | * et131x_stats - Return the current device statistics. |
4763 | * @netdev: device whose stats are being queried | 4763 | * @netdev: device whose stats are being queried |
4764 | * | 4764 | * |
4765 | * Returns 0 on success, errno on failure (as defined in errno.h) | 4765 | * Returns 0 on success, errno on failure (as defined in errno.h) |
4766 | */ | 4766 | */ |
4767 | static struct net_device_stats *et131x_stats(struct net_device *netdev) | 4767 | static struct net_device_stats *et131x_stats(struct net_device *netdev) |
4768 | { | 4768 | { |
4769 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4769 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4770 | struct net_device_stats *stats = &adapter->net_stats; | 4770 | struct net_device_stats *stats = &adapter->net_stats; |
4771 | struct ce_stats *devstat = &adapter->stats; | 4771 | struct ce_stats *devstat = &adapter->stats; |
4772 | 4772 | ||
4773 | stats->rx_errors = devstat->rx_length_errs + | 4773 | stats->rx_errors = devstat->rx_length_errs + |
4774 | devstat->rx_align_errs + | 4774 | devstat->rx_align_errs + |
4775 | devstat->rx_crc_errs + | 4775 | devstat->rx_crc_errs + |
4776 | devstat->rx_code_violations + | 4776 | devstat->rx_code_violations + |
4777 | devstat->rx_other_errs; | 4777 | devstat->rx_other_errs; |
4778 | stats->tx_errors = devstat->tx_max_pkt_errs; | 4778 | stats->tx_errors = devstat->tx_max_pkt_errs; |
4779 | stats->multicast = devstat->multicast_pkts_rcvd; | 4779 | stats->multicast = devstat->multicast_pkts_rcvd; |
4780 | stats->collisions = devstat->tx_collisions; | 4780 | stats->collisions = devstat->tx_collisions; |
4781 | 4781 | ||
4782 | stats->rx_length_errors = devstat->rx_length_errs; | 4782 | stats->rx_length_errors = devstat->rx_length_errs; |
4783 | stats->rx_over_errors = devstat->rx_overflows; | 4783 | stats->rx_over_errors = devstat->rx_overflows; |
4784 | stats->rx_crc_errors = devstat->rx_crc_errs; | 4784 | stats->rx_crc_errors = devstat->rx_crc_errs; |
4785 | 4785 | ||
4786 | /* NOTE: These stats don't have corresponding values in CE_STATS, | 4786 | /* NOTE: These stats don't have corresponding values in CE_STATS, |
4787 | * so we're going to have to update these directly from within the | 4787 | * so we're going to have to update these directly from within the |
4788 | * TX/RX code | 4788 | * TX/RX code |
4789 | */ | 4789 | */ |
4790 | /* stats->rx_bytes = 20; devstat->; */ | 4790 | /* stats->rx_bytes = 20; devstat->; */ |
4791 | /* stats->tx_bytes = 20; devstat->; */ | 4791 | /* stats->tx_bytes = 20; devstat->; */ |
4792 | /* stats->rx_dropped = devstat->; */ | 4792 | /* stats->rx_dropped = devstat->; */ |
4793 | /* stats->tx_dropped = devstat->; */ | 4793 | /* stats->tx_dropped = devstat->; */ |
4794 | 4794 | ||
4795 | /* NOTE: Not used, can't find analogous statistics */ | 4795 | /* NOTE: Not used, can't find analogous statistics */ |
4796 | /* stats->rx_frame_errors = devstat->; */ | 4796 | /* stats->rx_frame_errors = devstat->; */ |
4797 | /* stats->rx_fifo_errors = devstat->; */ | 4797 | /* stats->rx_fifo_errors = devstat->; */ |
4798 | /* stats->rx_missed_errors = devstat->; */ | 4798 | /* stats->rx_missed_errors = devstat->; */ |
4799 | 4799 | ||
4800 | /* stats->tx_aborted_errors = devstat->; */ | 4800 | /* stats->tx_aborted_errors = devstat->; */ |
4801 | /* stats->tx_carrier_errors = devstat->; */ | 4801 | /* stats->tx_carrier_errors = devstat->; */ |
4802 | /* stats->tx_fifo_errors = devstat->; */ | 4802 | /* stats->tx_fifo_errors = devstat->; */ |
4803 | /* stats->tx_heartbeat_errors = devstat->; */ | 4803 | /* stats->tx_heartbeat_errors = devstat->; */ |
4804 | /* stats->tx_window_errors = devstat->; */ | 4804 | /* stats->tx_window_errors = devstat->; */ |
4805 | return stats; | 4805 | return stats; |
4806 | } | 4806 | } |
4807 | 4807 | ||
4808 | /** | 4808 | /** |
4809 | * et131x_open - Open the device for use. | 4809 | * et131x_open - Open the device for use. |
4810 | * @netdev: device to be opened | 4810 | * @netdev: device to be opened |
4811 | * | 4811 | * |
4812 | * Returns 0 on success, errno on failure (as defined in errno.h) | 4812 | * Returns 0 on success, errno on failure (as defined in errno.h) |
4813 | */ | 4813 | */ |
4814 | static int et131x_open(struct net_device *netdev) | 4814 | static int et131x_open(struct net_device *netdev) |
4815 | { | 4815 | { |
4816 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4816 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4817 | struct pci_dev *pdev = adapter->pdev; | 4817 | struct pci_dev *pdev = adapter->pdev; |
4818 | unsigned int irq = pdev->irq; | 4818 | unsigned int irq = pdev->irq; |
4819 | int result; | 4819 | int result; |
4820 | 4820 | ||
4821 | /* Start the timer to track NIC errors */ | 4821 | /* Start the timer to track NIC errors */ |
4822 | init_timer(&adapter->error_timer); | 4822 | init_timer(&adapter->error_timer); |
4823 | adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; | 4823 | adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; |
4824 | adapter->error_timer.function = et131x_error_timer_handler; | 4824 | adapter->error_timer.function = et131x_error_timer_handler; |
4825 | adapter->error_timer.data = (unsigned long)adapter; | 4825 | adapter->error_timer.data = (unsigned long)adapter; |
4826 | add_timer(&adapter->error_timer); | 4826 | add_timer(&adapter->error_timer); |
4827 | 4827 | ||
4828 | result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev); | 4828 | result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev); |
4829 | if (result) { | 4829 | if (result) { |
4830 | dev_err(&pdev->dev, "could not register IRQ %d\n", irq); | 4830 | dev_err(&pdev->dev, "could not register IRQ %d\n", irq); |
4831 | return result; | 4831 | return result; |
4832 | } | 4832 | } |
4833 | 4833 | ||
4834 | adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; | 4834 | adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; |
4835 | 4835 | ||
4836 | et131x_up(netdev); | 4836 | et131x_up(netdev); |
4837 | 4837 | ||
4838 | return result; | 4838 | return result; |
4839 | } | 4839 | } |
4840 | 4840 | ||
/**
 * et131x_close - Close the device
 * @netdev: device to be closed
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Quiesce tx/rx first so no interrupt work is in flight when the
	 * IRQ is released below.
	 */
	et131x_down(netdev);

	adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}
4859 | 4859 | ||
4860 | /** | 4860 | /** |
4861 | * et131x_ioctl - The I/O Control handler for the driver | 4861 | * et131x_ioctl - The I/O Control handler for the driver |
4862 | * @netdev: device on which the control request is being made | 4862 | * @netdev: device on which the control request is being made |
4863 | * @reqbuf: a pointer to the IOCTL request buffer | 4863 | * @reqbuf: a pointer to the IOCTL request buffer |
4864 | * @cmd: the IOCTL command code | 4864 | * @cmd: the IOCTL command code |
4865 | * | 4865 | * |
4866 | * Returns 0 on success, errno on failure (as defined in errno.h) | 4866 | * Returns 0 on success, errno on failure (as defined in errno.h) |
4867 | */ | 4867 | */ |
4868 | static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, | 4868 | static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, |
4869 | int cmd) | 4869 | int cmd) |
4870 | { | 4870 | { |
4871 | struct et131x_adapter *adapter = netdev_priv(netdev); | 4871 | struct et131x_adapter *adapter = netdev_priv(netdev); |
4872 | 4872 | ||
4873 | if (!adapter->phydev) | 4873 | if (!adapter->phydev) |
4874 | return -EINVAL; | 4874 | return -EINVAL; |
4875 | 4875 | ||
4876 | return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); | 4876 | return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); |
4877 | } | 4877 | } |
4878 | 4878 | ||
/**
 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
 * @adapter: pointer to our private adapter structure
 *
 * FIXME: lot of dups with MAC code
 *
 * Returns 0 on success, errno on failure
 */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int filter = adapter->packet_filter;
	int status = 0;	/* never changed below; kept so callers get 0 */
	u32 ctrl;
	u32 pf_ctrl;

	/* Read-modify-write so unrelated bits in the rxmac control and
	 * packet-filter control registers are preserved.
	 */
	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering.  Enable it in the individual
	 * case statements that require the device to filter something
	 */
	ctrl |= 0x04;	/* ctrl bit 2: packet-filter disable */

	/* Set us to be in promiscuous mode so we receive everything, this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7;	/* Clear filter bits */
	else {
		/*
		 * Set us up with Multicast packet filtering.  Three cases are
		 * possible - (1) we have a multi-cast list, (2) we receive ALL
		 * multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2;	/* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;	/* filter on the multicast list */
			ctrl &= ~0x04;	/* re-enable packet filtering */
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;	/* pf_ctrl bit 2: unicast filter */
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1;	/* Broadcast filter bit */
			ctrl &= ~0x04;
		} else
			pf_ctrl &= ~1;

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return status;
}
4944 | 4944 | ||
/**
 * et131x_multicast - The handler to configure multicasting on the interface
 * @netdev: a pointer to a net_device struct representing the device
 *
 * Translates the net_device filter flags (IFF_PROMISC, IFF_ALLMULTI) and
 * multicast list into the adapter's packet_filter bits and pushes them to
 * the hardware only when they actually changed.
 */
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;

	/* Serialise against other paths that touch packet_filter and the
	 * multicast list.
	 */
	spin_lock_irqsave(&adapter->lock, flags);

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */

	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if (netdev->flags & IFF_ALLMULTI)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	/* Too many addresses for the hardware list: fall back to all-multi */
	if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter) {
		/* Call the device's filter function */
		et131x_set_packet_filter(adapter);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}
5014 | 5014 | ||
/**
 * et131x_tx - The handler to tx a packet on the device
 * @skb: data to be Tx'd
 * @netdev: device on which data is to be Tx'd
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* stop the queue if it's getting full */
	if (adapter->tx_ring.used >= NUM_TCB - 1 &&
	    !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* Call the device-specific data Tx routine */
	status = et131x_send_packets(skb, netdev);

	/* Check status and manage the netif queue if necessary */
	if (status != 0) {
		if (status == -ENOMEM)
			status = NETDEV_TX_BUSY;
		else
			status = NETDEV_TX_OK;
		/* NOTE(review): non-ENOMEM failures are reported as
		 * NETDEV_TX_OK, which tells the stack the skb was consumed.
		 * This assumes et131x_send_packets() frees the skb on those
		 * failures - confirm, otherwise the skb leaks.
		 */
	}
	return status;
}
5047 | 5047 | ||
5048 | /** | 5048 | /** |
5049 | * et131x_tx_timeout - Timeout handler | 5049 | * et131x_tx_timeout - Timeout handler |
5050 | * @netdev: a pointer to a net_device struct representing the device | 5050 | * @netdev: a pointer to a net_device struct representing the device |
5051 | * | 5051 | * |
5052 | * The handler called when a Tx request times out. The timeout period is | 5052 | * The handler called when a Tx request times out. The timeout period is |
5053 | * specified by the 'tx_timeo" element in the net_device structure (see | 5053 | * specified by the 'tx_timeo" element in the net_device structure (see |
5054 | * et131x_alloc_device() to see how this value is set). | 5054 | * et131x_alloc_device() to see how this value is set). |
5055 | */ | 5055 | */ |
5056 | static void et131x_tx_timeout(struct net_device *netdev) | 5056 | static void et131x_tx_timeout(struct net_device *netdev) |
5057 | { | 5057 | { |
5058 | struct et131x_adapter *adapter = netdev_priv(netdev); | 5058 | struct et131x_adapter *adapter = netdev_priv(netdev); |
5059 | struct tcb *tcb; | 5059 | struct tcb *tcb; |
5060 | unsigned long flags; | 5060 | unsigned long flags; |
5061 | 5061 | ||
5062 | /* If the device is closed, ignore the timeout */ | 5062 | /* If the device is closed, ignore the timeout */ |
5063 | if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) | 5063 | if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) |
5064 | return; | 5064 | return; |
5065 | 5065 | ||
5066 | /* Any nonrecoverable hardware error? | 5066 | /* Any nonrecoverable hardware error? |
5067 | * Checks adapter->flags for any failure in phy reading | 5067 | * Checks adapter->flags for any failure in phy reading |
5068 | */ | 5068 | */ |
5069 | if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) | 5069 | if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) |
5070 | return; | 5070 | return; |
5071 | 5071 | ||
5072 | /* Hardware failure? */ | 5072 | /* Hardware failure? */ |
5073 | if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { | 5073 | if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { |
5074 | dev_err(&adapter->pdev->dev, "hardware error - reset\n"); | 5074 | dev_err(&adapter->pdev->dev, "hardware error - reset\n"); |
5075 | return; | 5075 | return; |
5076 | } | 5076 | } |
5077 | 5077 | ||
5078 | /* Is send stuck? */ | 5078 | /* Is send stuck? */ |
5079 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); | 5079 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
5080 | 5080 | ||
5081 | tcb = adapter->tx_ring.send_head; | 5081 | tcb = adapter->tx_ring.send_head; |
5082 | 5082 | ||
5083 | if (tcb != NULL) { | 5083 | if (tcb != NULL) { |
5084 | tcb->count++; | 5084 | tcb->count++; |
5085 | 5085 | ||
5086 | if (tcb->count > NIC_SEND_HANG_THRESHOLD) { | 5086 | if (tcb->count > NIC_SEND_HANG_THRESHOLD) { |
5087 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, | 5087 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, |
5088 | flags); | 5088 | flags); |
5089 | 5089 | ||
5090 | dev_warn(&adapter->pdev->dev, | 5090 | dev_warn(&adapter->pdev->dev, |
5091 | "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", | 5091 | "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", |
5092 | tcb->index, | 5092 | tcb->index, |
5093 | tcb->flags); | 5093 | tcb->flags); |
5094 | 5094 | ||
5095 | adapter->net_stats.tx_errors++; | 5095 | adapter->net_stats.tx_errors++; |
5096 | 5096 | ||
5097 | /* perform reset of tx/rx */ | 5097 | /* perform reset of tx/rx */ |
5098 | et131x_disable_txrx(netdev); | 5098 | et131x_disable_txrx(netdev); |
5099 | et131x_enable_txrx(netdev); | 5099 | et131x_enable_txrx(netdev); |
5100 | return; | 5100 | return; |
5101 | } | 5101 | } |
5102 | } | 5102 | } |
5103 | 5103 | ||
5104 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); | 5104 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
5105 | } | 5105 | } |
5106 | 5106 | ||
5107 | /** | 5107 | /** |
5108 | * et131x_change_mtu - The handler called to change the MTU for the device | 5108 | * et131x_change_mtu - The handler called to change the MTU for the device |
5109 | * @netdev: device whose MTU is to be changed | 5109 | * @netdev: device whose MTU is to be changed |
5110 | * @new_mtu: the desired MTU | 5110 | * @new_mtu: the desired MTU |
5111 | * | 5111 | * |
5112 | * Returns 0 on success, errno on failure (as defined in errno.h) | 5112 | * Returns 0 on success, errno on failure (as defined in errno.h) |
5113 | */ | 5113 | */ |
5114 | static int et131x_change_mtu(struct net_device *netdev, int new_mtu) | 5114 | static int et131x_change_mtu(struct net_device *netdev, int new_mtu) |
5115 | { | 5115 | { |
5116 | int result = 0; | 5116 | int result = 0; |
5117 | struct et131x_adapter *adapter = netdev_priv(netdev); | 5117 | struct et131x_adapter *adapter = netdev_priv(netdev); |
5118 | 5118 | ||
5119 | /* Make sure the requested MTU is valid */ | 5119 | /* Make sure the requested MTU is valid */ |
5120 | if (new_mtu < 64 || new_mtu > 9216) | 5120 | if (new_mtu < 64 || new_mtu > 9216) |
5121 | return -EINVAL; | 5121 | return -EINVAL; |
5122 | 5122 | ||
5123 | et131x_disable_txrx(netdev); | 5123 | et131x_disable_txrx(netdev); |
5124 | et131x_handle_send_interrupt(adapter); | 5124 | et131x_handle_send_interrupt(adapter); |
5125 | et131x_handle_recv_interrupt(adapter); | 5125 | et131x_handle_recv_interrupt(adapter); |
5126 | 5126 | ||
5127 | /* Set the new MTU */ | 5127 | /* Set the new MTU */ |
5128 | netdev->mtu = new_mtu; | 5128 | netdev->mtu = new_mtu; |
5129 | 5129 | ||
5130 | /* Free Rx DMA memory */ | 5130 | /* Free Rx DMA memory */ |
5131 | et131x_adapter_memory_free(adapter); | 5131 | et131x_adapter_memory_free(adapter); |
5132 | 5132 | ||
5133 | /* Set the config parameter for Jumbo Packet support */ | 5133 | /* Set the config parameter for Jumbo Packet support */ |
5134 | adapter->registry_jumbo_packet = new_mtu + 14; | 5134 | adapter->registry_jumbo_packet = new_mtu + 14; |
5135 | et131x_soft_reset(adapter); | 5135 | et131x_soft_reset(adapter); |
5136 | 5136 | ||
5137 | /* Alloc and init Rx DMA memory */ | 5137 | /* Alloc and init Rx DMA memory */ |
5138 | result = et131x_adapter_memory_alloc(adapter); | 5138 | result = et131x_adapter_memory_alloc(adapter); |
5139 | if (result != 0) { | 5139 | if (result != 0) { |
5140 | dev_warn(&adapter->pdev->dev, | 5140 | dev_warn(&adapter->pdev->dev, |
5141 | "Change MTU failed; couldn't re-alloc DMA memory\n"); | 5141 | "Change MTU failed; couldn't re-alloc DMA memory\n"); |
5142 | return result; | 5142 | return result; |
5143 | } | 5143 | } |
5144 | 5144 | ||
5145 | et131x_init_send(adapter); | 5145 | et131x_init_send(adapter); |
5146 | 5146 | ||
5147 | et131x_hwaddr_init(adapter); | 5147 | et131x_hwaddr_init(adapter); |
5148 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); | 5148 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); |
5149 | 5149 | ||
5150 | /* Init the device with the new settings */ | 5150 | /* Init the device with the new settings */ |
5151 | et131x_adapter_setup(adapter); | 5151 | et131x_adapter_setup(adapter); |
5152 | 5152 | ||
5153 | et131x_enable_txrx(netdev); | 5153 | et131x_enable_txrx(netdev); |
5154 | 5154 | ||
5155 | return result; | 5155 | return result; |
5156 | } | 5156 | } |
5157 | 5157 | ||
5158 | /** | 5158 | /** |
5159 | * et131x_set_mac_addr - handler to change the MAC address for the device | 5159 | * et131x_set_mac_addr - handler to change the MAC address for the device |
5160 | * @netdev: device whose MAC is to be changed | 5160 | * @netdev: device whose MAC is to be changed |
5161 | * @new_mac: the desired MAC address | 5161 | * @new_mac: the desired MAC address |
5162 | * | 5162 | * |
5163 | * Returns 0 on success, errno on failure (as defined in errno.h) | 5163 | * Returns 0 on success, errno on failure (as defined in errno.h) |
5164 | * | 5164 | * |
5165 | * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 | 5165 | * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 |
5166 | */ | 5166 | */ |
5167 | static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) | 5167 | static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) |
5168 | { | 5168 | { |
5169 | int result = 0; | 5169 | int result = 0; |
5170 | struct et131x_adapter *adapter = netdev_priv(netdev); | 5170 | struct et131x_adapter *adapter = netdev_priv(netdev); |
5171 | struct sockaddr *address = new_mac; | 5171 | struct sockaddr *address = new_mac; |
5172 | 5172 | ||
5173 | /* begin blux */ | 5173 | /* begin blux */ |
5174 | 5174 | ||
5175 | if (adapter == NULL) | 5175 | if (adapter == NULL) |
5176 | return -ENODEV; | 5176 | return -ENODEV; |
5177 | 5177 | ||
5178 | /* Make sure the requested MAC is valid */ | 5178 | /* Make sure the requested MAC is valid */ |
5179 | if (!is_valid_ether_addr(address->sa_data)) | 5179 | if (!is_valid_ether_addr(address->sa_data)) |
5180 | return -EINVAL; | 5180 | return -EINVAL; |
5181 | 5181 | ||
5182 | et131x_disable_txrx(netdev); | 5182 | et131x_disable_txrx(netdev); |
5183 | et131x_handle_send_interrupt(adapter); | 5183 | et131x_handle_send_interrupt(adapter); |
5184 | et131x_handle_recv_interrupt(adapter); | 5184 | et131x_handle_recv_interrupt(adapter); |
5185 | 5185 | ||
5186 | /* Set the new MAC */ | 5186 | /* Set the new MAC */ |
5187 | /* netdev->set_mac_address = &new_mac; */ | 5187 | /* netdev->set_mac_address = &new_mac; */ |
5188 | 5188 | ||
5189 | memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); | 5189 | memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); |
5190 | 5190 | ||
5191 | printk(KERN_INFO "%s: Setting MAC address to %pM\n", | 5191 | printk(KERN_INFO "%s: Setting MAC address to %pM\n", |
5192 | netdev->name, netdev->dev_addr); | 5192 | netdev->name, netdev->dev_addr); |
5193 | 5193 | ||
5194 | /* Free Rx DMA memory */ | 5194 | /* Free Rx DMA memory */ |
5195 | et131x_adapter_memory_free(adapter); | 5195 | et131x_adapter_memory_free(adapter); |
5196 | 5196 | ||
5197 | et131x_soft_reset(adapter); | 5197 | et131x_soft_reset(adapter); |
5198 | 5198 | ||
5199 | /* Alloc and init Rx DMA memory */ | 5199 | /* Alloc and init Rx DMA memory */ |
5200 | result = et131x_adapter_memory_alloc(adapter); | 5200 | result = et131x_adapter_memory_alloc(adapter); |
5201 | if (result != 0) { | 5201 | if (result != 0) { |
5202 | dev_err(&adapter->pdev->dev, | 5202 | dev_err(&adapter->pdev->dev, |
5203 | "Change MAC failed; couldn't re-alloc DMA memory\n"); | 5203 | "Change MAC failed; couldn't re-alloc DMA memory\n"); |
5204 | return result; | 5204 | return result; |
5205 | } | 5205 | } |
5206 | 5206 | ||
5207 | et131x_init_send(adapter); | 5207 | et131x_init_send(adapter); |
5208 | 5208 | ||
5209 | et131x_hwaddr_init(adapter); | 5209 | et131x_hwaddr_init(adapter); |
5210 | 5210 | ||
5211 | /* Init the device with the new settings */ | 5211 | /* Init the device with the new settings */ |
5212 | et131x_adapter_setup(adapter); | 5212 | et131x_adapter_setup(adapter); |
5213 | 5213 | ||
5214 | et131x_enable_txrx(netdev); | 5214 | et131x_enable_txrx(netdev); |
5215 | 5215 | ||
5216 | return result; | 5216 | return result; |
5217 | } | 5217 | } |
5218 | 5218 | ||
/* net_device_ops table wiring this driver's handlers into the core
 * network stack (registered via netdev->netdev_ops in et131x_pci_setup).
 */
static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open = et131x_open,
	.ndo_stop = et131x_close,
	.ndo_start_xmit = et131x_tx,
	.ndo_set_rx_mode = et131x_multicast,
	.ndo_tx_timeout = et131x_tx_timeout,
	.ndo_change_mtu = et131x_change_mtu,
	.ndo_set_mac_address = et131x_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_get_stats = et131x_stats,
	.ndo_do_ioctl = et131x_ioctl,
};
5231 | 5231 | ||
5232 | /** | 5232 | /** |
5233 | * et131x_pci_setup - Perform device initialization | 5233 | * et131x_pci_setup - Perform device initialization |
5234 | * @pdev: a pointer to the device's pci_dev structure | 5234 | * @pdev: a pointer to the device's pci_dev structure |
5235 | * @ent: this device's entry in the pci_device_id table | 5235 | * @ent: this device's entry in the pci_device_id table |
5236 | * | 5236 | * |
5237 | * Returns 0 on success, errno on failure (as defined in errno.h) | 5237 | * Returns 0 on success, errno on failure (as defined in errno.h) |
5238 | * | 5238 | * |
5239 | * Registered in the pci_driver structure, this function is called when the | 5239 | * Registered in the pci_driver structure, this function is called when the |
5240 | * PCI subsystem finds a new PCI device which matches the information | 5240 | * PCI subsystem finds a new PCI device which matches the information |
5241 | * contained in the pci_device_id table. This routine is the equivalent to | 5241 | * contained in the pci_device_id table. This routine is the equivalent to |
5242 | * a device insertion routine. | 5242 | * a device insertion routine. |
5243 | */ | 5243 | */ |
5244 | static int __devinit et131x_pci_setup(struct pci_dev *pdev, | 5244 | static int __devinit et131x_pci_setup(struct pci_dev *pdev, |
5245 | const struct pci_device_id *ent) | 5245 | const struct pci_device_id *ent) |
5246 | { | 5246 | { |
5247 | struct net_device *netdev; | 5247 | struct net_device *netdev; |
5248 | struct et131x_adapter *adapter; | 5248 | struct et131x_adapter *adapter; |
5249 | int rc; | 5249 | int rc; |
5250 | int ii; | 5250 | int ii; |
5251 | 5251 | ||
5252 | rc = pci_enable_device(pdev); | 5252 | rc = pci_enable_device(pdev); |
5253 | if (rc < 0) { | 5253 | if (rc < 0) { |
5254 | dev_err(&pdev->dev, "pci_enable_device() failed\n"); | 5254 | dev_err(&pdev->dev, "pci_enable_device() failed\n"); |
5255 | goto out; | 5255 | goto out; |
5256 | } | 5256 | } |
5257 | 5257 | ||
5258 | /* Perform some basic PCI checks */ | 5258 | /* Perform some basic PCI checks */ |
5259 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 5259 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
5260 | dev_err(&pdev->dev, "Can't find PCI device's base address\n"); | 5260 | dev_err(&pdev->dev, "Can't find PCI device's base address\n"); |
5261 | rc = -ENODEV; | 5261 | rc = -ENODEV; |
5262 | goto err_disable; | 5262 | goto err_disable; |
5263 | } | 5263 | } |
5264 | 5264 | ||
5265 | rc = pci_request_regions(pdev, DRIVER_NAME); | 5265 | rc = pci_request_regions(pdev, DRIVER_NAME); |
5266 | if (rc < 0) { | 5266 | if (rc < 0) { |
5267 | dev_err(&pdev->dev, "Can't get PCI resources\n"); | 5267 | dev_err(&pdev->dev, "Can't get PCI resources\n"); |
5268 | goto err_disable; | 5268 | goto err_disable; |
5269 | } | 5269 | } |
5270 | 5270 | ||
5271 | pci_set_master(pdev); | 5271 | pci_set_master(pdev); |
5272 | 5272 | ||
5273 | /* Check the DMA addressing support of this device */ | 5273 | /* Check the DMA addressing support of this device */ |
5274 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { | 5274 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
5275 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | 5275 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
5276 | if (rc < 0) { | 5276 | if (rc < 0) { |
5277 | dev_err(&pdev->dev, | 5277 | dev_err(&pdev->dev, |
5278 | "Unable to obtain 64 bit DMA for consistent allocations\n"); | 5278 | "Unable to obtain 64 bit DMA for consistent allocations\n"); |
5279 | goto err_release_res; | 5279 | goto err_release_res; |
5280 | } | 5280 | } |
5281 | } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { | 5281 | } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
5282 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 5282 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
5283 | if (rc < 0) { | 5283 | if (rc < 0) { |
5284 | dev_err(&pdev->dev, | 5284 | dev_err(&pdev->dev, |
5285 | "Unable to obtain 32 bit DMA for consistent allocations\n"); | 5285 | "Unable to obtain 32 bit DMA for consistent allocations\n"); |
5286 | goto err_release_res; | 5286 | goto err_release_res; |
5287 | } | 5287 | } |
5288 | } else { | 5288 | } else { |
5289 | dev_err(&pdev->dev, "No usable DMA addressing method\n"); | 5289 | dev_err(&pdev->dev, "No usable DMA addressing method\n"); |
5290 | rc = -EIO; | 5290 | rc = -EIO; |
5291 | goto err_release_res; | 5291 | goto err_release_res; |
5292 | } | 5292 | } |
5293 | 5293 | ||
5294 | /* Allocate netdev and private adapter structs */ | 5294 | /* Allocate netdev and private adapter structs */ |
5295 | netdev = alloc_etherdev(sizeof(struct et131x_adapter)); | 5295 | netdev = alloc_etherdev(sizeof(struct et131x_adapter)); |
5296 | if (!netdev) { | 5296 | if (!netdev) { |
5297 | dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); | 5297 | dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); |
5298 | rc = -ENOMEM; | 5298 | rc = -ENOMEM; |
5299 | goto err_release_res; | 5299 | goto err_release_res; |
5300 | } | 5300 | } |
5301 | 5301 | ||
5302 | netdev->watchdog_timeo = ET131X_TX_TIMEOUT; | 5302 | netdev->watchdog_timeo = ET131X_TX_TIMEOUT; |
5303 | netdev->netdev_ops = &et131x_netdev_ops; | 5303 | netdev->netdev_ops = &et131x_netdev_ops; |
5304 | 5304 | ||
5305 | SET_NETDEV_DEV(netdev, &pdev->dev); | 5305 | SET_NETDEV_DEV(netdev, &pdev->dev); |
5306 | et131x_set_ethtool_ops(netdev); | 5306 | et131x_set_ethtool_ops(netdev); |
5307 | 5307 | ||
5308 | adapter = et131x_adapter_init(netdev, pdev); | 5308 | adapter = et131x_adapter_init(netdev, pdev); |
5309 | 5309 | ||
5310 | rc = et131x_pci_init(adapter, pdev); | 5310 | rc = et131x_pci_init(adapter, pdev); |
5311 | if (rc < 0) | 5311 | if (rc < 0) |
5312 | goto err_free_dev; | 5312 | goto err_free_dev; |
5313 | 5313 | ||
5314 | /* Map the bus-relative registers to system virtual memory */ | 5314 | /* Map the bus-relative registers to system virtual memory */ |
5315 | adapter->regs = pci_ioremap_bar(pdev, 0); | 5315 | adapter->regs = pci_ioremap_bar(pdev, 0); |
5316 | if (!adapter->regs) { | 5316 | if (!adapter->regs) { |
5317 | dev_err(&pdev->dev, "Cannot map device registers\n"); | 5317 | dev_err(&pdev->dev, "Cannot map device registers\n"); |
5318 | rc = -ENOMEM; | 5318 | rc = -ENOMEM; |
5319 | goto err_free_dev; | 5319 | goto err_free_dev; |
5320 | } | 5320 | } |
5321 | 5321 | ||
5322 | /* If Phy COMA mode was enabled when we went down, disable it here. */ | 5322 | /* If Phy COMA mode was enabled when we went down, disable it here. */ |
5323 | writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); | 5323 | writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); |
5324 | 5324 | ||
5325 | /* Issue a global reset to the et1310 */ | 5325 | /* Issue a global reset to the et1310 */ |
5326 | et131x_soft_reset(adapter); | 5326 | et131x_soft_reset(adapter); |
5327 | 5327 | ||
5328 | /* Disable all interrupts (paranoid) */ | 5328 | /* Disable all interrupts (paranoid) */ |
5329 | et131x_disable_interrupts(adapter); | 5329 | et131x_disable_interrupts(adapter); |
5330 | 5330 | ||
5331 | /* Allocate DMA memory */ | 5331 | /* Allocate DMA memory */ |
5332 | rc = et131x_adapter_memory_alloc(adapter); | 5332 | rc = et131x_adapter_memory_alloc(adapter); |
5333 | if (rc < 0) { | 5333 | if (rc < 0) { |
5334 | dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); | 5334 | dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); |
5335 | goto err_iounmap; | 5335 | goto err_iounmap; |
5336 | } | 5336 | } |
5337 | 5337 | ||
5338 | /* Init send data structures */ | 5338 | /* Init send data structures */ |
5339 | et131x_init_send(adapter); | 5339 | et131x_init_send(adapter); |
5340 | 5340 | ||
5341 | /* Set up the task structure for the ISR's deferred handler */ | 5341 | /* Set up the task structure for the ISR's deferred handler */ |
5342 | INIT_WORK(&adapter->task, et131x_isr_handler); | 5342 | INIT_WORK(&adapter->task, et131x_isr_handler); |
5343 | 5343 | ||
5344 | /* Copy address into the net_device struct */ | 5344 | /* Copy address into the net_device struct */ |
5345 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); | 5345 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); |
5346 | 5346 | ||
5347 | /* Init variable for counting how long we do not have link status */ | 5347 | /* Init variable for counting how long we do not have link status */ |
5348 | adapter->boot_coma = 0; | 5348 | adapter->boot_coma = 0; |
5349 | et1310_disable_phy_coma(adapter); | 5349 | et1310_disable_phy_coma(adapter); |
5350 | 5350 | ||
5351 | rc = -ENOMEM; | 5351 | rc = -ENOMEM; |
5352 | 5352 | ||
5353 | /* Setup the mii_bus struct */ | 5353 | /* Setup the mii_bus struct */ |
5354 | adapter->mii_bus = mdiobus_alloc(); | 5354 | adapter->mii_bus = mdiobus_alloc(); |
5355 | if (!adapter->mii_bus) { | 5355 | if (!adapter->mii_bus) { |
5356 | dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); | 5356 | dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); |
5357 | goto err_mem_free; | 5357 | goto err_mem_free; |
5358 | } | 5358 | } |
5359 | 5359 | ||
5360 | adapter->mii_bus->name = "et131x_eth_mii"; | 5360 | adapter->mii_bus->name = "et131x_eth_mii"; |
5361 | snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", | 5361 | snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", |
5362 | (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); | 5362 | (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); |
5363 | adapter->mii_bus->priv = netdev; | 5363 | adapter->mii_bus->priv = netdev; |
5364 | adapter->mii_bus->read = et131x_mdio_read; | 5364 | adapter->mii_bus->read = et131x_mdio_read; |
5365 | adapter->mii_bus->write = et131x_mdio_write; | 5365 | adapter->mii_bus->write = et131x_mdio_write; |
5366 | adapter->mii_bus->reset = et131x_mdio_reset; | 5366 | adapter->mii_bus->reset = et131x_mdio_reset; |
5367 | adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | 5367 | adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
5368 | if (!adapter->mii_bus->irq) { | 5368 | if (!adapter->mii_bus->irq) { |
5369 | dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); | 5369 | dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); |
5370 | goto err_mdio_free; | 5370 | goto err_mdio_free; |
5371 | } | 5371 | } |
5372 | 5372 | ||
5373 | for (ii = 0; ii < PHY_MAX_ADDR; ii++) | 5373 | for (ii = 0; ii < PHY_MAX_ADDR; ii++) |
5374 | adapter->mii_bus->irq[ii] = PHY_POLL; | 5374 | adapter->mii_bus->irq[ii] = PHY_POLL; |
5375 | 5375 | ||
5376 | rc = mdiobus_register(adapter->mii_bus); | 5376 | rc = mdiobus_register(adapter->mii_bus); |
5377 | if (rc < 0) { | 5377 | if (rc < 0) { |
5378 | dev_err(&pdev->dev, "failed to register MII bus\n"); | 5378 | dev_err(&pdev->dev, "failed to register MII bus\n"); |
5379 | goto err_mdio_free_irq; | 5379 | goto err_mdio_free_irq; |
5380 | } | 5380 | } |
5381 | 5381 | ||
5382 | rc = et131x_mii_probe(netdev); | 5382 | rc = et131x_mii_probe(netdev); |
5383 | if (rc < 0) { | 5383 | if (rc < 0) { |
5384 | dev_err(&pdev->dev, "failed to probe MII bus\n"); | 5384 | dev_err(&pdev->dev, "failed to probe MII bus\n"); |
5385 | goto err_mdio_unregister; | 5385 | goto err_mdio_unregister; |
5386 | } | 5386 | } |
5387 | 5387 | ||
5388 | /* Setup et1310 as per the documentation */ | 5388 | /* Setup et1310 as per the documentation */ |
5389 | et131x_adapter_setup(adapter); | 5389 | et131x_adapter_setup(adapter); |
5390 | 5390 | ||
5391 | /* We can enable interrupts now | 5391 | /* We can enable interrupts now |
5392 | * | 5392 | * |
5393 | * NOTE - Because registration of interrupt handler is done in the | 5393 | * NOTE - Because registration of interrupt handler is done in the |
5394 | * device's open(), defer enabling device interrupts to that | 5394 | * device's open(), defer enabling device interrupts to that |
5395 | * point | 5395 | * point |
5396 | */ | 5396 | */ |
5397 | 5397 | ||
5398 | /* Register the net_device struct with the Linux network layer */ | 5398 | /* Register the net_device struct with the Linux network layer */ |
5399 | rc = register_netdev(netdev); | 5399 | rc = register_netdev(netdev); |
5400 | if (rc < 0) { | 5400 | if (rc < 0) { |
5401 | dev_err(&pdev->dev, "register_netdev() failed\n"); | 5401 | dev_err(&pdev->dev, "register_netdev() failed\n"); |
5402 | goto err_phy_disconnect; | 5402 | goto err_phy_disconnect; |
5403 | } | 5403 | } |
5404 | 5404 | ||
5405 | /* Register the net_device struct with the PCI subsystem. Save a copy | 5405 | /* Register the net_device struct with the PCI subsystem. Save a copy |
5406 | * of the PCI config space for this device now that the device has | 5406 | * of the PCI config space for this device now that the device has |
5407 | * been initialized, just in case it needs to be quickly restored. | 5407 | * been initialized, just in case it needs to be quickly restored. |
5408 | */ | 5408 | */ |
5409 | pci_set_drvdata(pdev, netdev); | 5409 | pci_set_drvdata(pdev, netdev); |
5410 | out: | 5410 | out: |
5411 | return rc; | 5411 | return rc; |
5412 | 5412 | ||
5413 | err_phy_disconnect: | 5413 | err_phy_disconnect: |
5414 | phy_disconnect(adapter->phydev); | 5414 | phy_disconnect(adapter->phydev); |
5415 | err_mdio_unregister: | 5415 | err_mdio_unregister: |
5416 | mdiobus_unregister(adapter->mii_bus); | 5416 | mdiobus_unregister(adapter->mii_bus); |
5417 | err_mdio_free_irq: | 5417 | err_mdio_free_irq: |
5418 | kfree(adapter->mii_bus->irq); | 5418 | kfree(adapter->mii_bus->irq); |
5419 | err_mdio_free: | 5419 | err_mdio_free: |
5420 | mdiobus_free(adapter->mii_bus); | 5420 | mdiobus_free(adapter->mii_bus); |
5421 | err_mem_free: | 5421 | err_mem_free: |
5422 | et131x_adapter_memory_free(adapter); | 5422 | et131x_adapter_memory_free(adapter); |
5423 | err_iounmap: | 5423 | err_iounmap: |
5424 | iounmap(adapter->regs); | 5424 | iounmap(adapter->regs); |
5425 | err_free_dev: | 5425 | err_free_dev: |
5426 | pci_dev_put(pdev); | 5426 | pci_dev_put(pdev); |
5427 | free_netdev(netdev); | 5427 | free_netdev(netdev); |
5428 | err_release_res: | 5428 | err_release_res: |
5429 | pci_release_regions(pdev); | 5429 | pci_release_regions(pdev); |
5430 | err_disable: | 5430 | err_disable: |
5431 | pci_disable_device(pdev); | 5431 | pci_disable_device(pdev); |
5432 | goto out; | 5432 | goto out; |
5433 | } | 5433 | } |
5434 | 5434 | ||
5435 | static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { | 5435 | static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { |
5436 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, | 5436 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, |
5437 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, | 5437 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, |
5438 | {0,} | 5438 | {0,} |
5439 | }; | 5439 | }; |
5440 | MODULE_DEVICE_TABLE(pci, et131x_pci_table); | 5440 | MODULE_DEVICE_TABLE(pci, et131x_pci_table); |
5441 | 5441 | ||
5442 | static struct pci_driver et131x_driver = { | 5442 | static struct pci_driver et131x_driver = { |
5443 | .name = DRIVER_NAME, | 5443 | .name = DRIVER_NAME, |
5444 | .id_table = et131x_pci_table, | 5444 | .id_table = et131x_pci_table, |
5445 | .probe = et131x_pci_setup, | 5445 | .probe = et131x_pci_setup, |
5446 | .remove = __devexit_p(et131x_pci_remove), | 5446 | .remove = __devexit_p(et131x_pci_remove), |
5447 | .driver.pm = ET131X_PM_OPS, | 5447 | .driver.pm = ET131X_PM_OPS, |
5448 | }; | 5448 | }; |
5449 | 5449 | ||
5450 | /** | 5450 | /** |
5451 | * et131x_init_module - The "main" entry point called on driver initialization | 5451 | * et131x_init_module - The "main" entry point called on driver initialization |
5452 | * | 5452 | * |
5453 | * Returns 0 on success, errno on failure (as defined in errno.h) | 5453 | * Returns 0 on success, errno on failure (as defined in errno.h) |
5454 | */ | 5454 | */ |
5455 | static int __init et131x_init_module(void) | 5455 | static int __init et131x_init_module(void) |
5456 | { | 5456 | { |
5457 | return pci_register_driver(&et131x_driver); | 5457 | return pci_register_driver(&et131x_driver); |
5458 | } | 5458 | } |
5459 | 5459 | ||
5460 | /** | 5460 | /** |
5461 | * et131x_cleanup_module - The entry point called on driver cleanup | 5461 | * et131x_cleanup_module - The entry point called on driver cleanup |
5462 | */ | 5462 | */ |
5463 | static void __exit et131x_cleanup_module(void) | 5463 | static void __exit et131x_cleanup_module(void) |
5464 | { | 5464 | { |
5465 | pci_unregister_driver(&et131x_driver); | 5465 | pci_unregister_driver(&et131x_driver); |
5466 | } | 5466 | } |
5467 | 5467 | ||
5468 | module_init(et131x_init_module); | 5468 | module_init(et131x_init_module); |
5469 | module_exit(et131x_cleanup_module); | 5469 | module_exit(et131x_cleanup_module); |
5470 | 5470 | ||
5471 | 5471 |
drivers/staging/et131x/et131x.h
1 | /* | 1 | /* |
2 | * Copyright © 2005 Agere Systems Inc. | 2 | * Copyright © 2005 Agere Systems Inc. |
3 | * All rights reserved. | 3 | * All rights reserved. |
4 | * http://www.agere.com | 4 | * http://www.agere.com |
5 | * | 5 | * |
6 | * SOFTWARE LICENSE | 6 | * SOFTWARE LICENSE |
7 | * | 7 | * |
8 | * This software is provided subject to the following terms and conditions, | 8 | * This software is provided subject to the following terms and conditions, |
9 | * which you should read carefully before using the software. Using this | 9 | * which you should read carefully before using the software. Using this |
10 | * software indicates your acceptance of these terms and conditions. If you do | 10 | * software indicates your acceptance of these terms and conditions. If you do |
11 | * not agree with these terms and conditions, do not use the software. | 11 | * not agree with these terms and conditions, do not use the software. |
12 | * | 12 | * |
13 | * Copyright © 2005 Agere Systems Inc. | 13 | * Copyright © 2005 Agere Systems Inc. |
14 | * All rights reserved. | 14 | * All rights reserved. |
15 | * | 15 | * |
16 | * Redistribution and use in source or binary forms, with or without | 16 | * Redistribution and use in source or binary forms, with or without |
17 | * modifications, are permitted provided that the following conditions are met: | 17 | * modifications, are permitted provided that the following conditions are met: |
18 | * | 18 | * |
19 | * . Redistributions of source code must retain the above copyright notice, this | 19 | * . Redistributions of source code must retain the above copyright notice, this |
20 | * list of conditions and the following Disclaimer as comments in the code as | 20 | * list of conditions and the following Disclaimer as comments in the code as |
21 | * well as in the documentation and/or other materials provided with the | 21 | * well as in the documentation and/or other materials provided with the |
22 | * distribution. | 22 | * distribution. |
23 | * | 23 | * |
24 | * . Redistributions in binary form must reproduce the above copyright notice, | 24 | * . Redistributions in binary form must reproduce the above copyright notice, |
25 | * this list of conditions and the following Disclaimer in the documentation | 25 | * this list of conditions and the following Disclaimer in the documentation |
26 | * and/or other materials provided with the distribution. | 26 | * and/or other materials provided with the distribution. |
27 | * | 27 | * |
28 | * . Neither the name of Agere Systems Inc. nor the names of the contributors | 28 | * . Neither the name of Agere Systems Inc. nor the names of the contributors |
29 | * may be used to endorse or promote products derived from this software | 29 | * may be used to endorse or promote products derived from this software |
30 | * without specific prior written permission. | 30 | * without specific prior written permission. |
31 | * | 31 | * |
32 | * Disclaimer | 32 | * Disclaimer |
33 | * | 33 | * |
34 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, | 34 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
35 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF | 35 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF |
36 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY | 36 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY |
37 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN | 37 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN |
38 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY | 38 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY |
39 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | 39 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
40 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 40 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
41 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | 41 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
42 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT | 42 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT |
43 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | 43 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
44 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | 44 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
45 | * DAMAGE. | 45 | * DAMAGE. |
46 | * | 46 | * |
47 | */ | 47 | */ |
48 | 48 | ||
49 | #define DRIVER_NAME "et131x" | 49 | #define DRIVER_NAME "et131x" |
50 | #define DRIVER_VERSION "v2.0" | 50 | #define DRIVER_VERSION "v2.0" |
51 | 51 | ||
52 | /* EEPROM registers */ | 52 | /* EEPROM registers */ |
53 | 53 | ||
54 | /* LBCIF Register Groups (addressed via 32-bit offsets) */ | 54 | /* LBCIF Register Groups (addressed via 32-bit offsets) */ |
55 | #define LBCIF_DWORD0_GROUP 0xAC | 55 | #define LBCIF_DWORD0_GROUP 0xAC |
56 | #define LBCIF_DWORD1_GROUP 0xB0 | 56 | #define LBCIF_DWORD1_GROUP 0xB0 |
57 | 57 | ||
58 | /* LBCIF Registers (addressed via 8-bit offsets) */ | 58 | /* LBCIF Registers (addressed via 8-bit offsets) */ |
59 | #define LBCIF_ADDRESS_REGISTER 0xAC | 59 | #define LBCIF_ADDRESS_REGISTER 0xAC |
60 | #define LBCIF_DATA_REGISTER 0xB0 | 60 | #define LBCIF_DATA_REGISTER 0xB0 |
61 | #define LBCIF_CONTROL_REGISTER 0xB1 | 61 | #define LBCIF_CONTROL_REGISTER 0xB1 |
62 | #define LBCIF_STATUS_REGISTER 0xB2 | 62 | #define LBCIF_STATUS_REGISTER 0xB2 |
63 | 63 | ||
64 | /* LBCIF Control Register Bits */ | 64 | /* LBCIF Control Register Bits */ |
65 | #define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 | 65 | #define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 |
66 | #define LBCIF_CONTROL_PAGE_WRITE 0x02 | 66 | #define LBCIF_CONTROL_PAGE_WRITE 0x02 |
67 | #define LBCIF_CONTROL_EEPROM_RELOAD 0x08 | 67 | #define LBCIF_CONTROL_EEPROM_RELOAD 0x08 |
68 | #define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 | 68 | #define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 |
69 | #define LBCIF_CONTROL_I2C_WRITE 0x40 | 69 | #define LBCIF_CONTROL_I2C_WRITE 0x40 |
70 | #define LBCIF_CONTROL_LBCIF_ENABLE 0x80 | 70 | #define LBCIF_CONTROL_LBCIF_ENABLE 0x80 |
71 | 71 | ||
72 | /* LBCIF Status Register Bits */ | 72 | /* LBCIF Status Register Bits */ |
73 | #define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 | 73 | #define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 |
74 | #define LBCIF_STATUS_I2C_IDLE 0x02 | 74 | #define LBCIF_STATUS_I2C_IDLE 0x02 |
75 | #define LBCIF_STATUS_ACK_ERROR 0x04 | 75 | #define LBCIF_STATUS_ACK_ERROR 0x04 |
76 | #define LBCIF_STATUS_GENERAL_ERROR 0x08 | 76 | #define LBCIF_STATUS_GENERAL_ERROR 0x08 |
77 | #define LBCIF_STATUS_CHECKSUM_ERROR 0x40 | 77 | #define LBCIF_STATUS_CHECKSUM_ERROR 0x40 |
78 | #define LBCIF_STATUS_EEPROM_PRESENT 0x80 | 78 | #define LBCIF_STATUS_EEPROM_PRESENT 0x80 |
79 | 79 | ||
80 | /* START OF GLOBAL REGISTER ADDRESS MAP */ | 80 | /* START OF GLOBAL REGISTER ADDRESS MAP */ |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * 10bit registers | 83 | * 10bit registers |
84 | * | 84 | * |
85 | * Tx queue start address reg in global address map at address 0x0000 | 85 | * Tx queue start address reg in global address map at address 0x0000 |
86 | * tx queue end address reg in global address map at address 0x0004 | 86 | * tx queue end address reg in global address map at address 0x0004 |
87 | * rx queue start address reg in global address map at address 0x0008 | 87 | * rx queue start address reg in global address map at address 0x0008 |
88 | * rx queue end address reg in global address map at address 0x000C | 88 | * rx queue end address reg in global address map at address 0x000C |
89 | */ | 89 | */ |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * structure for power management control status reg in global address map | 92 | * structure for power management control status reg in global address map |
93 | * located at address 0x0010 | 93 | * located at address 0x0010 |
94 | * jagcore_rx_rdy bit 9 | 94 | * jagcore_rx_rdy bit 9 |
95 | * jagcore_tx_rdy bit 8 | 95 | * jagcore_tx_rdy bit 8 |
96 | * phy_lped_en bit 7 | 96 | * phy_lped_en bit 7 |
97 | * phy_sw_coma bit 6 | 97 | * phy_sw_coma bit 6 |
98 | * rxclk_gate bit 5 | 98 | * rxclk_gate bit 5 |
99 | * txclk_gate bit 4 | 99 | * txclk_gate bit 4 |
100 | * sysclk_gate bit 3 | 100 | * sysclk_gate bit 3 |
101 | * jagcore_rx_en bit 2 | 101 | * jagcore_rx_en bit 2 |
102 | * jagcore_tx_en bit 1 | 102 | * jagcore_tx_en bit 1 |
103 | * gigephy_en bit 0 | 103 | * gigephy_en bit 0 |
104 | */ | 104 | */ |
105 | 105 | ||
106 | #define ET_PM_PHY_SW_COMA 0x40 | 106 | #define ET_PM_PHY_SW_COMA 0x40 |
107 | #define ET_PMCSR_INIT 0x38 | 107 | #define ET_PMCSR_INIT 0x38 |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Interrupt status reg at address 0x0018 | 110 | * Interrupt status reg at address 0x0018 |
111 | */ | 111 | */ |
112 | 112 | ||
113 | #define ET_INTR_TXDMA_ISR 0x00000008 | 113 | #define ET_INTR_TXDMA_ISR 0x00000008 |
114 | #define ET_INTR_TXDMA_ERR 0x00000010 | 114 | #define ET_INTR_TXDMA_ERR 0x00000010 |
115 | #define ET_INTR_RXDMA_XFR_DONE 0x00000020 | 115 | #define ET_INTR_RXDMA_XFR_DONE 0x00000020 |
116 | #define ET_INTR_RXDMA_FB_R0_LOW 0x00000040 | 116 | #define ET_INTR_RXDMA_FB_R0_LOW 0x00000040 |
117 | #define ET_INTR_RXDMA_FB_R1_LOW 0x00000080 | 117 | #define ET_INTR_RXDMA_FB_R1_LOW 0x00000080 |
118 | #define ET_INTR_RXDMA_STAT_LOW 0x00000100 | 118 | #define ET_INTR_RXDMA_STAT_LOW 0x00000100 |
119 | #define ET_INTR_RXDMA_ERR 0x00000200 | 119 | #define ET_INTR_RXDMA_ERR 0x00000200 |
120 | #define ET_INTR_WATCHDOG 0x00004000 | 120 | #define ET_INTR_WATCHDOG 0x00004000 |
121 | #define ET_INTR_WOL 0x00008000 | 121 | #define ET_INTR_WOL 0x00008000 |
122 | #define ET_INTR_PHY 0x00010000 | 122 | #define ET_INTR_PHY 0x00010000 |
123 | #define ET_INTR_TXMAC 0x00020000 | 123 | #define ET_INTR_TXMAC 0x00020000 |
124 | #define ET_INTR_RXMAC 0x00040000 | 124 | #define ET_INTR_RXMAC 0x00040000 |
125 | #define ET_INTR_MAC_STAT 0x00080000 | 125 | #define ET_INTR_MAC_STAT 0x00080000 |
126 | #define ET_INTR_SLV_TIMEOUT 0x00100000 | 126 | #define ET_INTR_SLV_TIMEOUT 0x00100000 |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Interrupt mask register at address 0x001C | 129 | * Interrupt mask register at address 0x001C |
130 | * Interrupt alias clear mask reg at address 0x0020 | 130 | * Interrupt alias clear mask reg at address 0x0020 |
131 | * Interrupt status alias reg at address 0x0024 | 131 | * Interrupt status alias reg at address 0x0024 |
132 | * | 132 | * |
133 | * Same masks as above | 133 | * Same masks as above |
134 | */ | 134 | */ |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * Software reset reg at address 0x0028 | 137 | * Software reset reg at address 0x0028 |
138 | * 0: txdma_sw_reset | 138 | * 0: txdma_sw_reset |
139 | * 1: rxdma_sw_reset | 139 | * 1: rxdma_sw_reset |
140 | * 2: txmac_sw_reset | 140 | * 2: txmac_sw_reset |
141 | * 3: rxmac_sw_reset | 141 | * 3: rxmac_sw_reset |
142 | * 4: mac_sw_reset | 142 | * 4: mac_sw_reset |
143 | * 5: mac_stat_sw_reset | 143 | * 5: mac_stat_sw_reset |
144 | * 6: mmc_sw_reset | 144 | * 6: mmc_sw_reset |
145 | *31: selfclr_disable | 145 | *31: selfclr_disable |
146 | */ | 146 | */ |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * SLV Timer reg at address 0x002C (low 24 bits) | 149 | * SLV Timer reg at address 0x002C (low 24 bits) |
150 | */ | 150 | */ |
151 | 151 | ||
152 | /* | 152 | /* |
153 | * MSI Configuration reg at address 0x0030 | 153 | * MSI Configuration reg at address 0x0030 |
154 | */ | 154 | */ |
155 | 155 | ||
156 | #define ET_MSI_VECTOR 0x0000001F | 156 | #define ET_MSI_VECTOR 0x0000001F |
157 | #define ET_MSI_TC 0x00070000 | 157 | #define ET_MSI_TC 0x00070000 |
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Loopback reg located at address 0x0034 | 160 | * Loopback reg located at address 0x0034 |
161 | */ | 161 | */ |
162 | 162 | ||
163 | #define ET_LOOP_MAC 0x00000001 | 163 | #define ET_LOOP_MAC 0x00000001 |
164 | #define ET_LOOP_DMA 0x00000002 | 164 | #define ET_LOOP_DMA 0x00000002 |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * GLOBAL Module of JAGCore Address Mapping | 167 | * GLOBAL Module of JAGCore Address Mapping |
168 | * Located at address 0x0000 | 168 | * Located at address 0x0000 |
169 | */ | 169 | */ |
170 | struct global_regs { /* Location: */ | 170 | struct global_regs { /* Location: */ |
171 | u32 txq_start_addr; /* 0x0000 */ | 171 | u32 txq_start_addr; /* 0x0000 */ |
172 | u32 txq_end_addr; /* 0x0004 */ | 172 | u32 txq_end_addr; /* 0x0004 */ |
173 | u32 rxq_start_addr; /* 0x0008 */ | 173 | u32 rxq_start_addr; /* 0x0008 */ |
174 | u32 rxq_end_addr; /* 0x000C */ | 174 | u32 rxq_end_addr; /* 0x000C */ |
175 | u32 pm_csr; /* 0x0010 */ | 175 | u32 pm_csr; /* 0x0010 */ |
176 | u32 unused; /* 0x0014 */ | 176 | u32 unused; /* 0x0014 */ |
177 | u32 int_status; /* 0x0018 */ | 177 | u32 int_status; /* 0x0018 */ |
178 | u32 int_mask; /* 0x001C */ | 178 | u32 int_mask; /* 0x001C */ |
179 | u32 int_alias_clr_en; /* 0x0020 */ | 179 | u32 int_alias_clr_en; /* 0x0020 */ |
180 | u32 int_status_alias; /* 0x0024 */ | 180 | u32 int_status_alias; /* 0x0024 */ |
181 | u32 sw_reset; /* 0x0028 */ | 181 | u32 sw_reset; /* 0x0028 */ |
182 | u32 slv_timer; /* 0x002C */ | 182 | u32 slv_timer; /* 0x002C */ |
183 | u32 msi_config; /* 0x0030 */ | 183 | u32 msi_config; /* 0x0030 */ |
184 | u32 loopback; /* 0x0034 */ | 184 | u32 loopback; /* 0x0034 */ |
185 | u32 watchdog_timer; /* 0x0038 */ | 185 | u32 watchdog_timer; /* 0x0038 */ |
186 | }; | 186 | }; |
187 | 187 | ||
188 | 188 | ||
189 | /* START OF TXDMA REGISTER ADDRESS MAP */ | 189 | /* START OF TXDMA REGISTER ADDRESS MAP */ |
190 | 190 | ||
191 | /* | 191 | /* |
192 | * txdma control status reg at address 0x1000 | 192 | * txdma control status reg at address 0x1000 |
193 | */ | 193 | */ |
194 | 194 | ||
195 | #define ET_TXDMA_CSR_HALT 0x00000001 | 195 | #define ET_TXDMA_CSR_HALT 0x00000001 |
196 | #define ET_TXDMA_DROP_TLP 0x00000002 | 196 | #define ET_TXDMA_DROP_TLP 0x00000002 |
197 | #define ET_TXDMA_CACHE_THRS 0x000000F0 | 197 | #define ET_TXDMA_CACHE_THRS 0x000000F0 |
198 | #define ET_TXDMA_CACHE_SHIFT 4 | 198 | #define ET_TXDMA_CACHE_SHIFT 4 |
199 | #define ET_TXDMA_SNGL_EPKT 0x00000100 | 199 | #define ET_TXDMA_SNGL_EPKT 0x00000100 |
200 | #define ET_TXDMA_CLASS 0x00001E00 | 200 | #define ET_TXDMA_CLASS 0x00001E00 |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * structure for txdma packet ring base address hi reg in txdma address map | 203 | * structure for txdma packet ring base address hi reg in txdma address map |
204 | * located at address 0x1004 | 204 | * located at address 0x1004 |
205 | * Defined earlier (u32) | 205 | * Defined earlier (u32) |
206 | */ | 206 | */ |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * structure for txdma packet ring base address low reg in txdma address map | 209 | * structure for txdma packet ring base address low reg in txdma address map |
210 | * located at address 0x1008 | 210 | * located at address 0x1008 |
211 | * Defined earlier (u32) | 211 | * Defined earlier (u32) |
212 | */ | 212 | */ |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * structure for txdma packet ring number of descriptor reg in txdma address | 215 | * structure for txdma packet ring number of descriptor reg in txdma address |
216 | * map. Located at address 0x100C | 216 | * map. Located at address 0x100C |
217 | * | 217 | * |
218 | * 31-10: unused | 218 | * 31-10: unused |
219 | * 9-0: pr ndes | 219 | * 9-0: pr ndes |
220 | */ | 220 | */ |
221 | 221 | ||
222 | #define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */ | 222 | #define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */ |
223 | #define ET_DMA12_WRAP 0x1000 | 223 | #define ET_DMA12_WRAP 0x1000 |
224 | #define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */ | 224 | #define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */ |
225 | #define ET_DMA10_WRAP 0x0400 | 225 | #define ET_DMA10_WRAP 0x0400 |
226 | #define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */ | 226 | #define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */ |
227 | #define ET_DMA4_WRAP 0x0010 | 227 | #define ET_DMA4_WRAP 0x0010 |
228 | 228 | ||
229 | #define INDEX12(x) ((x) & ET_DMA12_MASK) | 229 | #define INDEX12(x) ((x) & ET_DMA12_MASK) |
230 | #define INDEX10(x) ((x) & ET_DMA10_MASK) | 230 | #define INDEX10(x) ((x) & ET_DMA10_MASK) |
231 | #define INDEX4(x) ((x) & ET_DMA4_MASK) | 231 | #define INDEX4(x) ((x) & ET_DMA4_MASK) |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * 10bit DMA with wrap | 234 | * 10bit DMA with wrap |
235 | * txdma tx queue write address reg in txdma address map at 0x1010 | 235 | * txdma tx queue write address reg in txdma address map at 0x1010 |
236 | * txdma tx queue write address external reg in txdma address map at 0x1014 | 236 | * txdma tx queue write address external reg in txdma address map at 0x1014 |
237 | * txdma tx queue read address reg in txdma address map at 0x1018 | 237 | * txdma tx queue read address reg in txdma address map at 0x1018 |
238 | * | 238 | * |
239 | * u32 | 239 | * u32 |
240 | * txdma status writeback address hi reg in txdma address map at 0x101C | 240 | * txdma status writeback address hi reg in txdma address map at 0x101C |
241 | * txdma status writeback address lo reg in txdma address map at 0x1020 | 241 | * txdma status writeback address lo reg in txdma address map at 0x1020 |
242 | * | 242 | * |
243 | * 10bit DMA with wrap | 243 | * 10bit DMA with wrap |
244 | * txdma service request reg in txdma address map at 0x1024 | 244 | * txdma service request reg in txdma address map at 0x1024 |
245 | * structure for txdma service complete reg in txdma address map at 0x1028 | 245 | * structure for txdma service complete reg in txdma address map at 0x1028 |
246 | * | 246 | * |
247 | * 4bit DMA with wrap | 247 | * 4bit DMA with wrap |
248 | * txdma tx descriptor cache read index reg in txdma address map at 0x102C | 248 | * txdma tx descriptor cache read index reg in txdma address map at 0x102C |
249 | * txdma tx descriptor cache write index reg in txdma address map at 0x1030 | 249 | * txdma tx descriptor cache write index reg in txdma address map at 0x1030 |
250 | * | 250 | * |
251 | * txdma error reg in txdma address map at address 0x1034 | 251 | * txdma error reg in txdma address map at address 0x1034 |
252 | * 0: PyldResend | 252 | * 0: PyldResend |
253 | * 1: PyldRewind | 253 | * 1: PyldRewind |
254 | * 4: DescrResend | 254 | * 4: DescrResend |
255 | * 5: DescrRewind | 255 | * 5: DescrRewind |
256 | * 8: WrbkResend | 256 | * 8: WrbkResend |
257 | * 9: WrbkRewind | 257 | * 9: WrbkRewind |
258 | */ | 258 | */ |
259 | 259 | ||
260 | /* | 260 | /* |
261 | * Tx DMA Module of JAGCore Address Mapping | 261 | * Tx DMA Module of JAGCore Address Mapping |
262 | * Located at address 0x1000 | 262 | * Located at address 0x1000 |
263 | */ | 263 | */ |
264 | struct txdma_regs { /* Location: */ | 264 | struct txdma_regs { /* Location: */ |
265 | u32 csr; /* 0x1000 */ | 265 | u32 csr; /* 0x1000 */ |
266 | u32 pr_base_hi; /* 0x1004 */ | 266 | u32 pr_base_hi; /* 0x1004 */ |
267 | u32 pr_base_lo; /* 0x1008 */ | 267 | u32 pr_base_lo; /* 0x1008 */ |
268 | u32 pr_num_des; /* 0x100C */ | 268 | u32 pr_num_des; /* 0x100C */ |
269 | u32 txq_wr_addr; /* 0x1010 */ | 269 | u32 txq_wr_addr; /* 0x1010 */ |
270 | u32 txq_wr_addr_ext; /* 0x1014 */ | 270 | u32 txq_wr_addr_ext; /* 0x1014 */ |
271 | u32 txq_rd_addr; /* 0x1018 */ | 271 | u32 txq_rd_addr; /* 0x1018 */ |
272 | u32 dma_wb_base_hi; /* 0x101C */ | 272 | u32 dma_wb_base_hi; /* 0x101C */ |
273 | u32 dma_wb_base_lo; /* 0x1020 */ | 273 | u32 dma_wb_base_lo; /* 0x1020 */ |
274 | u32 service_request; /* 0x1024 */ | 274 | u32 service_request; /* 0x1024 */ |
275 | u32 service_complete; /* 0x1028 */ | 275 | u32 service_complete; /* 0x1028 */ |
276 | u32 cache_rd_index; /* 0x102C */ | 276 | u32 cache_rd_index; /* 0x102C */ |
277 | u32 cache_wr_index; /* 0x1030 */ | 277 | u32 cache_wr_index; /* 0x1030 */ |
278 | u32 tx_dma_error; /* 0x1034 */ | 278 | u32 tx_dma_error; /* 0x1034 */ |
279 | u32 desc_abort_cnt; /* 0x1038 */ | 279 | u32 desc_abort_cnt; /* 0x1038 */ |
280 | u32 payload_abort_cnt; /* 0x103c */ | 280 | u32 payload_abort_cnt; /* 0x103c */ |
281 | u32 writeback_abort_cnt; /* 0x1040 */ | 281 | u32 writeback_abort_cnt; /* 0x1040 */ |
282 | u32 desc_timeout_cnt; /* 0x1044 */ | 282 | u32 desc_timeout_cnt; /* 0x1044 */ |
283 | u32 payload_timeout_cnt; /* 0x1048 */ | 283 | u32 payload_timeout_cnt; /* 0x1048 */ |
284 | u32 writeback_timeout_cnt; /* 0x104c */ | 284 | u32 writeback_timeout_cnt; /* 0x104c */ |
285 | u32 desc_error_cnt; /* 0x1050 */ | 285 | u32 desc_error_cnt; /* 0x1050 */ |
286 | u32 payload_error_cnt; /* 0x1054 */ | 286 | u32 payload_error_cnt; /* 0x1054 */ |
287 | u32 writeback_error_cnt; /* 0x1058 */ | 287 | u32 writeback_error_cnt; /* 0x1058 */ |
288 | u32 dropped_tlp_cnt; /* 0x105c */ | 288 | u32 dropped_tlp_cnt; /* 0x105c */ |
289 | u32 new_service_complete; /* 0x1060 */ | 289 | u32 new_service_complete; /* 0x1060 */ |
290 | u32 ethernet_packet_cnt; /* 0x1064 */ | 290 | u32 ethernet_packet_cnt; /* 0x1064 */ |
291 | }; | 291 | }; |
292 | 292 | ||
293 | /* END OF TXDMA REGISTER ADDRESS MAP */ | 293 | /* END OF TXDMA REGISTER ADDRESS MAP */ |
294 | 294 | ||
295 | 295 | ||
296 | /* START OF RXDMA REGISTER ADDRESS MAP */ | 296 | /* START OF RXDMA REGISTER ADDRESS MAP */ |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * structure for control status reg in rxdma address map | 299 | * structure for control status reg in rxdma address map |
300 | * Located at address 0x2000 | 300 | * Located at address 0x2000 |
301 | * | 301 | * |
302 | * CSR | 302 | * CSR |
303 | * 0: halt | 303 | * 0: halt |
304 | * 1-3: tc | 304 | * 1-3: tc |
305 | * 4: fbr_big_endian | 305 | * 4: fbr_big_endian |
306 | * 5: psr_big_endian | 306 | * 5: psr_big_endian |
307 | * 6: pkt_big_endian | 307 | * 6: pkt_big_endian |
308 | * 7: dma_big_endian | 308 | * 7: dma_big_endian |
309 | * 8-9: fbr0_size | 309 | * 8-9: fbr0_size |
310 | * 10: fbr0_enable | 310 | * 10: fbr0_enable |
311 | * 11-12: fbr1_size | 311 | * 11-12: fbr1_size |
312 | * 13: fbr1_enable | 312 | * 13: fbr1_enable |
313 | * 14: unused | 313 | * 14: unused |
314 | * 15: pkt_drop_disable | 314 | * 15: pkt_drop_disable |
315 | * 16: pkt_done_flush | 315 | * 16: pkt_done_flush |
316 | * 17: halt_status | 316 | * 17: halt_status |
317 | * 18-31: unused | 317 | * 18-31: unused |
318 | */ | 318 | */ |
319 | 319 | ||
320 | 320 | ||
321 | /* | 321 | /* |
322 | * structure for dma writeback lo reg in rxdma address map | 322 | * structure for dma writeback lo reg in rxdma address map |
323 | * located at address 0x2004 | 323 | * located at address 0x2004 |
324 | * Defined earlier (u32) | 324 | * Defined earlier (u32) |
325 | */ | 325 | */ |
326 | 326 | ||
327 | /* | 327 | /* |
328 | * structure for dma writeback hi reg in rxdma address map | 328 | * structure for dma writeback hi reg in rxdma address map |
329 | * located at address 0x2008 | 329 | * located at address 0x2008 |
330 | * Defined earlier (u32) | 330 | * Defined earlier (u32) |
331 | */ | 331 | */ |
332 | 332 | ||
333 | /* | 333 | /* |
334 | * structure for number of packets done reg in rxdma address map | 334 | * structure for number of packets done reg in rxdma address map |
335 | * located at address 0x200C | 335 | * located at address 0x200C |
336 | * | 336 | * |
337 | * 31-8: unused | 337 | * 31-8: unused |
338 | * 7-0: num done | 338 | * 7-0: num done |
339 | */ | 339 | */ |
340 | 340 | ||
341 | /* | 341 | /* |
342 | * structure for max packet time reg in rxdma address map | 342 | * structure for max packet time reg in rxdma address map |
343 | * located at address 0x2010 | 343 | * located at address 0x2010 |
344 | * | 344 | * |
345 | * 31-18: unused | 345 | * 31-18: unused |
346 | * 17-0: time done | 346 | * 17-0: time done |
347 | */ | 347 | */ |
348 | 348 | ||
349 | /* | 349 | /* |
350 | * structure for rx queue read address reg in rxdma address map | 350 | * structure for rx queue read address reg in rxdma address map |
351 | * located at address 0x2014 | 351 | * located at address 0x2014 |
352 | * Defined earlier (u32) | 352 | * Defined earlier (u32) |
353 | */ | 353 | */ |
354 | 354 | ||
355 | /* | 355 | /* |
356 | * structure for rx queue read address external reg in rxdma address map | 356 | * structure for rx queue read address external reg in rxdma address map |
357 | * located at address 0x2018 | 357 | * located at address 0x2018 |
358 | * Defined earlier (u32) | 358 | * Defined earlier (u32) |
359 | */ | 359 | */ |
360 | 360 | ||
361 | /* | 361 | /* |
362 | * structure for rx queue write address reg in rxdma address map | 362 | * structure for rx queue write address reg in rxdma address map |
363 | * located at address 0x201C | 363 | * located at address 0x201C |
364 | * Defined earlier (u32) | 364 | * Defined earlier (u32) |
365 | */ | 365 | */ |
366 | 366 | ||
367 | /* | 367 | /* |
368 | * structure for packet status ring base address lo reg in rxdma address map | 368 | * structure for packet status ring base address lo reg in rxdma address map |
369 | * located at address 0x2020 | 369 | * located at address 0x2020 |
370 | * Defined earlier (u32) | 370 | * Defined earlier (u32) |
371 | */ | 371 | */ |
372 | 372 | ||
373 | /* | 373 | /* |
374 | * structure for packet status ring base address hi reg in rxdma address map | 374 | * structure for packet status ring base address hi reg in rxdma address map |
375 | * located at address 0x2024 | 375 | * located at address 0x2024 |
376 | * Defined earlier (u32) | 376 | * Defined earlier (u32) |
377 | */ | 377 | */ |
378 | 378 | ||
379 | /* | 379 | /* |
380 | * structure for packet status ring number of descriptors reg in rxdma address | 380 | * structure for packet status ring number of descriptors reg in rxdma address |
381 | * map. Located at address 0x2028 | 381 | * map. Located at address 0x2028 |
382 | * | 382 | * |
383 | * 31-12: unused | 383 | * 31-12: unused |
384 | * 11-0: psr ndes | 384 | * 11-0: psr ndes |
385 | */ | 385 | */ |
386 | 386 | ||
387 | /* | 387 | /* |
388 | * structure for packet status ring available offset reg in rxdma address map | 388 | * structure for packet status ring available offset reg in rxdma address map |
389 | * located at address 0x202C | 389 | * located at address 0x202C |
390 | * | 390 | * |
391 | * 31-13: unused | 391 | * 31-13: unused |
392 | * 12: psr avail wrap | 392 | * 12: psr avail wrap |
393 | * 11-0: psr avail | 393 | * 11-0: psr avail |
394 | */ | 394 | */ |
395 | 395 | ||
396 | /* | 396 | /* |
397 | * structure for packet status ring full offset reg in rxdma address map | 397 | * structure for packet status ring full offset reg in rxdma address map |
398 | * located at address 0x2030 | 398 | * located at address 0x2030 |
399 | * | 399 | * |
400 | * 31-13: unused | 400 | * 31-13: unused |
401 | * 12: psr full wrap | 401 | * 12: psr full wrap |
402 | * 11-0: psr full | 402 | * 11-0: psr full |
403 | */ | 403 | */ |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * structure for packet status ring access index reg in rxdma address map | 406 | * structure for packet status ring access index reg in rxdma address map |
407 | * located at address 0x2034 | 407 | * located at address 0x2034 |
408 | * | 408 | * |
409 | * 31-5: unused | 409 | * 31-5: unused |
410 | * 4-0: psr_ai | 410 | * 4-0: psr_ai |
411 | */ | 411 | */ |
412 | 412 | ||
413 | /* | 413 | /* |
414 | * structure for packet status ring minimum descriptors reg in rxdma address | 414 | * structure for packet status ring minimum descriptors reg in rxdma address |
415 | * map. Located at address 0x2038 | 415 | * map. Located at address 0x2038 |
416 | * | 416 | * |
417 | * 31-12: unused | 417 | * 31-12: unused |
418 | * 11-0: psr_min | 418 | * 11-0: psr_min |
419 | */ | 419 | */ |
420 | 420 | ||
421 | /* | 421 | /* |
422 | * structure for free buffer ring base lo address reg in rxdma address map | 422 | * structure for free buffer ring base lo address reg in rxdma address map |
423 | * located at address 0x203C | 423 | * located at address 0x203C |
424 | * Defined earlier (u32) | 424 | * Defined earlier (u32) |
425 | */ | 425 | */ |
426 | 426 | ||
427 | /* | 427 | /* |
428 | * structure for free buffer ring base hi address reg in rxdma address map | 428 | * structure for free buffer ring base hi address reg in rxdma address map |
429 | * located at address 0x2040 | 429 | * located at address 0x2040 |
430 | * Defined earlier (u32) | 430 | * Defined earlier (u32) |
431 | */ | 431 | */ |
432 | 432 | ||
433 | /* | 433 | /* |
434 | * structure for free buffer ring number of descriptors reg in rxdma address | 434 | * structure for free buffer ring number of descriptors reg in rxdma address |
435 | * map. Located at address 0x2044 | 435 | * map. Located at address 0x2044 |
436 | * | 436 | * |
437 | * 31-10: unused | 437 | * 31-10: unused |
438 | * 9-0: fbr ndesc | 438 | * 9-0: fbr ndesc |
439 | */ | 439 | */ |
440 | 440 | ||
441 | /* | 441 | /* |
442 | * structure for free buffer ring 0 available offset reg in rxdma address map | 442 | * structure for free buffer ring 0 available offset reg in rxdma address map |
443 | * located at address 0x2048 | 443 | * located at address 0x2048 |
444 | * Defined earlier (u32) | 444 | * Defined earlier (u32) |
445 | */ | 445 | */ |
446 | 446 | ||
447 | /* | 447 | /* |
448 | * structure for free buffer ring 0 full offset reg in rxdma address map | 448 | * structure for free buffer ring 0 full offset reg in rxdma address map |
449 | * located at address 0x204C | 449 | * located at address 0x204C |
450 | * Defined earlier (u32) | 450 | * Defined earlier (u32) |
451 | */ | 451 | */ |
452 | 452 | ||
453 | /* | 453 | /* |
454 | * structure for free buffer cache 0 full offset reg in rxdma address map | 454 | * structure for free buffer cache 0 full offset reg in rxdma address map |
455 | * located at address 0x2050 | 455 | * located at address 0x2050 |
456 | * | 456 | * |
457 | * 31-5: unused | 457 | * 31-5: unused |
458 | * 4-0: fbc rdi | 458 | * 4-0: fbc rdi |
459 | */ | 459 | */ |
460 | 460 | ||
461 | /* | 461 | /* |
462 | * structure for free buffer ring 0 minimum descriptor reg in rxdma address map | 462 | * structure for free buffer ring 0 minimum descriptor reg in rxdma address map |
463 | * located at address 0x2054 | 463 | * located at address 0x2054 |
464 | * | 464 | * |
465 | * 31-10: unused | 465 | * 31-10: unused |
466 | * 9-0: fbr min | 466 | * 9-0: fbr min |
467 | */ | 467 | */ |
468 | 468 | ||
469 | /* | 469 | /* |
470 | * structure for free buffer ring 1 base address lo reg in rxdma address map | 470 | * structure for free buffer ring 1 base address lo reg in rxdma address map |
471 | * located at address 0x2058 - 0x205C | 471 | * located at address 0x2058 - 0x205C |
472 | * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t) | 472 | * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t) |
473 | */ | 473 | */ |
474 | 474 | ||
475 | /* | 475 | /* |
476 | * structure for free buffer ring 1 number of descriptors reg in rxdma address | 476 | * structure for free buffer ring 1 number of descriptors reg in rxdma address |
477 | * map. Located at address 0x2060 | 477 | * map. Located at address 0x2060 |
478 | * Defined earlier (RXDMA_FBR_NUM_DES_t) | 478 | * Defined earlier (RXDMA_FBR_NUM_DES_t) |
479 | */ | 479 | */ |
480 | 480 | ||
481 | /* | 481 | /* |
482 | * structure for free buffer ring 1 available offset reg in rxdma address map | 482 | * structure for free buffer ring 1 available offset reg in rxdma address map |
483 | * located at address 0x2064 | 483 | * located at address 0x2064 |
484 | * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t) | 484 | * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t) |
485 | */ | 485 | */ |
486 | 486 | ||
487 | /* | 487 | /* |
488 | * structure for free buffer ring 1 full offset reg in rxdma address map | 488 | * structure for free buffer ring 1 full offset reg in rxdma address map |
489 | * located at address 0x2068 | 489 | * located at address 0x2068 |
490 | * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t) | 490 | * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t) |
491 | */ | 491 | */ |
492 | 492 | ||
493 | /* | 493 | /* |
494 | * structure for free buffer cache 1 read index reg in rxdma address map | 494 | * structure for free buffer cache 1 read index reg in rxdma address map |
495 | * located at address 0x206C | 495 | * located at address 0x206C |
496 | * Defined Earlier (RXDMA_FBC_RD_INDEX_t) | 496 | * Defined Earlier (RXDMA_FBC_RD_INDEX_t) |
497 | */ | 497 | */ |
498 | 498 | ||
499 | /* | 499 | /* |
500 | * structure for free buffer ring 1 minimum descriptor reg in rxdma address map | 500 | * structure for free buffer ring 1 minimum descriptor reg in rxdma address map |
501 | * located at address 0x2070 | 501 | * located at address 0x2070 |
502 | * Defined Earlier (RXDMA_FBR_MIN_DES_t) | 502 | * Defined Earlier (RXDMA_FBR_MIN_DES_t) |
503 | */ | 503 | */ |
504 | 504 | ||
505 | /* | 505 | /* |
506 | * Rx DMA Module of JAGCore Address Mapping | 506 | * Rx DMA Module of JAGCore Address Mapping |
507 | * Located at address 0x2000 | 507 | * Located at address 0x2000 |
508 | */ | 508 | */ |
509 | struct rxdma_regs { /* Location: */ | 509 | struct rxdma_regs { /* Location: */ |
510 | u32 csr; /* 0x2000 */ | 510 | u32 csr; /* 0x2000 */ |
511 | u32 dma_wb_base_lo; /* 0x2004 */ | 511 | u32 dma_wb_base_lo; /* 0x2004 */ |
512 | u32 dma_wb_base_hi; /* 0x2008 */ | 512 | u32 dma_wb_base_hi; /* 0x2008 */ |
513 | u32 num_pkt_done; /* 0x200C */ | 513 | u32 num_pkt_done; /* 0x200C */ |
514 | u32 max_pkt_time; /* 0x2010 */ | 514 | u32 max_pkt_time; /* 0x2010 */ |
515 | u32 rxq_rd_addr; /* 0x2014 */ | 515 | u32 rxq_rd_addr; /* 0x2014 */ |
516 | u32 rxq_rd_addr_ext; /* 0x2018 */ | 516 | u32 rxq_rd_addr_ext; /* 0x2018 */ |
517 | u32 rxq_wr_addr; /* 0x201C */ | 517 | u32 rxq_wr_addr; /* 0x201C */ |
518 | u32 psr_base_lo; /* 0x2020 */ | 518 | u32 psr_base_lo; /* 0x2020 */ |
519 | u32 psr_base_hi; /* 0x2024 */ | 519 | u32 psr_base_hi; /* 0x2024 */ |
520 | u32 psr_num_des; /* 0x2028 */ | 520 | u32 psr_num_des; /* 0x2028 */ |
521 | u32 psr_avail_offset; /* 0x202C */ | 521 | u32 psr_avail_offset; /* 0x202C */ |
522 | u32 psr_full_offset; /* 0x2030 */ | 522 | u32 psr_full_offset; /* 0x2030 */ |
523 | u32 psr_access_index; /* 0x2034 */ | 523 | u32 psr_access_index; /* 0x2034 */ |
524 | u32 psr_min_des; /* 0x2038 */ | 524 | u32 psr_min_des; /* 0x2038 */ |
525 | u32 fbr0_base_lo; /* 0x203C */ | 525 | u32 fbr0_base_lo; /* 0x203C */ |
526 | u32 fbr0_base_hi; /* 0x2040 */ | 526 | u32 fbr0_base_hi; /* 0x2040 */ |
527 | u32 fbr0_num_des; /* 0x2044 */ | 527 | u32 fbr0_num_des; /* 0x2044 */ |
528 | u32 fbr0_avail_offset; /* 0x2048 */ | 528 | u32 fbr0_avail_offset; /* 0x2048 */ |
529 | u32 fbr0_full_offset; /* 0x204C */ | 529 | u32 fbr0_full_offset; /* 0x204C */ |
530 | u32 fbr0_rd_index; /* 0x2050 */ | 530 | u32 fbr0_rd_index; /* 0x2050 */ |
531 | u32 fbr0_min_des; /* 0x2054 */ | 531 | u32 fbr0_min_des; /* 0x2054 */ |
532 | u32 fbr1_base_lo; /* 0x2058 */ | 532 | u32 fbr1_base_lo; /* 0x2058 */ |
533 | u32 fbr1_base_hi; /* 0x205C */ | 533 | u32 fbr1_base_hi; /* 0x205C */ |
534 | u32 fbr1_num_des; /* 0x2060 */ | 534 | u32 fbr1_num_des; /* 0x2060 */ |
535 | u32 fbr1_avail_offset; /* 0x2064 */ | 535 | u32 fbr1_avail_offset; /* 0x2064 */ |
536 | u32 fbr1_full_offset; /* 0x2068 */ | 536 | u32 fbr1_full_offset; /* 0x2068 */ |
537 | u32 fbr1_rd_index; /* 0x206C */ | 537 | u32 fbr1_rd_index; /* 0x206C */ |
538 | u32 fbr1_min_des; /* 0x2070 */ | 538 | u32 fbr1_min_des; /* 0x2070 */ |
539 | }; | 539 | }; |
540 | 540 | ||
541 | /* END OF RXDMA REGISTER ADDRESS MAP */ | 541 | /* END OF RXDMA REGISTER ADDRESS MAP */ |
542 | 542 | ||
543 | 543 | ||
544 | /* START OF TXMAC REGISTER ADDRESS MAP */ | 544 | /* START OF TXMAC REGISTER ADDRESS MAP */ |
545 | 545 | ||
546 | /* | 546 | /* |
547 | * structure for control reg in txmac address map | 547 | * structure for control reg in txmac address map |
548 | * located at address 0x3000 | 548 | * located at address 0x3000 |
549 | * | 549 | * |
550 | * bits | 550 | * bits |
551 | * 31-8: unused | 551 | * 31-8: unused |
552 | * 7: cklseg_disable | 552 | * 7: cklseg_disable |
553 | * 6: ckbcnt_disable | 553 | * 6: ckbcnt_disable |
554 | * 5: cksegnum | 554 | * 5: cksegnum |
555 | * 4: async_disable | 555 | * 4: async_disable |
556 | * 3: fc_disable | 556 | * 3: fc_disable |
557 | * 2: mcif_disable | 557 | * 2: mcif_disable |
558 | * 1: mif_disable | 558 | * 1: mif_disable |
559 | * 0: txmac_en | 559 | * 0: txmac_en |
560 | */ | 560 | */ |
561 | 561 | ||
562 | /* | 562 | /* |
563 | * structure for shadow pointer reg in txmac address map | 563 | * structure for shadow pointer reg in txmac address map |
564 | * located at address 0x3004 | 564 | * located at address 0x3004 |
565 | * 31-27: reserved | 565 | * 31-27: reserved |
566 | * 26-16: txq rd ptr | 566 | * 26-16: txq rd ptr |
567 | * 15-11: reserved | 567 | * 15-11: reserved |
568 | * 10-0: txq wr ptr | 568 | * 10-0: txq wr ptr |
569 | */ | 569 | */ |
570 | 570 | ||
571 | /* | 571 | /* |
572 | * structure for error count reg in txmac address map | 572 | * structure for error count reg in txmac address map |
573 | * located at address 0x3008 | 573 | * located at address 0x3008 |
574 | * | 574 | * |
575 | * 31-12: unused | 575 | * 31-12: unused |
576 | * 11-8: reserved | 576 | * 11-8: reserved |
577 | * 7-4: txq_underrun | 577 | * 7-4: txq_underrun |
578 | * 3-0: fifo_underrun | 578 | * 3-0: fifo_underrun |
579 | */ | 579 | */ |
580 | 580 | ||
581 | /* | 581 | /* |
582 | * structure for max fill reg in txmac address map | 582 | * structure for max fill reg in txmac address map |
583 | * located at address 0x300C | 583 | * located at address 0x300C |
584 | * 31-12: unused | 584 | * 31-12: unused |
585 | * 11-0: max fill | 585 | * 11-0: max fill |
586 | */ | 586 | */ |
587 | 587 | ||
588 | /* | 588 | /* |
589 | * structure for cf parameter reg in txmac address map | 589 | * structure for cf parameter reg in txmac address map |
590 | * located at address 0x3010 | 590 | * located at address 0x3010 |
591 | * 31-16: cfep | 591 | * 31-16: cfep |
592 | * 15-0: cfpt | 592 | * 15-0: cfpt |
593 | */ | 593 | */ |
594 | 594 | ||
595 | /* | 595 | /* |
596 | * structure for tx test reg in txmac address map | 596 | * structure for tx test reg in txmac address map |
597 | * located at address 0x3014 | 597 | * located at address 0x3014 |
598 | * 31-17: unused | 598 | * 31-17: unused |
599 | * 16: reserved1 | 599 | * 16: reserved |
600 | * 15: txtest_en | 600 | * 15: txtest_en |
601 | * 14-11: unused | 601 | * 14-11: unused |
602 | * 10-0: txq test pointer | 602 | * 10-0: txq test pointer |
603 | */ | 603 | */ |
604 | 604 | ||
605 | /* | 605 | /* |
606 | * structure for error reg in txmac address map | 606 | * structure for error reg in txmac address map |
607 | * located at address 0x3018 | 607 | * located at address 0x3018 |
608 | * | 608 | * |
609 | * 31-9: unused | 609 | * 31-9: unused |
610 | * 8: fifo_underrun | 610 | * 8: fifo_underrun |
611 | * 7-6: unused | 611 | * 7-6: unused |
612 | * 5: ctrl2_err | 612 | * 5: ctrl2_err |
613 | * 4: txq_underrun | 613 | * 4: txq_underrun |
614 | * 3: bcnt_err | 614 | * 3: bcnt_err |
615 | * 2: lseg_err | 615 | * 2: lseg_err |
616 | * 1: segnum_err | 616 | * 1: segnum_err |
617 | * 0: seg0_err | 617 | * 0: seg0_err |
618 | */ | 618 | */ |
619 | 619 | ||
620 | /* | 620 | /* |
621 | * structure for error interrupt reg in txmac address map | 621 | * structure for error interrupt reg in txmac address map |
622 | * located at address 0x301C | 622 | * located at address 0x301C |
623 | * | 623 | * |
624 | * 31-9: unused | 624 | * 31-9: unused |
625 | * 8: fifo_underrun | 625 | * 8: fifo_underrun |
626 | * 7-6: unused | 626 | * 7-6: unused |
627 | * 5: ctrl2_err | 627 | * 5: ctrl2_err |
628 | * 4: txq_underrun | 628 | * 4: txq_underrun |
629 | * 3: bcnt_err | 629 | * 3: bcnt_err |
630 | * 2: lseg_err | 630 | * 2: lseg_err |
631 | * 1: segnum_err | 631 | * 1: segnum_err |
632 | * 0: seg0_err | 632 | * 0: seg0_err |
633 | */ | 633 | */ |
634 | 634 | ||
635 | /* | 635 | /* |
636 | * structure for error interrupt reg in txmac address map | 636 | * structure for error interrupt reg in txmac address map |
637 | * located at address 0x3020 | 637 | * located at address 0x3020 |
638 | * | 638 | * |
639 | * 31-2: unused | 639 | * 31-2: unused |
640 | * 1: bp_req | 640 | * 1: bp_req |
641 | * 0: bp_xonxoff | 641 | * 0: bp_xonxoff |
642 | */ | 642 | */ |
643 | 643 | ||
644 | /* | 644 | /* |
645 | * Tx MAC Module of JAGCore Address Mapping | 645 | * Tx MAC Module of JAGCore Address Mapping |
646 | */ | 646 | */ |
647 | struct txmac_regs { /* Location: */ | 647 | struct txmac_regs { /* Location: */ |
648 | u32 ctl; /* 0x3000 */ | 648 | u32 ctl; /* 0x3000 */ |
649 | u32 shadow_ptr; /* 0x3004 */ | 649 | u32 shadow_ptr; /* 0x3004 */ |
650 | u32 err_cnt; /* 0x3008 */ | 650 | u32 err_cnt; /* 0x3008 */ |
651 | u32 max_fill; /* 0x300C */ | 651 | u32 max_fill; /* 0x300C */ |
652 | u32 cf_param; /* 0x3010 */ | 652 | u32 cf_param; /* 0x3010 */ |
653 | u32 tx_test; /* 0x3014 */ | 653 | u32 tx_test; /* 0x3014 */ |
654 | u32 err; /* 0x3018 */ | 654 | u32 err; /* 0x3018 */ |
655 | u32 err_int; /* 0x301C */ | 655 | u32 err_int; /* 0x301C */ |
656 | u32 bp_ctrl; /* 0x3020 */ | 656 | u32 bp_ctrl; /* 0x3020 */ |
657 | }; | 657 | }; |
658 | 658 | ||
659 | /* END OF TXMAC REGISTER ADDRESS MAP */ | 659 | /* END OF TXMAC REGISTER ADDRESS MAP */ |
660 | 660 | ||
661 | /* START OF RXMAC REGISTER ADDRESS MAP */ | 661 | /* START OF RXMAC REGISTER ADDRESS MAP */ |
662 | 662 | ||
663 | /* | 663 | /* |
664 | * structure for rxmac control reg in rxmac address map | 664 | * structure for rxmac control reg in rxmac address map |
665 | * located at address 0x4000 | 665 | * located at address 0x4000 |
666 | * | 666 | * |
667 | * 31-7: reserved | 667 | * 31-7: reserved |
668 | * 6: rxmac_int_disable | 668 | * 6: rxmac_int_disable |
669 | * 5: async_disable | 669 | * 5: async_disable |
670 | * 4: mif_disable | 670 | * 4: mif_disable |
671 | * 3: wol_disable | 671 | * 3: wol_disable |
672 | * 2: pkt_filter_disable | 672 | * 2: pkt_filter_disable |
673 | * 1: mcif_disable | 673 | * 1: mcif_disable |
674 | * 0: rxmac_en | 674 | * 0: rxmac_en |
675 | */ | 675 | */ |
676 | 676 | ||
677 | /* | 677 | /* |
678 | * structure for Wake On Lan Control and CRC 0 reg in rxmac address map | 678 | * structure for Wake On Lan Control and CRC 0 reg in rxmac address map |
679 | * located at address 0x4004 | 679 | * located at address 0x4004 |
680 | * 31-16: crc | 680 | * 31-16: crc |
681 | * 15-12: reserved | 681 | * 15-12: reserved |
682 | * 11: ignore_pp | 682 | * 11: ignore_pp |
683 | * 10: ignore_mp | 683 | * 10: ignore_mp |
684 | * 9: clr_intr | 684 | * 9: clr_intr |
685 | * 8: ignore_link_chg | 685 | * 8: ignore_link_chg |
686 | * 7: ignore_uni | 686 | * 7: ignore_uni |
687 | * 6: ignore_multi | 687 | * 6: ignore_multi |
688 | * 5: ignore_broad | 688 | * 5: ignore_broad |
689 | * 4-0: valid_crc 4-0 | 689 | * 4-0: valid_crc 4-0 |
690 | */ | 690 | */ |
691 | 691 | ||
692 | /* | 692 | /* |
693 | * structure for CRC 1 and CRC 2 reg in rxmac address map | 693 | * structure for CRC 1 and CRC 2 reg in rxmac address map |
694 | * located at address 0x4008 | 694 | * located at address 0x4008 |
695 | * | 695 | * |
696 | * 31-16: crc2 | 696 | * 31-16: crc2 |
697 | * 15-0: crc1 | 697 | * 15-0: crc1 |
698 | */ | 698 | */ |
699 | 699 | ||
700 | /* | 700 | /* |
701 | * structure for CRC 3 and CRC 4 reg in rxmac address map | 701 | * structure for CRC 3 and CRC 4 reg in rxmac address map |
702 | * located at address 0x400C | 702 | * located at address 0x400C |
703 | * | 703 | * |
704 | * 31-16: crc4 | 704 | * 31-16: crc4 |
705 | * 15-0: crc3 | 705 | * 15-0: crc3 |
706 | */ | 706 | */ |
707 | 707 | ||
708 | /* | 708 | /* |
709 | * structure for Wake On Lan Source Address Lo reg in rxmac address map | 709 | * structure for Wake On Lan Source Address Lo reg in rxmac address map |
710 | * located at address 0x4010 | 710 | * located at address 0x4010 |
711 | * | 711 | * |
712 | * 31-24: sa3 | 712 | * 31-24: sa3 |
713 | * 23-16: sa4 | 713 | * 23-16: sa4 |
714 | * 15-8: sa5 | 714 | * 15-8: sa5 |
715 | * 7-0: sa6 | 715 | * 7-0: sa6 |
716 | */ | 716 | */ |
717 | 717 | ||
718 | #define ET_WOL_LO_SA3_SHIFT 24 | 718 | #define ET_WOL_LO_SA3_SHIFT 24 |
719 | #define ET_WOL_LO_SA4_SHIFT 16 | 719 | #define ET_WOL_LO_SA4_SHIFT 16 |
720 | #define ET_WOL_LO_SA5_SHIFT 8 | 720 | #define ET_WOL_LO_SA5_SHIFT 8 |
721 | 721 | ||
722 | /* | 722 | /* |
723 | * structure for Wake On Lan Source Address Hi reg in rxmac address map | 723 | * structure for Wake On Lan Source Address Hi reg in rxmac address map |
724 | * located at address 0x4014 | 724 | * located at address 0x4014 |
725 | * | 725 | * |
726 | * 31-16: reserved | 726 | * 31-16: reserved |
727 | * 15-8: sa1 | 727 | * 15-8: sa1 |
728 | * 7-0: sa2 | 728 | * 7-0: sa2 |
729 | */ | 729 | */ |
730 | 730 | ||
731 | #define ET_WOL_HI_SA1_SHIFT 8 | 731 | #define ET_WOL_HI_SA1_SHIFT 8 |
732 | 732 | ||
733 | /* | 733 | /* |
734 | * structure for Wake On Lan mask reg in rxmac address map | 734 | * structure for Wake On Lan mask reg in rxmac address map |
735 | * located at address 0x4018 - 0x4064 | 735 | * located at address 0x4018 - 0x4064 |
736 | * Defined earlier (u32) | 736 | * Defined earlier (u32) |
737 | */ | 737 | */ |
738 | 738 | ||
739 | /* | 739 | /* |
740 | * structure for Unicast Packet Filter Address 1 reg in rxmac address map | 740 | * structure for Unicast Packet Filter Address 1 reg in rxmac address map |
741 | * located at address 0x4068 | 741 | * located at address 0x4068 |
742 | * | 742 | * |
743 | * 31-24: addr1_3 | 743 | * 31-24: addr1_3 |
744 | * 23-16: addr1_4 | 744 | * 23-16: addr1_4 |
745 | * 15-8: addr1_5 | 745 | * 15-8: addr1_5 |
746 | * 7-0: addr1_6 | 746 | * 7-0: addr1_6 |
747 | */ | 747 | */ |
748 | 748 | ||
749 | #define ET_UNI_PF_ADDR1_3_SHIFT 24 | 749 | #define ET_UNI_PF_ADDR1_3_SHIFT 24 |
750 | #define ET_UNI_PF_ADDR1_4_SHIFT 16 | 750 | #define ET_UNI_PF_ADDR1_4_SHIFT 16 |
751 | #define ET_UNI_PF_ADDR1_5_SHIFT 8 | 751 | #define ET_UNI_PF_ADDR1_5_SHIFT 8 |
752 | 752 | ||
753 | /* | 753 | /* |
754 | * structure for Unicast Packet Filter Address 2 reg in rxmac address map | 754 | * structure for Unicast Packet Filter Address 2 reg in rxmac address map |
755 | * located at address 0x406C | 755 | * located at address 0x406C |
756 | * | 756 | * |
757 | * 31-24: addr2_3 | 757 | * 31-24: addr2_3 |
758 | * 23-16: addr2_4 | 758 | * 23-16: addr2_4 |
759 | * 15-8: addr2_5 | 759 | * 15-8: addr2_5 |
760 | * 7-0: addr2_6 | 760 | * 7-0: addr2_6 |
761 | */ | 761 | */ |
762 | 762 | ||
763 | #define ET_UNI_PF_ADDR2_3_SHIFT 24 | 763 | #define ET_UNI_PF_ADDR2_3_SHIFT 24 |
764 | #define ET_UNI_PF_ADDR2_4_SHIFT 16 | 764 | #define ET_UNI_PF_ADDR2_4_SHIFT 16 |
765 | #define ET_UNI_PF_ADDR2_5_SHIFT 8 | 765 | #define ET_UNI_PF_ADDR2_5_SHIFT 8 |
766 | 766 | ||
767 | /* | 767 | /* |
768 | * structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map | 768 | * structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map |
769 | * located at address 0x4070 | 769 | * located at address 0x4070 |
770 | * | 770 | * |
771 | * 31-24: addr2_1 | 771 | * 31-24: addr2_1 |
772 | * 23-16: addr2_2 | 772 | * 23-16: addr2_2 |
773 | * 15-8: addr1_1 | 773 | * 15-8: addr1_1 |
774 | * 7-0: addr1_2 | 774 | * 7-0: addr1_2 |
775 | */ | 775 | */ |
776 | 776 | ||
777 | #define ET_UNI_PF_ADDR2_1_SHIFT 24 | 777 | #define ET_UNI_PF_ADDR2_1_SHIFT 24 |
778 | #define ET_UNI_PF_ADDR2_2_SHIFT 16 | 778 | #define ET_UNI_PF_ADDR2_2_SHIFT 16 |
779 | #define ET_UNI_PF_ADDR1_1_SHIFT 8 | 779 | #define ET_UNI_PF_ADDR1_1_SHIFT 8 |
780 | 780 | ||
781 | 781 | ||
782 | /* | 782 | /* |
783 | * structure for Multicast Hash reg in rxmac address map | 783 | * structure for Multicast Hash reg in rxmac address map |
784 | * located at address 0x4074 - 0x4080 | 784 | * located at address 0x4074 - 0x4080 |
785 | * Defined earlier (u32) | 785 | * Defined earlier (u32) |
786 | */ | 786 | */ |
787 | 787 | ||
788 | /* | 788 | /* |
789 | * structure for Packet Filter Control reg in rxmac address map | 789 | * structure for Packet Filter Control reg in rxmac address map |
790 | * located at address 0x4084 | 790 | * located at address 0x4084 |
791 | * | 791 | * |
792 | * 31-23: unused | 792 | * 31-23: unused |
793 | * 22-16: min_pkt_size | 793 | * 22-16: min_pkt_size |
794 | * 15-4: unused | 794 | * 15-4: unused |
795 | * 3: filter_frag_en | 795 | * 3: filter_frag_en |
796 | * 2: filter_uni_en | 796 | * 2: filter_uni_en |
797 | * 1: filter_multi_en | 797 | * 1: filter_multi_en |
798 | * 0: filter_broad_en | 798 | * 0: filter_broad_en |
799 | */ | 799 | */ |
800 | 800 | ||
801 | /* | 801 | /* |
802 | * structure for Memory Controller Interface Control Max Segment reg in rxmac | 802 | * structure for Memory Controller Interface Control Max Segment reg in rxmac |
803 | * address map. Located at address 0x4088 | 803 | * address map. Located at address 0x4088 |
804 | * | 804 | * |
805 | * 31-10: reserved | 805 | * 31-10: reserved |
806 | * 9-2: max_size | 806 | * 9-2: max_size |
807 | * 1: fc_en | 807 | * 1: fc_en |
808 | * 0: seg_en | 808 | * 0: seg_en |
809 | */ | 809 | */ |
810 | 810 | ||
811 | /* | 811 | /* |
812 | * structure for Memory Controller Interface Water Mark reg in rxmac address | 812 | * structure for Memory Controller Interface Water Mark reg in rxmac address |
813 | * map. Located at address 0x408C | 813 | * map. Located at address 0x408C |
814 | * | 814 | * |
815 | * 31-26: unused | 815 | * 31-26: unused |
816 | * 25-16: mark_hi | 816 | * 25-16: mark_hi |
817 | * 15-10: unused | 817 | * 15-10: unused |
818 | * 9-0: mark_lo | 818 | * 9-0: mark_lo |
819 | */ | 819 | */ |
820 | 820 | ||
821 | /* | 821 | /* |
822 | * structure for Rx Queue Dialog reg in rxmac address map. | 822 | * structure for Rx Queue Dialog reg in rxmac address map. |
823 | * located at address 0x4090 | 823 | * located at address 0x4090 |
824 | * | 824 | * |
825 | * 31-26: reserved | 825 | * 31-26: reserved |
826 | * 25-16: rd_ptr | 826 | * 25-16: rd_ptr |
827 | * 15-10: reserved | 827 | * 15-10: reserved |
828 | * 9-0: wr_ptr | 828 | * 9-0: wr_ptr |
829 | */ | 829 | */ |
830 | 830 | ||
831 | /* | 831 | /* |
832 | * structure for space available reg in rxmac address map. | 832 | * structure for space available reg in rxmac address map. |
833 | * located at address 0x4094 | 833 | * located at address 0x4094 |
834 | * | 834 | * |
835 | * 31-17: reserved | 835 | * 31-17: reserved |
836 | * 16: space_avail_en | 836 | * 16: space_avail_en |
837 | * 15-10: reserved | 837 | * 15-10: reserved |
838 | * 9-0: space_avail | 838 | * 9-0: space_avail |
839 | */ | 839 | */ |
840 | 840 | ||
841 | /* | 841 | /* |
842 | * structure for management interface reg in rxmac address map. | 842 | * structure for management interface reg in rxmac address map. |
843 | * located at address 0x4098 | 843 | * located at address 0x4098 |
844 | * | 844 | * |
845 | * 31-18: reserved | 845 | * 31-18: reserved |
846 | * 17: drop_pkt_en | 846 | * 17: drop_pkt_en |
847 | * 16-0: drop_pkt_mask | 847 | * 16-0: drop_pkt_mask |
848 | */ | 848 | */ |
849 | 849 | ||
850 | /* | 850 | /* |
851 | * structure for Error reg in rxmac address map. | 851 | * structure for Error reg in rxmac address map. |
852 | * located at address 0x409C | 852 | * located at address 0x409C |
853 | * | 853 | * |
854 | * 31-4: unused | 854 | * 31-4: unused |
855 | * 3: mif | 855 | * 3: mif |
856 | * 2: async | 856 | * 2: async |
857 | * 1: pkt_filter | 857 | * 1: pkt_filter |
858 | * 0: mcif | 858 | * 0: mcif |
859 | */ | 859 | */ |
860 | 860 | ||
861 | /* | 861 | /* |
862 | * Rx MAC Module of JAGCore Address Mapping | 862 | * Rx MAC Module of JAGCore Address Mapping |
863 | */ | 863 | */ |
struct rxmac_regs {			/* Location: */
	u32 ctrl;			/* 0x4000 */
	u32 crc0;			/* 0x4004 */
	u32 crc12;			/* 0x4008 */
	u32 crc34;			/* 0x400C */
	u32 sa_lo;			/* 0x4010 */
	u32 sa_hi;			/* 0x4014 */
	/* Packet-filter masks 0-4, four 32-bit words each */
	u32 mask0_word0;		/* 0x4018 */
	u32 mask0_word1;		/* 0x401C */
	u32 mask0_word2;		/* 0x4020 */
	u32 mask0_word3;		/* 0x4024 */
	u32 mask1_word0;		/* 0x4028 */
	u32 mask1_word1;		/* 0x402C */
	u32 mask1_word2;		/* 0x4030 */
	u32 mask1_word3;		/* 0x4034 */
	u32 mask2_word0;		/* 0x4038 */
	u32 mask2_word1;		/* 0x403C */
	u32 mask2_word2;		/* 0x4040 */
	u32 mask2_word3;		/* 0x4044 */
	u32 mask3_word0;		/* 0x4048 */
	u32 mask3_word1;		/* 0x404C */
	u32 mask3_word2;		/* 0x4050 */
	u32 mask3_word3;		/* 0x4054 */
	u32 mask4_word0;		/* 0x4058 */
	u32 mask4_word1;		/* 0x405C */
	u32 mask4_word2;		/* 0x4060 */
	u32 mask4_word3;		/* 0x4064 */
	/* Unicast packet-filter addresses */
	u32 uni_pf_addr1;		/* 0x4068 */
	u32 uni_pf_addr2;		/* 0x406C */
	u32 uni_pf_addr3;		/* 0x4070 */
	/* Multicast hash table words */
	u32 multi_hash1;		/* 0x4074 */
	u32 multi_hash2;		/* 0x4078 */
	u32 multi_hash3;		/* 0x407C */
	u32 multi_hash4;		/* 0x4080 */
	u32 pf_ctrl;			/* 0x4084, packet filter control */
	u32 mcif_ctrl_max_seg;		/* 0x4088 */
	u32 mcif_water_mark;		/* 0x408C */
	u32 rxq_diag;			/* 0x4090, rx queue diagnostic */
	u32 space_avail;		/* 0x4094, space available */

	u32 mif_ctrl;			/* 0x4098, management interface */
	u32 err_reg;			/* 0x409C, error */
};
907 | 907 | ||
908 | /* END OF RXMAC REGISTER ADDRESS MAP */ | 908 | /* END OF RXMAC REGISTER ADDRESS MAP */ |
909 | 909 | ||
910 | 910 | ||
911 | /* START OF MAC REGISTER ADDRESS MAP */ | 911 | /* START OF MAC REGISTER ADDRESS MAP */ |
912 | 912 | ||
913 | /* | 913 | /* |
914 | * structure for configuration #1 reg in mac address map. | 914 | * structure for configuration #1 reg in mac address map. |
915 | * located at address 0x5000 | 915 | * located at address 0x5000 |
916 | * | 916 | * |
917 | * 31: soft reset | 917 | * 31: soft reset |
918 | * 30: sim reset | 918 | * 30: sim reset |
919 | * 29-20: reserved | 919 | * 29-20: reserved |
920 | * 19: reset rx mc | 920 | * 19: reset rx mc |
921 | * 18: reset tx mc | 921 | * 18: reset tx mc |
922 | * 17: reset rx func | 922 | * 17: reset rx func |
923 | * 16: reset tx fnc | 923 | * 16: reset tx fnc |
924 | * 15-9: reserved | 924 | * 15-9: reserved |
925 | * 8: loopback | 925 | * 8: loopback |
926 | * 7-6: reserved | 926 | * 7-6: reserved |
927 | * 5: rx flow | 927 | * 5: rx flow |
928 | * 4: tx flow | 928 | * 4: tx flow |
929 | * 3: syncd rx en | 929 | * 3: syncd rx en |
930 | * 2: rx enable | 930 | * 2: rx enable |
931 | * 1: syncd tx en | 931 | * 1: syncd tx en |
932 | * 0: tx enable | 932 | * 0: tx enable |
933 | */ | 933 | */ |
934 | 934 | ||
/* MAC configuration #1 register (0x5000) bit masks; bit layout above */
#define CFG1_LOOPBACK	0x00000100	/* bit 8: loopback */
#define CFG1_RX_FLOW	0x00000020	/* bit 5: rx flow */
#define CFG1_TX_FLOW	0x00000010	/* bit 4: tx flow */
#define CFG1_RX_ENABLE	0x00000004	/* bit 2: rx enable */
#define CFG1_TX_ENABLE	0x00000001	/* bit 0: tx enable */
#define CFG1_WAIT	0x0000000A	/* RX & TX syncd */
941 | 941 | ||
942 | /* | 942 | /* |
943 | * structure for configuration #2 reg in mac address map. | 943 | * structure for configuration #2 reg in mac address map. |
944 | * located at address 0x5004 | 944 | * located at address 0x5004 |
945 | * 31-16: reserved | 945 | * 31-16: reserved |
946 | * 15-12: preamble | 946 | * 15-12: preamble |
947 | * 11-10: reserved | 947 | * 11-10: reserved |
948 | * 9-8: if mode | 948 | * 9-8: if mode |
949 | * 7-6: reserved | 949 | * 7-6: reserved |
950 | * 5: huge frame | 950 | * 5: huge frame |
951 | * 4: length check | 951 | * 4: length check |
952 | * 3: undefined | 952 | * 3: undefined |
953 | * 2: pad crc | 953 | * 2: pad crc |
954 | * 1: crc enable | 954 | * 1: crc enable |
955 | * 0: full duplex | 955 | * 0: full duplex |
956 | */ | 956 | */ |
957 | 957 | ||
958 | 958 | ||
959 | /* | 959 | /* |
960 | * structure for Interpacket gap reg in mac address map. | 960 | * structure for Interpacket gap reg in mac address map. |
961 | * located at address 0x5008 | 961 | * located at address 0x5008 |
962 | * | 962 | * |
963 | * 31: reserved | 963 | * 31: reserved |
964 | * 30-24: non B2B ipg 1 | 964 | * 30-24: non B2B ipg 1 |
965 | * 23: undefined | 965 | * 23: undefined |
966 | * 22-16: non B2B ipg 2 | 966 | * 22-16: non B2B ipg 2 |
967 | * 15-8: Min ifg enforce | 967 | * 15-8: Min ifg enforce |
968 | * 7-0: B2B ipg | 968 | * 7-0: B2B ipg |
969 | * | 969 | * |
970 | * structure for half duplex reg in mac address map. | 970 | * structure for half duplex reg in mac address map. |
971 | * located at address 0x500C | 971 | * located at address 0x500C |
972 | * 31-24: reserved | 972 | * 31-24: reserved |
973 | * 23-20: Alt BEB trunc | 973 | * 23-20: Alt BEB trunc |
974 | * 19: Alt BEB enable | 974 | * 19: Alt BEB enable |
975 | * 18: BP no backoff | 975 | * 18: BP no backoff |
976 | * 17: no backoff | 976 | * 17: no backoff |
977 | * 16: excess defer | 977 | * 16: excess defer |
978 | * 15-12: re-xmit max | 978 | * 15-12: re-xmit max |
979 | * 11-10: reserved | 979 | * 11-10: reserved |
980 | * 9-0: collision window | 980 | * 9-0: collision window |
981 | */ | 981 | */ |
982 | 982 | ||
983 | /* | 983 | /* |
984 | * structure for Maximum Frame Length reg in mac address map. | 984 | * structure for Maximum Frame Length reg in mac address map. |
985 | * located at address 0x5010: bits 0-15 hold the length. | 985 | * located at address 0x5010: bits 0-15 hold the length. |
986 | */ | 986 | */ |
987 | 987 | ||
988 | /* | 988 | /* |
989 | * structure for Reserve 1 reg in mac address map. | 989 | * structure for Reserve 1 reg in mac address map. |
990 | * located at address 0x5014 - 0x5018 | 990 | * located at address 0x5014 - 0x5018 |
991 | * Defined earlier (u32) | 991 | * Defined earlier (u32) |
992 | */ | 992 | */ |
993 | 993 | ||
994 | /* | 994 | /* |
995 | * structure for Test reg in mac address map. | 995 | * structure for Test reg in mac address map. |
996 | * located at address 0x501C | 996 | * located at address 0x501C |
997 | * test: bits 0-2, rest unused | 997 | * test: bits 0-2, rest unused |
998 | */ | 998 | */ |
999 | 999 | ||
1000 | /* | 1000 | /* |
1001 | * structure for MII Management Configuration reg in mac address map. | 1001 | * structure for MII Management Configuration reg in mac address map. |
1002 | * located at address 0x5020 | 1002 | * located at address 0x5020 |
1003 | * | 1003 | * |
1004 | * 31: reset MII mgmt | 1004 | * 31: reset MII mgmt |
1005 | * 30-6: unused | 1005 | * 30-6: unused |
1006 | * 5: scan auto increment | 1006 | * 5: scan auto increment |
1007 | * 4: preamble suppress | 1007 | * 4: preamble suppress |
1008 | * 3: undefined | 1008 | * 3: undefined |
1009 | * 2-0: mgmt clock reset | 1009 | * 2-0: mgmt clock reset |
1010 | */ | 1010 | */ |
1011 | 1011 | ||
1012 | /* | 1012 | /* |
1013 | * structure for MII Management Command reg in mac address map. | 1013 | * structure for MII Management Command reg in mac address map. |
1014 | * located at address 0x5024 | 1014 | * located at address 0x5024 |
1015 | * bit 1: scan cycle | 1015 | * bit 1: scan cycle |
1016 | * bit 0: read cycle | 1016 | * bit 0: read cycle |
1017 | */ | 1017 | */ |
1018 | 1018 | ||
1019 | /* | 1019 | /* |
1020 | * structure for MII Management Address reg in mac address map. | 1020 | * structure for MII Management Address reg in mac address map. |
1021 | * located at address 0x5028 | 1021 | * located at address 0x5028 |
1022 | * 31-13: reserved | 1022 | * 31-13: reserved |
1023 | * 12-8: phy addr | 1023 | * 12-8: phy addr |
1024 | * 7-5: reserved | 1024 | * 7-5: reserved |
1025 | * 4-0: register | 1025 | * 4-0: register |
1026 | */ | 1026 | */ |
1027 | 1027 | ||
/*
 * Compose the MII management address register value (0x5028):
 * bits 12-8 hold the PHY address, bits 4-0 the PHY register index.
 * Fully parenthesized so the expansion is precedence-safe regardless
 * of the argument expressions.
 */
#define MII_ADDR(phy, reg)	(((phy) << 8) | (reg))
1029 | 1029 | ||
1030 | /* | 1030 | /* |
1031 | * structure for MII Management Control reg in mac address map. | 1031 | * structure for MII Management Control reg in mac address map. |
1032 | * located at address 0x502C | 1032 | * located at address 0x502C |
1033 | * 31-16: reserved | 1033 | * 31-16: reserved |
1034 | * 15-0: phy control | 1034 | * 15-0: phy control |
1035 | */ | 1035 | */ |
1036 | 1036 | ||
1037 | /* | 1037 | /* |
1038 | * structure for MII Management Status reg in mac address map. | 1038 | * structure for MII Management Status reg in mac address map. |
1039 | * located at address 0x5030 | 1039 | * located at address 0x5030 |
1040 | * 31-16: reserved | 1040 | * 31-16: reserved |
1041 | * 15-0: phy control | 1041 | * 15-0: phy control |
1042 | */ | 1042 | */ |
1043 | 1043 | ||
1044 | /* | 1044 | /* |
1045 | * structure for MII Management Indicators reg in mac address map. | 1045 | * structure for MII Management Indicators reg in mac address map. |
1046 | * located at address 0x5034 | 1046 | * located at address 0x5034 |
1047 | * 31-3: reserved | 1047 | * 31-3: reserved |
1048 | * 2: not valid | 1048 | * 2: not valid |
1049 | * 1: scanning | 1049 | * 1: scanning |
1050 | * 0: busy | 1050 | * 0: busy |
1051 | */ | 1051 | */ |
1052 | 1052 | ||
/* MII management indicators register (0x5034) bit masks */
#define MGMT_BUSY	0x00000001	/* busy */
#define MGMT_WAIT	0x00000005	/* busy | not valid */
1055 | 1055 | ||
1056 | /* | 1056 | /* |
1057 | * structure for Interface Control reg in mac address map. | 1057 | * structure for Interface Control reg in mac address map. |
1058 | * located at address 0x5038 | 1058 | * located at address 0x5038 |
1059 | * | 1059 | * |
1060 | * 31: reset if module | 1060 | * 31: reset if module |
1061 | * 30-28: reserved | 1061 | * 30-28: reserved |
1062 | * 27: tbi mode | 1062 | * 27: tbi mode |
1063 | * 26: ghd mode | 1063 | * 26: ghd mode |
1064 | * 25: lhd mode | 1064 | * 25: lhd mode |
1065 | * 24: phy mode | 1065 | * 24: phy mode |
1066 | * 23: reset per mii | 1066 | * 23: reset per mii |
1067 | * 22-17: reserved | 1067 | * 22-17: reserved |
1068 | * 16: speed | 1068 | * 16: speed |
1069 | * 15: reset pe100x | 1069 | * 15: reset pe100x |
1070 | * 14-11: reserved | 1070 | * 14-11: reserved |
1071 | * 10: force quiet | 1071 | * 10: force quiet |
1072 | * 9: no cipher | 1072 | * 9: no cipher |
1073 | * 8: disable link fail | 1073 | * 8: disable link fail |
1074 | * 7: reset gpsi | 1074 | * 7: reset gpsi |
1075 | * 6-1: reserved | 1075 | * 6-1: reserved |
1076 | * 0: enable jabber protection | 1076 | * 0: enable jabber protection |
1077 | */ | 1077 | */ |
1078 | 1078 | ||
1079 | /* | 1079 | /* |
1080 | * structure for Interface Status reg in mac address map. | 1080 | * structure for Interface Status reg in mac address map. |
1081 | * located at address 0x503C | 1081 | * located at address 0x503C |
1082 | * | 1082 | * |
1083 | * 31-10: reserved | 1083 | * 31-10: reserved |
1084 | * 9: excess_defer | 1084 | * 9: excess_defer |
1085 | * 8: clash | 1085 | * 8: clash |
1086 | * 7: phy_jabber | 1086 | * 7: phy_jabber |
1087 | * 6: phy_link_ok | 1087 | * 6: phy_link_ok |
1088 | * 5: phy_full_duplex | 1088 | * 5: phy_full_duplex |
1089 | * 4: phy_speed | 1089 | * 4: phy_speed |
1090 | * 3: pe100x_link_fail | 1090 | * 3: pe100x_link_fail |
1091 | * 2: pe10t_loss_carrier | 1091 | * 2: pe10t_loss_carrier |
1092 | * 1: pe10t_sqe_error | 1092 | * 1: pe10t_sqe_error |
1093 | * 0: pe10t_jabber | 1093 | * 0: pe10t_jabber |
1094 | */ | 1094 | */ |
1095 | 1095 | ||
1096 | /* | 1096 | /* |
1097 | * structure for Mac Station Address, Part 1 reg in mac address map. | 1097 | * structure for Mac Station Address, Part 1 reg in mac address map. |
1098 | * located at address 0x5040 | 1098 | * located at address 0x5040 |
1099 | * | 1099 | * |
1100 | * 31-24: Octet6 | 1100 | * 31-24: Octet6 |
1101 | * 23-16: Octet5 | 1101 | * 23-16: Octet5 |
1102 | * 15-8: Octet4 | 1102 | * 15-8: Octet4 |
1103 | * 7-0: Octet3 | 1103 | * 7-0: Octet3 |
1104 | */ | 1104 | */ |
1105 | 1105 | ||
/* Byte-lane shifts for station address part 1 register (0x5040) */
#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24	/* Octet6 in bits 31-24 */
#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16	/* Octet5 in bits 23-16 */
#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8	/* Octet4 in bits 15-8 */
1109 | 1109 | ||
1110 | /* | 1110 | /* |
1111 | * structure for Mac Station Address, Part 2 reg in mac address map. | 1111 | * structure for Mac Station Address, Part 2 reg in mac address map. |
1112 | * located at address 0x5044 | 1112 | * located at address 0x5044 |
1113 | * | 1113 | * |
1114 | * 31-24: Octet2 | 1114 | * 31-24: Octet2 |
1115 | * 23-16: Octet1 | 1115 | * 23-16: Octet1 |
1116 | * 15-0: reserved | 1116 | * 15-0: reserved |
1117 | */ | 1117 | */ |
1118 | 1118 | ||
/* Byte-lane shifts for station address part 2 register (0x5044) */
#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24	/* Octet2 in bits 31-24 */
#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16	/* Octet1 in bits 23-16 */
1121 | 1121 | ||
1122 | /* | 1122 | /* |
1123 | * MAC Module of JAGCore Address Mapping | 1123 | * MAC Module of JAGCore Address Mapping |
1124 | */ | 1124 | */ |
struct mac_regs {			/* Location: */
	u32 cfg1;			/* 0x5000, configuration #1 */
	u32 cfg2;			/* 0x5004, configuration #2 */
	u32 ipg;			/* 0x5008, interpacket gap */
	u32 hfdp;			/* 0x500C, half duplex */
	u32 max_fm_len;			/* 0x5010, maximum frame length */
	u32 rsv1;			/* 0x5014, reserved */
	u32 rsv2;			/* 0x5018, reserved */
	u32 mac_test;			/* 0x501C, test */
	u32 mii_mgmt_cfg;		/* 0x5020, MII mgmt configuration */
	u32 mii_mgmt_cmd;		/* 0x5024, MII mgmt command */
	u32 mii_mgmt_addr;		/* 0x5028, MII mgmt address */
	u32 mii_mgmt_ctrl;		/* 0x502C, MII mgmt control */
	u32 mii_mgmt_stat;		/* 0x5030, MII mgmt status */
	u32 mii_mgmt_indicator;		/* 0x5034, MII mgmt indicators */
	u32 if_ctrl;			/* 0x5038, interface control */
	u32 if_stat;			/* 0x503C, interface status */
	u32 station_addr_1;		/* 0x5040, station address part 1 */
	u32 station_addr_2;		/* 0x5044, station address part 2 */
};
1145 | 1145 | ||
1146 | /* END OF MAC REGISTER ADDRESS MAP */ | 1146 | /* END OF MAC REGISTER ADDRESS MAP */ |
1147 | 1147 | ||
1148 | /* START OF MAC STAT REGISTER ADDRESS MAP */ | 1148 | /* START OF MAC STAT REGISTER ADDRESS MAP */ |
1149 | 1149 | ||
1150 | /* | 1150 | /* |
 * structure for Carry Register One and its Mask Register reg located in mac
1152 | * stat address map address 0x6130 and 0x6138. | 1152 | * stat address map address 0x6130 and 0x6138. |
1153 | * | 1153 | * |
1154 | * 31: tr64 | 1154 | * 31: tr64 |
1155 | * 30: tr127 | 1155 | * 30: tr127 |
1156 | * 29: tr255 | 1156 | * 29: tr255 |
1157 | * 28: tr511 | 1157 | * 28: tr511 |
1158 | * 27: tr1k | 1158 | * 27: tr1k |
1159 | * 26: trmax | 1159 | * 26: trmax |
1160 | * 25: trmgv | 1160 | * 25: trmgv |
1161 | * 24-17: unused | 1161 | * 24-17: unused |
1162 | * 16: rbyt | 1162 | * 16: rbyt |
1163 | * 15: rpkt | 1163 | * 15: rpkt |
1164 | * 14: rfcs | 1164 | * 14: rfcs |
1165 | * 13: rmca | 1165 | * 13: rmca |
1166 | * 12: rbca | 1166 | * 12: rbca |
1167 | * 11: rxcf | 1167 | * 11: rxcf |
1168 | * 10: rxpf | 1168 | * 10: rxpf |
1169 | * 9: rxuo | 1169 | * 9: rxuo |
1170 | * 8: raln | 1170 | * 8: raln |
1171 | * 7: rflr | 1171 | * 7: rflr |
1172 | * 6: rcde | 1172 | * 6: rcde |
1173 | * 5: rcse | 1173 | * 5: rcse |
1174 | * 4: rund | 1174 | * 4: rund |
1175 | * 3: rovr | 1175 | * 3: rovr |
1176 | * 2: rfrg | 1176 | * 2: rfrg |
1177 | * 1: rjbr | 1177 | * 1: rjbr |
1178 | * 0: rdrp | 1178 | * 0: rdrp |
1179 | */ | 1179 | */ |
1180 | 1180 | ||
1181 | /* | 1181 | /* |
1182 | * structure for Carry Register Two Mask Register reg in mac stat address map. | 1182 | * structure for Carry Register Two Mask Register reg in mac stat address map. |
1183 | * located at address 0x613C | 1183 | * located at address 0x613C |
1184 | * | 1184 | * |
1185 | * 31-20: unused | 1185 | * 31-20: unused |
1186 | * 19: tjbr | 1186 | * 19: tjbr |
1187 | * 18: tfcs | 1187 | * 18: tfcs |
1188 | * 17: txcf | 1188 | * 17: txcf |
1189 | * 16: tovr | 1189 | * 16: tovr |
1190 | * 15: tund | 1190 | * 15: tund |
1191 | * 14: trfg | 1191 | * 14: trfg |
1192 | * 13: tbyt | 1192 | * 13: tbyt |
1193 | * 12: tpkt | 1193 | * 12: tpkt |
1194 | * 11: tmca | 1194 | * 11: tmca |
1195 | * 10: tbca | 1195 | * 10: tbca |
1196 | * 9: txpf | 1196 | * 9: txpf |
1197 | * 8: tdfr | 1197 | * 8: tdfr |
1198 | * 7: tedf | 1198 | * 7: tedf |
1199 | * 6: tscl | 1199 | * 6: tscl |
1200 | * 5: tmcl | 1200 | * 5: tmcl |
1201 | * 4: tlcl | 1201 | * 4: tlcl |
1202 | * 3: txcl | 1202 | * 3: txcl |
1203 | * 2: tncl | 1203 | * 2: tncl |
1204 | * 1: tpfh | 1204 | * 1: tpfh |
1205 | * 0: tdrp | 1205 | * 0: tdrp |
1206 | */ | 1206 | */ |
1207 | 1207 | ||
1208 | /* | 1208 | /* |
1209 | * MAC STATS Module of JAGCore Address Mapping | 1209 | * MAC STATS Module of JAGCore Address Mapping |
1210 | */ | 1210 | */ |
struct macstat_regs {			/* Location: */
	/* 0x6000 - 0x607C: unused region of the mac stat map */
	u32 pad[32];			/* 0x6000 - 607C */

	/* One 32-bit hardware statistics counter per register below */

	/* Tx/Rx 0-64 Byte Frame Counter */
	u32 txrx_0_64_byte_frames;	/* 0x6080 */

	/* Tx/Rx 65-127 Byte Frame Counter */
	u32 txrx_65_127_byte_frames;	/* 0x6084 */

	/* Tx/Rx 128-255 Byte Frame Counter */
	u32 txrx_128_255_byte_frames;	/* 0x6088 */

	/* Tx/Rx 256-511 Byte Frame Counter */
	u32 txrx_256_511_byte_frames;	/* 0x608C */

	/* Tx/Rx 512-1023 Byte Frame Counter */
	u32 txrx_512_1023_byte_frames;	/* 0x6090 */

	/* Tx/Rx 1024-1518 Byte Frame Counter */
	u32 txrx_1024_1518_byte_frames;	/* 0x6094 */

	/* Tx/Rx 1519-1522 Byte Good VLAN Frame Count */
	u32 txrx_1519_1522_gvln_frames;	/* 0x6098 */

	/* Rx Byte Counter */
	u32 rx_bytes;			/* 0x609C */

	/* Rx Packet Counter */
	u32 rx_packets;			/* 0x60A0 */

	/* Rx FCS Error Counter */
	u32 rx_fcs_errs;		/* 0x60A4 */

	/* Rx Multicast Packet Counter */
	u32 rx_multicast_packets;	/* 0x60A8 */

	/* Rx Broadcast Packet Counter */
	u32 rx_broadcast_packets;	/* 0x60AC */

	/* Rx Control Frame Packet Counter */
	u32 rx_control_frames;		/* 0x60B0 */

	/* Rx Pause Frame Packet Counter */
	u32 rx_pause_frames;		/* 0x60B4 */

	/* Rx Unknown OP Code Counter */
	u32 rx_unknown_opcodes;		/* 0x60B8 */

	/* Rx Alignment Error Counter */
	u32 rx_align_errs;		/* 0x60BC */

	/* Rx Frame Length Error Counter */
	u32 rx_frame_len_errs;		/* 0x60C0 */

	/* Rx Code Error Counter */
	u32 rx_code_errs;		/* 0x60C4 */

	/* Rx Carrier Sense Error Counter */
	u32 rx_carrier_sense_errs;	/* 0x60C8 */

	/* Rx Undersize Packet Counter */
	u32 rx_undersize_packets;	/* 0x60CC */

	/* Rx Oversize Packet Counter */
	u32 rx_oversize_packets;	/* 0x60D0 */

	/* Rx Fragment Counter */
	u32 rx_fragment_packets;	/* 0x60D4 */

	/* Rx Jabber Counter */
	u32 rx_jabbers;			/* 0x60D8 */

	/* Rx Drop */
	u32 rx_drops;			/* 0x60DC */

	/* Tx Byte Counter */
	u32 tx_bytes;			/* 0x60E0 */

	/* Tx Packet Counter */
	u32 tx_packets;			/* 0x60E4 */

	/* Tx Multicast Packet Counter */
	u32 tx_multicast_packets;	/* 0x60E8 */

	/* Tx Broadcast Packet Counter */
	u32 tx_broadcast_packets;	/* 0x60EC */

	/* Tx Pause Control Frame Counter */
	u32 tx_pause_frames;		/* 0x60F0 */

	/* Tx Deferral Packet Counter */
	u32 tx_deferred;		/* 0x60F4 */

	/* Tx Excessive Deferral Packet Counter */
	u32 tx_excessive_deferred;	/* 0x60F8 */

	/* Tx Single Collision Packet Counter */
	u32 tx_single_collisions;	/* 0x60FC */

	/* Tx Multiple Collision Packet Counter */
	u32 tx_multiple_collisions;	/* 0x6100 */

	/* Tx Late Collision Packet Counter */
	u32 tx_late_collisions;		/* 0x6104 */

	/* Tx Excessive Collision Packet Counter */
	u32 tx_excessive_collisions;	/* 0x6108 */

	/* Tx Total Collision Packet Counter */
	u32 tx_total_collisions;	/* 0x610C */

	/* Tx Pause Frame Honored Counter */
	u32 tx_pause_honored_frames;	/* 0x6110 */

	/* Tx Drop Frame Counter */
	u32 tx_drops;			/* 0x6114 */

	/* Tx Jabber Frame Counter */
	u32 tx_jabbers;			/* 0x6118 */

	/* Tx FCS Error Counter */
	u32 tx_fcs_errs;		/* 0x611C */

	/* Tx Control Frame Counter */
	u32 tx_control_frames;		/* 0x6120 */

	/* Tx Oversize Frame Counter */
	u32 tx_oversize_frames;		/* 0x6124 */

	/* Tx Undersize Frame Counter */
	u32 tx_undersize_frames;	/* 0x6128 */

	/* Tx Fragments Frame Counter */
	u32 tx_fragments;		/* 0x612C */

	/* Carry Register One Register */
	u32 carry_reg1;			/* 0x6130 */

	/* Carry Register Two Register */
	u32 carry_reg2;			/* 0x6134 */

	/* Carry Register One Mask Register */
	u32 carry_reg1_mask;		/* 0x6138 */

	/* Carry Register Two Mask Register */
	u32 carry_reg2_mask;		/* 0x613C */
};
1358 | 1358 | ||
1359 | /* END OF MAC STAT REGISTER ADDRESS MAP */ | 1359 | /* END OF MAC STAT REGISTER ADDRESS MAP */ |
1360 | 1360 | ||
1361 | /* START OF MMC REGISTER ADDRESS MAP */ | 1361 | /* START OF MMC REGISTER ADDRESS MAP */ |
1362 | 1362 | ||
1363 | /* | 1363 | /* |
1364 | * Main Memory Controller Control reg in mmc address map. | 1364 | * Main Memory Controller Control reg in mmc address map. |
1365 | * located at address 0x7000 | 1365 | * located at address 0x7000 |
1366 | */ | 1366 | */ |
1367 | 1367 | ||
/*
 * MMC control register (0x7000) single-bit masks. Written in hex to
 * match the register bit-mask convention used elsewhere in this file
 * (e.g. CFG1_*, MGMT_*); the values are unchanged.
 */
#define ET_MMC_ENABLE		0x00000001	/* bit 0 */
#define ET_MMC_ARB_DISABLE	0x00000002	/* bit 1 */
#define ET_MMC_RXMAC_DISABLE	0x00000004	/* bit 2 */
#define ET_MMC_TXMAC_DISABLE	0x00000008	/* bit 3 */
#define ET_MMC_TXDMA_DISABLE	0x00000010	/* bit 4 */
#define ET_MMC_RXDMA_DISABLE	0x00000020	/* bit 5 */
#define ET_MMC_FORCE_CE		0x00000040	/* bit 6 */
1375 | 1375 | ||
1376 | /* | 1376 | /* |
1377 | * Main Memory Controller Host Memory Access Address reg in mmc | 1377 | * Main Memory Controller Host Memory Access Address reg in mmc |
1378 | * address map. Located at address 0x7004. Top 16 bits hold the address bits | 1378 | * address map. Located at address 0x7004. Top 16 bits hold the address bits |
1379 | */ | 1379 | */ |
1380 | 1380 | ||
/* Host SRAM access register (0x7004) low bits */
#define ET_SRAM_REQ_ACCESS	1	/* bit 0: request access */
#define ET_SRAM_WR_ACCESS	2	/* bit 1: write access */
#define ET_SRAM_IS_CTRL		4	/* bit 2: control access — TODO confirm exact semantics */
1384 | 1384 | ||
1385 | /* | 1385 | /* |
1386 | * structure for Main Memory Controller Host Memory Access Data reg in mmc | 1386 | * structure for Main Memory Controller Host Memory Access Data reg in mmc |
1387 | * address map. Located at address 0x7008 - 0x7014 | 1387 | * address map. Located at address 0x7008 - 0x7014 |
1388 | * Defined earlier (u32) | 1388 | * Defined earlier (u32) |
1389 | */ | 1389 | */ |
1390 | 1390 | ||
1391 | /* | 1391 | /* |
1392 | * Memory Control Module of JAGCore Address Mapping | 1392 | * Memory Control Module of JAGCore Address Mapping |
1393 | */ | 1393 | */ |
struct mmc_regs {			/* Location: */
	u32 mmc_ctrl;			/* 0x7000, memory controller control */
	u32 sram_access;		/* 0x7004, host SRAM access */
	/* 0x7008 - 0x7014: host memory access data words */
	u32 sram_word1;			/* 0x7008 */
	u32 sram_word2;			/* 0x700C */
	u32 sram_word3;			/* 0x7010 */
	u32 sram_word4;			/* 0x7014 */
};
1402 | 1402 | ||
1403 | /* END OF MMC REGISTER ADDRESS MAP */ | 1403 | /* END OF MMC REGISTER ADDRESS MAP */ |
1404 | 1404 | ||
1405 | 1405 | ||
/*
 * JAGCore Address Mapping
 *
 * Each functional block occupies a 4096-byte window: the "unused_*" pad
 * arrays (4096 - sizeof(block)) keep every following block aligned on the
 * next 4 KiB boundary, so the struct layout maps the device registers
 * exactly and must not be reordered.
 */
struct address_map {
	struct global_regs global;
	/* unused section of global address map */
	u8 unused_global[4096 - sizeof(struct global_regs)];
	struct txdma_regs txdma;
	/* unused section of txdma address map */
	u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
	struct rxdma_regs rxdma;
	/* unused section of rxdma address map */
	u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
	struct txmac_regs txmac;
	/* unused section of txmac address map */
	u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
	struct rxmac_regs rxmac;
	/* unused section of rxmac address map */
	u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
	struct mac_regs mac;
	/* unused section of mac address map */
	u8 unused_mac[4096 - sizeof(struct mac_regs)];
	struct macstat_regs macstat;
	/* unused section of mac stat address map */
	u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
	struct mmc_regs mmc;
	/* unused section of mmc address map */
	u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
	/* unused section of address map */
	u8 unused_[1015808];

	u8 unused_exp_rom[4096];	/* MGS-size TBD */
	u8 unused__[524288];		/* unused section of address map */
};
1440 | 1440 | ||
1441 | /* | 1441 | /* |
1442 | * Defines for generic MII registers 0x00 -> 0x0F can be found in | 1442 | * Defines for generic MII registers 0x00 -> 0x0F can be found in |
1443 | * include/linux/mii.h | 1443 | * include/linux/mii.h |
1444 | */ | 1444 | */ |
1445 | 1445 | ||
/* some defines for modem registers that seem to be 'reserved' */
#define PHY_INDEX_REG		0x10
#define PHY_DATA_REG		0x11
#define PHY_MPHY_CONTROL_REG	0x12

/* defines for specified registers; the trailing comment gives the vendor
 * (TruePHY) register name and its decimal register number.
 */
#define PHY_LOOPBACK_CONTROL	0x13	/* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */
					/* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */
#define PHY_REGISTER_MGMT_CONTROL 0x15	/* TRU_VMI_MI_SEQ_CONTROL_REG 21 */
#define PHY_CONFIG		0x16	/* TRU_VMI_CONFIGURATION_REG 22 */
#define PHY_PHY_CONTROL		0x17	/* TRU_VMI_PHY_CONTROL_REG 23 */
#define PHY_INTERRUPT_MASK	0x18	/* TRU_VMI_INTERRUPT_MASK_REG 24 */
#define PHY_INTERRUPT_STATUS	0x19	/* TRU_VMI_INTERRUPT_STATUS_REG 25 */
#define PHY_PHY_STATUS		0x1A	/* TRU_VMI_PHY_STATUS_REG 26 */
#define PHY_LED_1		0x1B	/* TRU_VMI_LED_CONTROL_1_REG 27 */
#define PHY_LED_2		0x1C	/* TRU_VMI_LED_CONTROL_2_REG 28 */
					/* TRU_VMI_LINK_CONTROL_REG 29 */
					/* TRU_VMI_TIMING_CONTROL_REG */
1464 | 1464 | ||
/* MI Register 10: Gigabit basic mode status reg(Reg 0x0A) */
#define ET_1000BT_MSTR_SLV	0x4000

/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */

/* MI Register 19: Loopback Control Reg(0x13)
 *	15: mii_en
 *	14: pcs_en
 *	13: pmd_en
 *	12: all_digital_en
 *	11: replica_en
 *	10: line_driver_en
 *	9-0: reserved
 */

/* MI Register 20: Reserved Reg(0x14) */

/* MI Register 21: Management Interface Control Reg(0x15)
 *	15-11: reserved
 *	10-4: mi_error_count
 *	3: reserved
 *	2: ignore_10g_fr
 *	1: reserved
 *	0: preamble_suppress_en
 */

/* MI Register 22: PHY Configuration Reg(0x16)
 *	15: crs_tx_en
 *	14: reserved
 *	13-12: tx_fifo_depth
 *	11-10: speed_downshift
 *	9: pbi_detect
 *	8: tbi_rate
 *	7: alternate_np
 *	6: group_mdio_en
 *	5: tx_clock_en
 *	4: sys_clock_en
 *	3: reserved
 *	2-0: mac_if_mode
 */

/* mask and the four possible values of the tx_fifo_depth field (bits 13-12) */
#define ET_PHY_CONFIG_TX_FIFO_DEPTH	0x3000

#define ET_PHY_CONFIG_FIFO_DEPTH_8	0x0000
#define ET_PHY_CONFIG_FIFO_DEPTH_16	0x1000
#define ET_PHY_CONFIG_FIFO_DEPTH_32	0x2000
#define ET_PHY_CONFIG_FIFO_DEPTH_64	0x3000

/* MI Register 23: PHY CONTROL Reg(0x17)
 *	15: reserved
 *	14: tdr_en
 *	13: reserved
 *	12-11: downshift_attempts
 *	10-6: reserved
 *	5: jabber_10baseT
 *	4: sqe_10baseT
 *	3: tp_loopback_10baseT
 *	2: preamble_gen_en
 *	1: reserved
 *	0: force_int
 */

/* MI Register 24: Interrupt Mask Reg(0x18)
 *	15-10: reserved
 *	9: mdio_sync_lost
 *	8: autoneg_status
 *	7: hi_bit_err
 *	6: np_rx
 *	5: err_counter_full
 *	4: fifo_over_underflow
 *	3: rx_status
 *	2: link_status
 *	1: automatic_speed
 *	0: int_en
 */

#define ET_PHY_INT_MASK_AUTONEGSTAT	0x0100
#define ET_PHY_INT_MASK_LINKSTAT	0x0004
#define ET_PHY_INT_MASK_ENABLE		0x0001

/* MI Register 25: Interrupt Status Reg(0x19)
 *	15-10: reserved
 *	9: mdio_sync_lost
 *	8: autoneg_status
 *	7: hi_bit_err
 *	6: np_rx
 *	5: err_counter_full
 *	4: fifo_over_underflow
 *	3: rx_status
 *	2: link_status
 *	1: automatic_speed
 *	0: int_en
 */

/* MI Register 26: PHY Status Reg(0x1A)
 *	15: reserved
 *	14-13: autoneg_fault
 *	12: autoneg_status
 *	11: mdi_x_status
 *	10: polarity_status
 *	9-8: speed_status
 *	7: duplex_status
 *	6: link_status
 *	5: tx_status
 *	4: rx_status
 *	3: collision_status
 *	2: autoneg_en
 *	1: pause_en
 *	0: asymmetric_dir
 */
#define ET_PHY_AUTONEG_STATUS	0x1000
#define ET_PHY_POLARITY_STATUS	0x0400
#define ET_PHY_SPEED_STATUS	0x0300
#define ET_PHY_DUPLEX_STATUS	0x0080
#define ET_PHY_LSTATUS		0x0040
#define ET_PHY_AUTONEG_ENABLE	0x0020

/* MI Register 27: LED Control Reg 1(0x1B)
 *	15-14: reserved
 *	13-12: led_dup_indicate
 *	11-10: led_10baseT
 *	9-8: led_collision
 *	7-4: reserved
 *	3-2: pulse_dur
 *	1: pulse_stretch1
 *	0: pulse_stretch0
 */

/* MI Register 28: LED Control Reg 2(0x1C)
 *	15-12: led_link
 *	11-8: led_tx_rx
 *	7-4: led_100BaseTX
 *	3-0: led_1000BaseT
 */
#define ET_LED2_LED_LINK	0xF000
#define ET_LED2_LED_TXRX	0x0F00
#define ET_LED2_LED_100TX	0x00F0
#define ET_LED2_LED_1000T	0x000F

/* defines for LED control reg 2 values; each 4-bit field above takes one of
 * these function selectors.
 */
#define LED_VAL_1000BT			0x0
#define LED_VAL_100BTX			0x1
#define LED_VAL_10BT			0x2
#define LED_VAL_1000BT_100BTX		0x3 /* 1000BT on, 100BTX blink */
#define LED_VAL_LINKON			0x4
#define LED_VAL_TX			0x5
#define LED_VAL_RX			0x6
#define LED_VAL_TXRX			0x7 /* TX or RX */
#define LED_VAL_DUPLEXFULL		0x8
#define LED_VAL_COLLISION		0x9
#define LED_VAL_LINKON_ACTIVE		0xA /* Link on, activity blink */
#define LED_VAL_LINKON_RECV		0xB /* Link on, receive blink */
#define LED_VAL_DUPLEXFULL_COLLISION	0xC /* Duplex on, collision blink */
#define LED_VAL_BLINK			0xD
#define LED_VAL_ON			0xE
#define LED_VAL_OFF			0xF

/* shift counts to place a LED_VAL_* selector into its LED Control Reg 2
 * field (led_1000BaseT occupies bits 3-0, so it needs no shift).
 */
#define LED_LINK_SHIFT			12
#define LED_TXRX_SHIFT			8
#define LED_100TX_SHIFT			4

/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1F) */
1627 | 1627 | ||
/* Defines for PHY access routines */

/* Define bit operation flags */
#define TRUEPHY_BIT_CLEAR	0
#define TRUEPHY_BIT_SET		1
#define TRUEPHY_BIT_READ	2

/* Define read/write operation flags */
#ifndef TRUEPHY_READ
#define TRUEPHY_READ		0
#define TRUEPHY_WRITE		1
#define TRUEPHY_MASK		2
#endif

/* Define master/slave configuration values */
#define TRUEPHY_CFG_SLAVE	0
#define TRUEPHY_CFG_MASTER	1

/* Define MDI/MDI-X settings */
#define TRUEPHY_MDI		0
#define TRUEPHY_MDIX		1
#define TRUEPHY_AUTO_MDI_MDIX	2

/* Define 10Base-T link polarities */
#define TRUEPHY_POLARITY_NORMAL		0
#define TRUEPHY_POLARITY_INVERTED	1

/* Define auto-negotiation results */
#define TRUEPHY_ANEG_NOT_COMPLETE	0
#define TRUEPHY_ANEG_COMPLETE		1
#define TRUEPHY_ANEG_DISABLED		2

/* Define duplex advertisement flags (bit flags: FULL and HALF may be OR'd) */
#define TRUEPHY_ADV_DUPLEX_NONE		0x00
#define TRUEPHY_ADV_DUPLEX_FULL		0x01
#define TRUEPHY_ADV_DUPLEX_HALF		0x02
#define TRUEPHY_ADV_DUPLEX_BOTH	\
	(TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
1666 | 1666 | ||
1667 | 1667 |