Commit 6460d948f3ebf7d5040328a60a0ab7221f69945b
Committed by David S. Miller
1 parent: febca281f6
Exists in master and in 7 other branches
[NET]: Add ethtool support for NETIF_F_IPV6_CSUM devices.
Add ethtool utility function to set or clear IPV6_CSUM feature flag. Modify tg3.c and bnx2.c to use this function when doing ethtool -K to change tx checksum.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 4 changed files with 15 additions and 2 deletions
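The utility function the commit message refers to is not part of the bnx2.c hunk shown below; it lives in the ethtool core, one of the other changed files. As context, here is a minimal sketch of what such a helper amounts to, assuming a name along the lines of ethtool_op_set_tx_ipv6_csum() and the 2007-era dev->features flag handling (the name and exact placement are assumptions, not quoted from this commit):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Illustrative sketch only: set or clear both transmit checksum-offload
 * feature bits together for hardware that supports NETIF_F_IPV6_CSUM.
 */
int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	return 0;
}

A driver such as bnx2 or tg3 can then call a helper like this from its ethtool set_tx_csum handler on IPv6-checksum-capable chips (e.g. the 5709), so that ethtool -K ethX tx on|off toggles IPv4 and IPv6 transmit checksum offload consistently.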
drivers/net/bnx2.c
1 | /* bnx2.c: Broadcom NX2 network driver. | 1 | /* bnx2.c: Broadcom NX2 network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2004-2007 Broadcom Corporation | 3 | * Copyright (c) 2004-2007 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Michael Chan (mchan@broadcom.com) | 9 | * Written by: Michael Chan (mchan@broadcom.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/vmalloc.h> | 21 | #include <linux/vmalloc.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
28 | #include <linux/dma-mapping.h> | 28 | #include <linux/dma-mapping.h> |
29 | #include <asm/bitops.h> | 29 | #include <asm/bitops.h> |
30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
31 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <asm/byteorder.h> | 33 | #include <asm/byteorder.h> |
34 | #include <asm/page.h> | 34 | #include <asm/page.h> |
35 | #include <linux/time.h> | 35 | #include <linux/time.h> |
36 | #include <linux/ethtool.h> | 36 | #include <linux/ethtool.h> |
37 | #include <linux/mii.h> | 37 | #include <linux/mii.h> |
38 | #ifdef NETIF_F_HW_VLAN_TX | 38 | #ifdef NETIF_F_HW_VLAN_TX |
39 | #include <linux/if_vlan.h> | 39 | #include <linux/if_vlan.h> |
40 | #define BCM_VLAN 1 | 40 | #define BCM_VLAN 1 |
41 | #endif | 41 | #endif |
42 | #include <net/ip.h> | 42 | #include <net/ip.h> |
43 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
44 | #include <net/checksum.h> | 44 | #include <net/checksum.h> |
45 | #include <linux/workqueue.h> | 45 | #include <linux/workqueue.h> |
46 | #include <linux/crc32.h> | 46 | #include <linux/crc32.h> |
47 | #include <linux/prefetch.h> | 47 | #include <linux/prefetch.h> |
48 | #include <linux/cache.h> | 48 | #include <linux/cache.h> |
49 | #include <linux/zlib.h> | 49 | #include <linux/zlib.h> |
50 | 50 | ||
51 | #include "bnx2.h" | 51 | #include "bnx2.h" |
52 | #include "bnx2_fw.h" | 52 | #include "bnx2_fw.h" |
53 | #include "bnx2_fw2.h" | 53 | #include "bnx2_fw2.h" |
54 | 54 | ||
55 | #define DRV_MODULE_NAME "bnx2" | 55 | #define DRV_MODULE_NAME "bnx2" |
56 | #define PFX DRV_MODULE_NAME ": " | 56 | #define PFX DRV_MODULE_NAME ": " |
57 | #define DRV_MODULE_VERSION "1.6.2" | 57 | #define DRV_MODULE_VERSION "1.6.2" |
58 | #define DRV_MODULE_RELDATE "July 6, 2007" | 58 | #define DRV_MODULE_RELDATE "July 6, 2007" |
59 | 59 | ||
60 | #define RUN_AT(x) (jiffies + (x)) | 60 | #define RUN_AT(x) (jiffies + (x)) |
61 | 61 | ||
62 | /* Time in jiffies before concluding the transmitter is hung. */ | 62 | /* Time in jiffies before concluding the transmitter is hung. */ |
63 | #define TX_TIMEOUT (5*HZ) | 63 | #define TX_TIMEOUT (5*HZ) |
64 | 64 | ||
65 | static const char version[] __devinitdata = | 65 | static const char version[] __devinitdata = |
66 | "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 66 | "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
67 | 67 | ||
68 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); | 68 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); |
69 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver"); | 69 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver"); |
70 | MODULE_LICENSE("GPL"); | 70 | MODULE_LICENSE("GPL"); |
71 | MODULE_VERSION(DRV_MODULE_VERSION); | 71 | MODULE_VERSION(DRV_MODULE_VERSION); |
72 | 72 | ||
73 | static int disable_msi = 0; | 73 | static int disable_msi = 0; |
74 | 74 | ||
75 | module_param(disable_msi, int, 0); | 75 | module_param(disable_msi, int, 0); |
76 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); | 76 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); |
77 | 77 | ||
78 | typedef enum { | 78 | typedef enum { |
79 | BCM5706 = 0, | 79 | BCM5706 = 0, |
80 | NC370T, | 80 | NC370T, |
81 | NC370I, | 81 | NC370I, |
82 | BCM5706S, | 82 | BCM5706S, |
83 | NC370F, | 83 | NC370F, |
84 | BCM5708, | 84 | BCM5708, |
85 | BCM5708S, | 85 | BCM5708S, |
86 | BCM5709, | 86 | BCM5709, |
87 | BCM5709S, | 87 | BCM5709S, |
88 | } board_t; | 88 | } board_t; |
89 | 89 | ||
90 | /* indexed by board_t, above */ | 90 | /* indexed by board_t, above */ |
91 | static const struct { | 91 | static const struct { |
92 | char *name; | 92 | char *name; |
93 | } board_info[] __devinitdata = { | 93 | } board_info[] __devinitdata = { |
94 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, | 94 | { "Broadcom NetXtreme II BCM5706 1000Base-T" }, |
95 | { "HP NC370T Multifunction Gigabit Server Adapter" }, | 95 | { "HP NC370T Multifunction Gigabit Server Adapter" }, |
96 | { "HP NC370i Multifunction Gigabit Server Adapter" }, | 96 | { "HP NC370i Multifunction Gigabit Server Adapter" }, |
97 | { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, | 97 | { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, |
98 | { "HP NC370F Multifunction Gigabit Server Adapter" }, | 98 | { "HP NC370F Multifunction Gigabit Server Adapter" }, |
99 | { "Broadcom NetXtreme II BCM5708 1000Base-T" }, | 99 | { "Broadcom NetXtreme II BCM5708 1000Base-T" }, |
100 | { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, | 100 | { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, |
101 | { "Broadcom NetXtreme II BCM5709 1000Base-T" }, | 101 | { "Broadcom NetXtreme II BCM5709 1000Base-T" }, |
102 | { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, | 102 | { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, |
103 | }; | 103 | }; |
104 | 104 | ||
105 | static struct pci_device_id bnx2_pci_tbl[] = { | 105 | static struct pci_device_id bnx2_pci_tbl[] = { |
106 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, | 106 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, |
107 | PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, | 107 | PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, |
108 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, | 108 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, |
109 | PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I }, | 109 | PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I }, |
110 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, | 110 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, |
111 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 }, | 111 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 }, |
112 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, | 112 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, |
113 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 }, | 113 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 }, |
114 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, | 114 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, |
115 | PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F }, | 115 | PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F }, |
116 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, | 116 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, |
117 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S }, | 117 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S }, |
118 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, | 118 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, |
119 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, | 119 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, |
120 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, | 120 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, |
121 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, | 121 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, |
122 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, | 122 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, |
123 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, | 123 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, |
124 | { 0, } | 124 | { 0, } |
125 | }; | 125 | }; |
126 | 126 | ||
127 | static struct flash_spec flash_table[] = | 127 | static struct flash_spec flash_table[] = |
128 | { | 128 | { |
129 | /* Slow EEPROM */ | 129 | /* Slow EEPROM */ |
130 | {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, | 130 | {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, |
131 | 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, | 131 | 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, |
132 | SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, | 132 | SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, |
133 | "EEPROM - slow"}, | 133 | "EEPROM - slow"}, |
134 | /* Expansion entry 0001 */ | 134 | /* Expansion entry 0001 */ |
135 | {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, | 135 | {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, |
136 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 136 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
137 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 137 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
138 | "Entry 0001"}, | 138 | "Entry 0001"}, |
139 | /* Saifun SA25F010 (non-buffered flash) */ | 139 | /* Saifun SA25F010 (non-buffered flash) */ |
140 | /* strap, cfg1, & write1 need updates */ | 140 | /* strap, cfg1, & write1 need updates */ |
141 | {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, | 141 | {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, |
142 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 142 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
143 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, | 143 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, |
144 | "Non-buffered flash (128kB)"}, | 144 | "Non-buffered flash (128kB)"}, |
145 | /* Saifun SA25F020 (non-buffered flash) */ | 145 | /* Saifun SA25F020 (non-buffered flash) */ |
146 | /* strap, cfg1, & write1 need updates */ | 146 | /* strap, cfg1, & write1 need updates */ |
147 | {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, | 147 | {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, |
148 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 148 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
149 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, | 149 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, |
150 | "Non-buffered flash (256kB)"}, | 150 | "Non-buffered flash (256kB)"}, |
151 | /* Expansion entry 0100 */ | 151 | /* Expansion entry 0100 */ |
152 | {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, | 152 | {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, |
153 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 153 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
154 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 154 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
155 | "Entry 0100"}, | 155 | "Entry 0100"}, |
156 | /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ | 156 | /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ |
157 | {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, | 157 | {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, |
158 | 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, | 158 | 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, |
159 | ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, | 159 | ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, |
160 | "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, | 160 | "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, |
161 | /* Entry 0110: ST M45PE20 (non-buffered flash)*/ | 161 | /* Entry 0110: ST M45PE20 (non-buffered flash)*/ |
162 | {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, | 162 | {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, |
163 | 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, | 163 | 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, |
164 | ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, | 164 | ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, |
165 | "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, | 165 | "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, |
166 | /* Saifun SA25F005 (non-buffered flash) */ | 166 | /* Saifun SA25F005 (non-buffered flash) */ |
167 | /* strap, cfg1, & write1 need updates */ | 167 | /* strap, cfg1, & write1 need updates */ |
168 | {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, | 168 | {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, |
169 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 169 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
170 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, | 170 | SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, |
171 | "Non-buffered flash (64kB)"}, | 171 | "Non-buffered flash (64kB)"}, |
172 | /* Fast EEPROM */ | 172 | /* Fast EEPROM */ |
173 | {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, | 173 | {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, |
174 | 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, | 174 | 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, |
175 | SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, | 175 | SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, |
176 | "EEPROM - fast"}, | 176 | "EEPROM - fast"}, |
177 | /* Expansion entry 1001 */ | 177 | /* Expansion entry 1001 */ |
178 | {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, | 178 | {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, |
179 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 179 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
180 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 180 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
181 | "Entry 1001"}, | 181 | "Entry 1001"}, |
182 | /* Expansion entry 1010 */ | 182 | /* Expansion entry 1010 */ |
183 | {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, | 183 | {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, |
184 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 184 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
185 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 185 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
186 | "Entry 1010"}, | 186 | "Entry 1010"}, |
187 | /* ATMEL AT45DB011B (buffered flash) */ | 187 | /* ATMEL AT45DB011B (buffered flash) */ |
188 | {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, | 188 | {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, |
189 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, | 189 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, |
190 | BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, | 190 | BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, |
191 | "Buffered flash (128kB)"}, | 191 | "Buffered flash (128kB)"}, |
192 | /* Expansion entry 1100 */ | 192 | /* Expansion entry 1100 */ |
193 | {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, | 193 | {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, |
194 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 194 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
195 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 195 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
196 | "Entry 1100"}, | 196 | "Entry 1100"}, |
197 | /* Expansion entry 1101 */ | 197 | /* Expansion entry 1101 */ |
198 | {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, | 198 | {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, |
199 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, | 199 | 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, |
200 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, | 200 | SAIFUN_FLASH_BYTE_ADDR_MASK, 0, |
201 | "Entry 1101"}, | 201 | "Entry 1101"}, |
202 | /* Ateml Expansion entry 1110 */ | 202 | /* Ateml Expansion entry 1110 */ |
203 | {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, | 203 | {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, |
204 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, | 204 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, |
205 | BUFFERED_FLASH_BYTE_ADDR_MASK, 0, | 205 | BUFFERED_FLASH_BYTE_ADDR_MASK, 0, |
206 | "Entry 1110 (Atmel)"}, | 206 | "Entry 1110 (Atmel)"}, |
207 | /* ATMEL AT45DB021B (buffered flash) */ | 207 | /* ATMEL AT45DB021B (buffered flash) */ |
208 | {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, | 208 | {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, |
209 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, | 209 | 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, |
210 | BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, | 210 | BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, |
211 | "Buffered flash (256kB)"}, | 211 | "Buffered flash (256kB)"}, |
212 | }; | 212 | }; |
213 | 213 | ||
214 | MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); | 214 | MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); |
215 | 215 | ||
216 | static inline u32 bnx2_tx_avail(struct bnx2 *bp) | 216 | static inline u32 bnx2_tx_avail(struct bnx2 *bp) |
217 | { | 217 | { |
218 | u32 diff; | 218 | u32 diff; |
219 | 219 | ||
220 | smp_mb(); | 220 | smp_mb(); |
221 | 221 | ||
222 | /* The ring uses 256 indices for 255 entries, one of them | 222 | /* The ring uses 256 indices for 255 entries, one of them |
223 | * needs to be skipped. | 223 | * needs to be skipped. |
224 | */ | 224 | */ |
225 | diff = bp->tx_prod - bp->tx_cons; | 225 | diff = bp->tx_prod - bp->tx_cons; |
226 | if (unlikely(diff >= TX_DESC_CNT)) { | 226 | if (unlikely(diff >= TX_DESC_CNT)) { |
227 | diff &= 0xffff; | 227 | diff &= 0xffff; |
228 | if (diff == TX_DESC_CNT) | 228 | if (diff == TX_DESC_CNT) |
229 | diff = MAX_TX_DESC_CNT; | 229 | diff = MAX_TX_DESC_CNT; |
230 | } | 230 | } |
231 | return (bp->tx_ring_size - diff); | 231 | return (bp->tx_ring_size - diff); |
232 | } | 232 | } |
233 | 233 | ||
234 | static u32 | 234 | static u32 |
235 | bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) | 235 | bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) |
236 | { | 236 | { |
237 | u32 val; | 237 | u32 val; |
238 | 238 | ||
239 | spin_lock_bh(&bp->indirect_lock); | 239 | spin_lock_bh(&bp->indirect_lock); |
240 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); | 240 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); |
241 | val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); | 241 | val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); |
242 | spin_unlock_bh(&bp->indirect_lock); | 242 | spin_unlock_bh(&bp->indirect_lock); |
243 | return val; | 243 | return val; |
244 | } | 244 | } |
245 | 245 | ||
246 | static void | 246 | static void |
247 | bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) | 247 | bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) |
248 | { | 248 | { |
249 | spin_lock_bh(&bp->indirect_lock); | 249 | spin_lock_bh(&bp->indirect_lock); |
250 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); | 250 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); |
251 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); | 251 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); |
252 | spin_unlock_bh(&bp->indirect_lock); | 252 | spin_unlock_bh(&bp->indirect_lock); |
253 | } | 253 | } |
254 | 254 | ||
255 | static void | 255 | static void |
256 | bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) | 256 | bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) |
257 | { | 257 | { |
258 | offset += cid_addr; | 258 | offset += cid_addr; |
259 | spin_lock_bh(&bp->indirect_lock); | 259 | spin_lock_bh(&bp->indirect_lock); |
260 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 260 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
261 | int i; | 261 | int i; |
262 | 262 | ||
263 | REG_WR(bp, BNX2_CTX_CTX_DATA, val); | 263 | REG_WR(bp, BNX2_CTX_CTX_DATA, val); |
264 | REG_WR(bp, BNX2_CTX_CTX_CTRL, | 264 | REG_WR(bp, BNX2_CTX_CTX_CTRL, |
265 | offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); | 265 | offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); |
266 | for (i = 0; i < 5; i++) { | 266 | for (i = 0; i < 5; i++) { |
267 | u32 val; | 267 | u32 val; |
268 | val = REG_RD(bp, BNX2_CTX_CTX_CTRL); | 268 | val = REG_RD(bp, BNX2_CTX_CTX_CTRL); |
269 | if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) | 269 | if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) |
270 | break; | 270 | break; |
271 | udelay(5); | 271 | udelay(5); |
272 | } | 272 | } |
273 | } else { | 273 | } else { |
274 | REG_WR(bp, BNX2_CTX_DATA_ADR, offset); | 274 | REG_WR(bp, BNX2_CTX_DATA_ADR, offset); |
275 | REG_WR(bp, BNX2_CTX_DATA, val); | 275 | REG_WR(bp, BNX2_CTX_DATA, val); |
276 | } | 276 | } |
277 | spin_unlock_bh(&bp->indirect_lock); | 277 | spin_unlock_bh(&bp->indirect_lock); |
278 | } | 278 | } |
279 | 279 | ||
280 | static int | 280 | static int |
281 | bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) | 281 | bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) |
282 | { | 282 | { |
283 | u32 val1; | 283 | u32 val1; |
284 | int i, ret; | 284 | int i, ret; |
285 | 285 | ||
286 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 286 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { |
287 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 287 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
288 | val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; | 288 | val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; |
289 | 289 | ||
290 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); | 290 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); |
291 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 291 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
292 | 292 | ||
293 | udelay(40); | 293 | udelay(40); |
294 | } | 294 | } |
295 | 295 | ||
296 | val1 = (bp->phy_addr << 21) | (reg << 16) | | 296 | val1 = (bp->phy_addr << 21) | (reg << 16) | |
297 | BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | | 297 | BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | |
298 | BNX2_EMAC_MDIO_COMM_START_BUSY; | 298 | BNX2_EMAC_MDIO_COMM_START_BUSY; |
299 | REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); | 299 | REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); |
300 | 300 | ||
301 | for (i = 0; i < 50; i++) { | 301 | for (i = 0; i < 50; i++) { |
302 | udelay(10); | 302 | udelay(10); |
303 | 303 | ||
304 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); | 304 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); |
305 | if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { | 305 | if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { |
306 | udelay(5); | 306 | udelay(5); |
307 | 307 | ||
308 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); | 308 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); |
309 | val1 &= BNX2_EMAC_MDIO_COMM_DATA; | 309 | val1 &= BNX2_EMAC_MDIO_COMM_DATA; |
310 | 310 | ||
311 | break; | 311 | break; |
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) { | 315 | if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) { |
316 | *val = 0x0; | 316 | *val = 0x0; |
317 | ret = -EBUSY; | 317 | ret = -EBUSY; |
318 | } | 318 | } |
319 | else { | 319 | else { |
320 | *val = val1; | 320 | *val = val1; |
321 | ret = 0; | 321 | ret = 0; |
322 | } | 322 | } |
323 | 323 | ||
324 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 324 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { |
325 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 325 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
326 | val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; | 326 | val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; |
327 | 327 | ||
328 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); | 328 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); |
329 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 329 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
330 | 330 | ||
331 | udelay(40); | 331 | udelay(40); |
332 | } | 332 | } |
333 | 333 | ||
334 | return ret; | 334 | return ret; |
335 | } | 335 | } |
336 | 336 | ||
337 | static int | 337 | static int |
338 | bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) | 338 | bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) |
339 | { | 339 | { |
340 | u32 val1; | 340 | u32 val1; |
341 | int i, ret; | 341 | int i, ret; |
342 | 342 | ||
343 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 343 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { |
344 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 344 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
345 | val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; | 345 | val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; |
346 | 346 | ||
347 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); | 347 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); |
348 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 348 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
349 | 349 | ||
350 | udelay(40); | 350 | udelay(40); |
351 | } | 351 | } |
352 | 352 | ||
353 | val1 = (bp->phy_addr << 21) | (reg << 16) | val | | 353 | val1 = (bp->phy_addr << 21) | (reg << 16) | val | |
354 | BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | | 354 | BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | |
355 | BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; | 355 | BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; |
356 | REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); | 356 | REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); |
357 | 357 | ||
358 | for (i = 0; i < 50; i++) { | 358 | for (i = 0; i < 50; i++) { |
359 | udelay(10); | 359 | udelay(10); |
360 | 360 | ||
361 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); | 361 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); |
362 | if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { | 362 | if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { |
363 | udelay(5); | 363 | udelay(5); |
364 | break; | 364 | break; |
365 | } | 365 | } |
366 | } | 366 | } |
367 | 367 | ||
368 | if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) | 368 | if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) |
369 | ret = -EBUSY; | 369 | ret = -EBUSY; |
370 | else | 370 | else |
371 | ret = 0; | 371 | ret = 0; |
372 | 372 | ||
373 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 373 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { |
374 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 374 | val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
375 | val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; | 375 | val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; |
376 | 376 | ||
377 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); | 377 | REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); |
378 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); | 378 | REG_RD(bp, BNX2_EMAC_MDIO_MODE); |
379 | 379 | ||
380 | udelay(40); | 380 | udelay(40); |
381 | } | 381 | } |
382 | 382 | ||
383 | return ret; | 383 | return ret; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void | 386 | static void |
387 | bnx2_disable_int(struct bnx2 *bp) | 387 | bnx2_disable_int(struct bnx2 *bp) |
388 | { | 388 | { |
389 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 389 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
390 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); | 390 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); |
391 | REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); | 391 | REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); |
392 | } | 392 | } |
393 | 393 | ||
394 | static void | 394 | static void |
395 | bnx2_enable_int(struct bnx2 *bp) | 395 | bnx2_enable_int(struct bnx2 *bp) |
396 | { | 396 | { |
397 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 397 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
398 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 398 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
399 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); | 399 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); |
400 | 400 | ||
401 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 401 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
402 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx); | 402 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx); |
403 | 403 | ||
404 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); | 404 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); |
405 | } | 405 | } |
406 | 406 | ||
407 | static void | 407 | static void |
408 | bnx2_disable_int_sync(struct bnx2 *bp) | 408 | bnx2_disable_int_sync(struct bnx2 *bp) |
409 | { | 409 | { |
410 | atomic_inc(&bp->intr_sem); | 410 | atomic_inc(&bp->intr_sem); |
411 | bnx2_disable_int(bp); | 411 | bnx2_disable_int(bp); |
412 | synchronize_irq(bp->pdev->irq); | 412 | synchronize_irq(bp->pdev->irq); |
413 | } | 413 | } |
414 | 414 | ||
415 | static void | 415 | static void |
416 | bnx2_netif_stop(struct bnx2 *bp) | 416 | bnx2_netif_stop(struct bnx2 *bp) |
417 | { | 417 | { |
418 | bnx2_disable_int_sync(bp); | 418 | bnx2_disable_int_sync(bp); |
419 | if (netif_running(bp->dev)) { | 419 | if (netif_running(bp->dev)) { |
420 | netif_poll_disable(bp->dev); | 420 | netif_poll_disable(bp->dev); |
421 | netif_tx_disable(bp->dev); | 421 | netif_tx_disable(bp->dev); |
422 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 422 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | static void | 426 | static void |
427 | bnx2_netif_start(struct bnx2 *bp) | 427 | bnx2_netif_start(struct bnx2 *bp) |
428 | { | 428 | { |
429 | if (atomic_dec_and_test(&bp->intr_sem)) { | 429 | if (atomic_dec_and_test(&bp->intr_sem)) { |
430 | if (netif_running(bp->dev)) { | 430 | if (netif_running(bp->dev)) { |
431 | netif_wake_queue(bp->dev); | 431 | netif_wake_queue(bp->dev); |
432 | netif_poll_enable(bp->dev); | 432 | netif_poll_enable(bp->dev); |
433 | bnx2_enable_int(bp); | 433 | bnx2_enable_int(bp); |
434 | } | 434 | } |
435 | } | 435 | } |
436 | } | 436 | } |
437 | 437 | ||
438 | static void | 438 | static void |
439 | bnx2_free_mem(struct bnx2 *bp) | 439 | bnx2_free_mem(struct bnx2 *bp) |
440 | { | 440 | { |
441 | int i; | 441 | int i; |
442 | 442 | ||
443 | for (i = 0; i < bp->ctx_pages; i++) { | 443 | for (i = 0; i < bp->ctx_pages; i++) { |
444 | if (bp->ctx_blk[i]) { | 444 | if (bp->ctx_blk[i]) { |
445 | pci_free_consistent(bp->pdev, BCM_PAGE_SIZE, | 445 | pci_free_consistent(bp->pdev, BCM_PAGE_SIZE, |
446 | bp->ctx_blk[i], | 446 | bp->ctx_blk[i], |
447 | bp->ctx_blk_mapping[i]); | 447 | bp->ctx_blk_mapping[i]); |
448 | bp->ctx_blk[i] = NULL; | 448 | bp->ctx_blk[i] = NULL; |
449 | } | 449 | } |
450 | } | 450 | } |
451 | if (bp->status_blk) { | 451 | if (bp->status_blk) { |
452 | pci_free_consistent(bp->pdev, bp->status_stats_size, | 452 | pci_free_consistent(bp->pdev, bp->status_stats_size, |
453 | bp->status_blk, bp->status_blk_mapping); | 453 | bp->status_blk, bp->status_blk_mapping); |
454 | bp->status_blk = NULL; | 454 | bp->status_blk = NULL; |
455 | bp->stats_blk = NULL; | 455 | bp->stats_blk = NULL; |
456 | } | 456 | } |
457 | if (bp->tx_desc_ring) { | 457 | if (bp->tx_desc_ring) { |
458 | pci_free_consistent(bp->pdev, | 458 | pci_free_consistent(bp->pdev, |
459 | sizeof(struct tx_bd) * TX_DESC_CNT, | 459 | sizeof(struct tx_bd) * TX_DESC_CNT, |
460 | bp->tx_desc_ring, bp->tx_desc_mapping); | 460 | bp->tx_desc_ring, bp->tx_desc_mapping); |
461 | bp->tx_desc_ring = NULL; | 461 | bp->tx_desc_ring = NULL; |
462 | } | 462 | } |
463 | kfree(bp->tx_buf_ring); | 463 | kfree(bp->tx_buf_ring); |
464 | bp->tx_buf_ring = NULL; | 464 | bp->tx_buf_ring = NULL; |
465 | for (i = 0; i < bp->rx_max_ring; i++) { | 465 | for (i = 0; i < bp->rx_max_ring; i++) { |
466 | if (bp->rx_desc_ring[i]) | 466 | if (bp->rx_desc_ring[i]) |
467 | pci_free_consistent(bp->pdev, | 467 | pci_free_consistent(bp->pdev, |
468 | sizeof(struct rx_bd) * RX_DESC_CNT, | 468 | sizeof(struct rx_bd) * RX_DESC_CNT, |
469 | bp->rx_desc_ring[i], | 469 | bp->rx_desc_ring[i], |
470 | bp->rx_desc_mapping[i]); | 470 | bp->rx_desc_mapping[i]); |
471 | bp->rx_desc_ring[i] = NULL; | 471 | bp->rx_desc_ring[i] = NULL; |
472 | } | 472 | } |
473 | vfree(bp->rx_buf_ring); | 473 | vfree(bp->rx_buf_ring); |
474 | bp->rx_buf_ring = NULL; | 474 | bp->rx_buf_ring = NULL; |
475 | } | 475 | } |
476 | 476 | ||
477 | static int | 477 | static int |
478 | bnx2_alloc_mem(struct bnx2 *bp) | 478 | bnx2_alloc_mem(struct bnx2 *bp) |
479 | { | 479 | { |
480 | int i, status_blk_size; | 480 | int i, status_blk_size; |
481 | 481 | ||
482 | bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT, | 482 | bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT, |
483 | GFP_KERNEL); | 483 | GFP_KERNEL); |
484 | if (bp->tx_buf_ring == NULL) | 484 | if (bp->tx_buf_ring == NULL) |
485 | return -ENOMEM; | 485 | return -ENOMEM; |
486 | 486 | ||
487 | bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, | 487 | bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, |
488 | sizeof(struct tx_bd) * | 488 | sizeof(struct tx_bd) * |
489 | TX_DESC_CNT, | 489 | TX_DESC_CNT, |
490 | &bp->tx_desc_mapping); | 490 | &bp->tx_desc_mapping); |
491 | if (bp->tx_desc_ring == NULL) | 491 | if (bp->tx_desc_ring == NULL) |
492 | goto alloc_mem_err; | 492 | goto alloc_mem_err; |
493 | 493 | ||
494 | bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT * | 494 | bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT * |
495 | bp->rx_max_ring); | 495 | bp->rx_max_ring); |
496 | if (bp->rx_buf_ring == NULL) | 496 | if (bp->rx_buf_ring == NULL) |
497 | goto alloc_mem_err; | 497 | goto alloc_mem_err; |
498 | 498 | ||
499 | memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT * | 499 | memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT * |
500 | bp->rx_max_ring); | 500 | bp->rx_max_ring); |
501 | 501 | ||
502 | for (i = 0; i < bp->rx_max_ring; i++) { | 502 | for (i = 0; i < bp->rx_max_ring; i++) { |
503 | bp->rx_desc_ring[i] = | 503 | bp->rx_desc_ring[i] = |
504 | pci_alloc_consistent(bp->pdev, | 504 | pci_alloc_consistent(bp->pdev, |
505 | sizeof(struct rx_bd) * RX_DESC_CNT, | 505 | sizeof(struct rx_bd) * RX_DESC_CNT, |
506 | &bp->rx_desc_mapping[i]); | 506 | &bp->rx_desc_mapping[i]); |
507 | if (bp->rx_desc_ring[i] == NULL) | 507 | if (bp->rx_desc_ring[i] == NULL) |
508 | goto alloc_mem_err; | 508 | goto alloc_mem_err; |
509 | 509 | ||
510 | } | 510 | } |
511 | 511 | ||
512 | /* Combine status and statistics blocks into one allocation. */ | 512 | /* Combine status and statistics blocks into one allocation. */ |
513 | status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); | 513 | status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); |
514 | bp->status_stats_size = status_blk_size + | 514 | bp->status_stats_size = status_blk_size + |
515 | sizeof(struct statistics_block); | 515 | sizeof(struct statistics_block); |
516 | 516 | ||
517 | bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, | 517 | bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, |
518 | &bp->status_blk_mapping); | 518 | &bp->status_blk_mapping); |
519 | if (bp->status_blk == NULL) | 519 | if (bp->status_blk == NULL) |
520 | goto alloc_mem_err; | 520 | goto alloc_mem_err; |
521 | 521 | ||
522 | memset(bp->status_blk, 0, bp->status_stats_size); | 522 | memset(bp->status_blk, 0, bp->status_stats_size); |
523 | 523 | ||
524 | bp->stats_blk = (void *) ((unsigned long) bp->status_blk + | 524 | bp->stats_blk = (void *) ((unsigned long) bp->status_blk + |
525 | status_blk_size); | 525 | status_blk_size); |
526 | 526 | ||
527 | bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; | 527 | bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; |
528 | 528 | ||
529 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 529 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
530 | bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; | 530 | bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; |
531 | if (bp->ctx_pages == 0) | 531 | if (bp->ctx_pages == 0) |
532 | bp->ctx_pages = 1; | 532 | bp->ctx_pages = 1; |
533 | for (i = 0; i < bp->ctx_pages; i++) { | 533 | for (i = 0; i < bp->ctx_pages; i++) { |
534 | bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev, | 534 | bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev, |
535 | BCM_PAGE_SIZE, | 535 | BCM_PAGE_SIZE, |
536 | &bp->ctx_blk_mapping[i]); | 536 | &bp->ctx_blk_mapping[i]); |
537 | if (bp->ctx_blk[i] == NULL) | 537 | if (bp->ctx_blk[i] == NULL) |
538 | goto alloc_mem_err; | 538 | goto alloc_mem_err; |
539 | } | 539 | } |
540 | } | 540 | } |
541 | return 0; | 541 | return 0; |
542 | 542 | ||
543 | alloc_mem_err: | 543 | alloc_mem_err: |
544 | bnx2_free_mem(bp); | 544 | bnx2_free_mem(bp); |
545 | return -ENOMEM; | 545 | return -ENOMEM; |
546 | } | 546 | } |
547 | 547 | ||
548 | static void | 548 | static void |
549 | bnx2_report_fw_link(struct bnx2 *bp) | 549 | bnx2_report_fw_link(struct bnx2 *bp) |
550 | { | 550 | { |
551 | u32 fw_link_status = 0; | 551 | u32 fw_link_status = 0; |
552 | 552 | ||
553 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 553 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
554 | return; | 554 | return; |
555 | 555 | ||
556 | if (bp->link_up) { | 556 | if (bp->link_up) { |
557 | u32 bmsr; | 557 | u32 bmsr; |
558 | 558 | ||
559 | switch (bp->line_speed) { | 559 | switch (bp->line_speed) { |
560 | case SPEED_10: | 560 | case SPEED_10: |
561 | if (bp->duplex == DUPLEX_HALF) | 561 | if (bp->duplex == DUPLEX_HALF) |
562 | fw_link_status = BNX2_LINK_STATUS_10HALF; | 562 | fw_link_status = BNX2_LINK_STATUS_10HALF; |
563 | else | 563 | else |
564 | fw_link_status = BNX2_LINK_STATUS_10FULL; | 564 | fw_link_status = BNX2_LINK_STATUS_10FULL; |
565 | break; | 565 | break; |
566 | case SPEED_100: | 566 | case SPEED_100: |
567 | if (bp->duplex == DUPLEX_HALF) | 567 | if (bp->duplex == DUPLEX_HALF) |
568 | fw_link_status = BNX2_LINK_STATUS_100HALF; | 568 | fw_link_status = BNX2_LINK_STATUS_100HALF; |
569 | else | 569 | else |
570 | fw_link_status = BNX2_LINK_STATUS_100FULL; | 570 | fw_link_status = BNX2_LINK_STATUS_100FULL; |
571 | break; | 571 | break; |
572 | case SPEED_1000: | 572 | case SPEED_1000: |
573 | if (bp->duplex == DUPLEX_HALF) | 573 | if (bp->duplex == DUPLEX_HALF) |
574 | fw_link_status = BNX2_LINK_STATUS_1000HALF; | 574 | fw_link_status = BNX2_LINK_STATUS_1000HALF; |
575 | else | 575 | else |
576 | fw_link_status = BNX2_LINK_STATUS_1000FULL; | 576 | fw_link_status = BNX2_LINK_STATUS_1000FULL; |
577 | break; | 577 | break; |
578 | case SPEED_2500: | 578 | case SPEED_2500: |
579 | if (bp->duplex == DUPLEX_HALF) | 579 | if (bp->duplex == DUPLEX_HALF) |
580 | fw_link_status = BNX2_LINK_STATUS_2500HALF; | 580 | fw_link_status = BNX2_LINK_STATUS_2500HALF; |
581 | else | 581 | else |
582 | fw_link_status = BNX2_LINK_STATUS_2500FULL; | 582 | fw_link_status = BNX2_LINK_STATUS_2500FULL; |
583 | break; | 583 | break; |
584 | } | 584 | } |
585 | 585 | ||
586 | fw_link_status |= BNX2_LINK_STATUS_LINK_UP; | 586 | fw_link_status |= BNX2_LINK_STATUS_LINK_UP; |
587 | 587 | ||
588 | if (bp->autoneg) { | 588 | if (bp->autoneg) { |
589 | fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED; | 589 | fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED; |
590 | 590 | ||
591 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 591 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
592 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 592 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
593 | 593 | ||
594 | if (!(bmsr & BMSR_ANEGCOMPLETE) || | 594 | if (!(bmsr & BMSR_ANEGCOMPLETE) || |
595 | bp->phy_flags & PHY_PARALLEL_DETECT_FLAG) | 595 | bp->phy_flags & PHY_PARALLEL_DETECT_FLAG) |
596 | fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET; | 596 | fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET; |
597 | else | 597 | else |
598 | fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE; | 598 | fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE; |
599 | } | 599 | } |
600 | } | 600 | } |
601 | else | 601 | else |
602 | fw_link_status = BNX2_LINK_STATUS_LINK_DOWN; | 602 | fw_link_status = BNX2_LINK_STATUS_LINK_DOWN; |
603 | 603 | ||
604 | REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status); | 604 | REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status); |
605 | } | 605 | } |
606 | 606 | ||
607 | static char * | 607 | static char * |
608 | bnx2_xceiver_str(struct bnx2 *bp) | 608 | bnx2_xceiver_str(struct bnx2 *bp) |
609 | { | 609 | { |
610 | return ((bp->phy_port == PORT_FIBRE) ? "SerDes" : | 610 | return ((bp->phy_port == PORT_FIBRE) ? "SerDes" : |
611 | ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" : | 611 | ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" : |
612 | "Copper")); | 612 | "Copper")); |
613 | } | 613 | } |
614 | 614 | ||
615 | static void | 615 | static void |
616 | bnx2_report_link(struct bnx2 *bp) | 616 | bnx2_report_link(struct bnx2 *bp) |
617 | { | 617 | { |
618 | if (bp->link_up) { | 618 | if (bp->link_up) { |
619 | netif_carrier_on(bp->dev); | 619 | netif_carrier_on(bp->dev); |
620 | printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name, | 620 | printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name, |
621 | bnx2_xceiver_str(bp)); | 621 | bnx2_xceiver_str(bp)); |
622 | 622 | ||
623 | printk("%d Mbps ", bp->line_speed); | 623 | printk("%d Mbps ", bp->line_speed); |
624 | 624 | ||
625 | if (bp->duplex == DUPLEX_FULL) | 625 | if (bp->duplex == DUPLEX_FULL) |
626 | printk("full duplex"); | 626 | printk("full duplex"); |
627 | else | 627 | else |
628 | printk("half duplex"); | 628 | printk("half duplex"); |
629 | 629 | ||
630 | if (bp->flow_ctrl) { | 630 | if (bp->flow_ctrl) { |
631 | if (bp->flow_ctrl & FLOW_CTRL_RX) { | 631 | if (bp->flow_ctrl & FLOW_CTRL_RX) { |
632 | printk(", receive "); | 632 | printk(", receive "); |
633 | if (bp->flow_ctrl & FLOW_CTRL_TX) | 633 | if (bp->flow_ctrl & FLOW_CTRL_TX) |
634 | printk("& transmit "); | 634 | printk("& transmit "); |
635 | } | 635 | } |
636 | else { | 636 | else { |
637 | printk(", transmit "); | 637 | printk(", transmit "); |
638 | } | 638 | } |
639 | printk("flow control ON"); | 639 | printk("flow control ON"); |
640 | } | 640 | } |
641 | printk("\n"); | 641 | printk("\n"); |
642 | } | 642 | } |
643 | else { | 643 | else { |
644 | netif_carrier_off(bp->dev); | 644 | netif_carrier_off(bp->dev); |
645 | printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name, | 645 | printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name, |
646 | bnx2_xceiver_str(bp)); | 646 | bnx2_xceiver_str(bp)); |
647 | } | 647 | } |
648 | 648 | ||
649 | bnx2_report_fw_link(bp); | 649 | bnx2_report_fw_link(bp); |
650 | } | 650 | } |
651 | 651 | ||
652 | static void | 652 | static void |
653 | bnx2_resolve_flow_ctrl(struct bnx2 *bp) | 653 | bnx2_resolve_flow_ctrl(struct bnx2 *bp) |
654 | { | 654 | { |
655 | u32 local_adv, remote_adv; | 655 | u32 local_adv, remote_adv; |
656 | 656 | ||
657 | bp->flow_ctrl = 0; | 657 | bp->flow_ctrl = 0; |
658 | if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != | 658 | if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != |
659 | (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { | 659 | (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { |
660 | 660 | ||
661 | if (bp->duplex == DUPLEX_FULL) { | 661 | if (bp->duplex == DUPLEX_FULL) { |
662 | bp->flow_ctrl = bp->req_flow_ctrl; | 662 | bp->flow_ctrl = bp->req_flow_ctrl; |
663 | } | 663 | } |
664 | return; | 664 | return; |
665 | } | 665 | } |
666 | 666 | ||
667 | if (bp->duplex != DUPLEX_FULL) { | 667 | if (bp->duplex != DUPLEX_FULL) { |
668 | return; | 668 | return; |
669 | } | 669 | } |
670 | 670 | ||
671 | if ((bp->phy_flags & PHY_SERDES_FLAG) && | 671 | if ((bp->phy_flags & PHY_SERDES_FLAG) && |
672 | (CHIP_NUM(bp) == CHIP_NUM_5708)) { | 672 | (CHIP_NUM(bp) == CHIP_NUM_5708)) { |
673 | u32 val; | 673 | u32 val; |
674 | 674 | ||
675 | bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); | 675 | bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); |
676 | if (val & BCM5708S_1000X_STAT1_TX_PAUSE) | 676 | if (val & BCM5708S_1000X_STAT1_TX_PAUSE) |
677 | bp->flow_ctrl |= FLOW_CTRL_TX; | 677 | bp->flow_ctrl |= FLOW_CTRL_TX; |
678 | if (val & BCM5708S_1000X_STAT1_RX_PAUSE) | 678 | if (val & BCM5708S_1000X_STAT1_RX_PAUSE) |
679 | bp->flow_ctrl |= FLOW_CTRL_RX; | 679 | bp->flow_ctrl |= FLOW_CTRL_RX; |
680 | return; | 680 | return; |
681 | } | 681 | } |
682 | 682 | ||
683 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); | 683 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); |
684 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); | 684 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); |
685 | 685 | ||
686 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 686 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
687 | u32 new_local_adv = 0; | 687 | u32 new_local_adv = 0; |
688 | u32 new_remote_adv = 0; | 688 | u32 new_remote_adv = 0; |
689 | 689 | ||
690 | if (local_adv & ADVERTISE_1000XPAUSE) | 690 | if (local_adv & ADVERTISE_1000XPAUSE) |
691 | new_local_adv |= ADVERTISE_PAUSE_CAP; | 691 | new_local_adv |= ADVERTISE_PAUSE_CAP; |
692 | if (local_adv & ADVERTISE_1000XPSE_ASYM) | 692 | if (local_adv & ADVERTISE_1000XPSE_ASYM) |
693 | new_local_adv |= ADVERTISE_PAUSE_ASYM; | 693 | new_local_adv |= ADVERTISE_PAUSE_ASYM; |
694 | if (remote_adv & ADVERTISE_1000XPAUSE) | 694 | if (remote_adv & ADVERTISE_1000XPAUSE) |
695 | new_remote_adv |= ADVERTISE_PAUSE_CAP; | 695 | new_remote_adv |= ADVERTISE_PAUSE_CAP; |
696 | if (remote_adv & ADVERTISE_1000XPSE_ASYM) | 696 | if (remote_adv & ADVERTISE_1000XPSE_ASYM) |
697 | new_remote_adv |= ADVERTISE_PAUSE_ASYM; | 697 | new_remote_adv |= ADVERTISE_PAUSE_ASYM; |
698 | 698 | ||
699 | local_adv = new_local_adv; | 699 | local_adv = new_local_adv; |
700 | remote_adv = new_remote_adv; | 700 | remote_adv = new_remote_adv; |
701 | } | 701 | } |
702 | 702 | ||
703 | /* See Table 28B-3 of 802.3ab-1999 spec. */ | 703 | /* See Table 28B-3 of 802.3ab-1999 spec. */ |
704 | if (local_adv & ADVERTISE_PAUSE_CAP) { | 704 | if (local_adv & ADVERTISE_PAUSE_CAP) { |
705 | if(local_adv & ADVERTISE_PAUSE_ASYM) { | 705 | if(local_adv & ADVERTISE_PAUSE_ASYM) { |
706 | if (remote_adv & ADVERTISE_PAUSE_CAP) { | 706 | if (remote_adv & ADVERTISE_PAUSE_CAP) { |
707 | bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | 707 | bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; |
708 | } | 708 | } |
709 | else if (remote_adv & ADVERTISE_PAUSE_ASYM) { | 709 | else if (remote_adv & ADVERTISE_PAUSE_ASYM) { |
710 | bp->flow_ctrl = FLOW_CTRL_RX; | 710 | bp->flow_ctrl = FLOW_CTRL_RX; |
711 | } | 711 | } |
712 | } | 712 | } |
713 | else { | 713 | else { |
714 | if (remote_adv & ADVERTISE_PAUSE_CAP) { | 714 | if (remote_adv & ADVERTISE_PAUSE_CAP) { |
715 | bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | 715 | bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; |
716 | } | 716 | } |
717 | } | 717 | } |
718 | } | 718 | } |
719 | else if (local_adv & ADVERTISE_PAUSE_ASYM) { | 719 | else if (local_adv & ADVERTISE_PAUSE_ASYM) { |
720 | if ((remote_adv & ADVERTISE_PAUSE_CAP) && | 720 | if ((remote_adv & ADVERTISE_PAUSE_CAP) && |
721 | (remote_adv & ADVERTISE_PAUSE_ASYM)) { | 721 | (remote_adv & ADVERTISE_PAUSE_ASYM)) { |
722 | 722 | ||
723 | bp->flow_ctrl = FLOW_CTRL_TX; | 723 | bp->flow_ctrl = FLOW_CTRL_TX; |
724 | } | 724 | } |
725 | } | 725 | } |
726 | } | 726 | } |
727 | 727 | ||
728 | static int | 728 | static int |
729 | bnx2_5709s_linkup(struct bnx2 *bp) | 729 | bnx2_5709s_linkup(struct bnx2 *bp) |
730 | { | 730 | { |
731 | u32 val, speed; | 731 | u32 val, speed; |
732 | 732 | ||
733 | bp->link_up = 1; | 733 | bp->link_up = 1; |
734 | 734 | ||
735 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); | 735 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); |
736 | bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); | 736 | bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); |
737 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 737 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
738 | 738 | ||
739 | if ((bp->autoneg & AUTONEG_SPEED) == 0) { | 739 | if ((bp->autoneg & AUTONEG_SPEED) == 0) { |
740 | bp->line_speed = bp->req_line_speed; | 740 | bp->line_speed = bp->req_line_speed; |
741 | bp->duplex = bp->req_duplex; | 741 | bp->duplex = bp->req_duplex; |
742 | return 0; | 742 | return 0; |
743 | } | 743 | } |
744 | speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; | 744 | speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; |
745 | switch (speed) { | 745 | switch (speed) { |
746 | case MII_BNX2_GP_TOP_AN_SPEED_10: | 746 | case MII_BNX2_GP_TOP_AN_SPEED_10: |
747 | bp->line_speed = SPEED_10; | 747 | bp->line_speed = SPEED_10; |
748 | break; | 748 | break; |
749 | case MII_BNX2_GP_TOP_AN_SPEED_100: | 749 | case MII_BNX2_GP_TOP_AN_SPEED_100: |
750 | bp->line_speed = SPEED_100; | 750 | bp->line_speed = SPEED_100; |
751 | break; | 751 | break; |
752 | case MII_BNX2_GP_TOP_AN_SPEED_1G: | 752 | case MII_BNX2_GP_TOP_AN_SPEED_1G: |
753 | case MII_BNX2_GP_TOP_AN_SPEED_1GKV: | 753 | case MII_BNX2_GP_TOP_AN_SPEED_1GKV: |
754 | bp->line_speed = SPEED_1000; | 754 | bp->line_speed = SPEED_1000; |
755 | break; | 755 | break; |
756 | case MII_BNX2_GP_TOP_AN_SPEED_2_5G: | 756 | case MII_BNX2_GP_TOP_AN_SPEED_2_5G: |
757 | bp->line_speed = SPEED_2500; | 757 | bp->line_speed = SPEED_2500; |
758 | break; | 758 | break; |
759 | } | 759 | } |
760 | if (val & MII_BNX2_GP_TOP_AN_FD) | 760 | if (val & MII_BNX2_GP_TOP_AN_FD) |
761 | bp->duplex = DUPLEX_FULL; | 761 | bp->duplex = DUPLEX_FULL; |
762 | else | 762 | else |
763 | bp->duplex = DUPLEX_HALF; | 763 | bp->duplex = DUPLEX_HALF; |
764 | return 0; | 764 | return 0; |
765 | } | 765 | } |
766 | 766 | ||
767 | static int | 767 | static int |
768 | bnx2_5708s_linkup(struct bnx2 *bp) | 768 | bnx2_5708s_linkup(struct bnx2 *bp) |
769 | { | 769 | { |
770 | u32 val; | 770 | u32 val; |
771 | 771 | ||
772 | bp->link_up = 1; | 772 | bp->link_up = 1; |
773 | bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); | 773 | bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); |
774 | switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) { | 774 | switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) { |
775 | case BCM5708S_1000X_STAT1_SPEED_10: | 775 | case BCM5708S_1000X_STAT1_SPEED_10: |
776 | bp->line_speed = SPEED_10; | 776 | bp->line_speed = SPEED_10; |
777 | break; | 777 | break; |
778 | case BCM5708S_1000X_STAT1_SPEED_100: | 778 | case BCM5708S_1000X_STAT1_SPEED_100: |
779 | bp->line_speed = SPEED_100; | 779 | bp->line_speed = SPEED_100; |
780 | break; | 780 | break; |
781 | case BCM5708S_1000X_STAT1_SPEED_1G: | 781 | case BCM5708S_1000X_STAT1_SPEED_1G: |
782 | bp->line_speed = SPEED_1000; | 782 | bp->line_speed = SPEED_1000; |
783 | break; | 783 | break; |
784 | case BCM5708S_1000X_STAT1_SPEED_2G5: | 784 | case BCM5708S_1000X_STAT1_SPEED_2G5: |
785 | bp->line_speed = SPEED_2500; | 785 | bp->line_speed = SPEED_2500; |
786 | break; | 786 | break; |
787 | } | 787 | } |
788 | if (val & BCM5708S_1000X_STAT1_FD) | 788 | if (val & BCM5708S_1000X_STAT1_FD) |
789 | bp->duplex = DUPLEX_FULL; | 789 | bp->duplex = DUPLEX_FULL; |
790 | else | 790 | else |
791 | bp->duplex = DUPLEX_HALF; | 791 | bp->duplex = DUPLEX_HALF; |
792 | 792 | ||
793 | return 0; | 793 | return 0; |
794 | } | 794 | } |
795 | 795 | ||
796 | static int | 796 | static int |
797 | bnx2_5706s_linkup(struct bnx2 *bp) | 797 | bnx2_5706s_linkup(struct bnx2 *bp) |
798 | { | 798 | { |
799 | u32 bmcr, local_adv, remote_adv, common; | 799 | u32 bmcr, local_adv, remote_adv, common; |
800 | 800 | ||
801 | bp->link_up = 1; | 801 | bp->link_up = 1; |
802 | bp->line_speed = SPEED_1000; | 802 | bp->line_speed = SPEED_1000; |
803 | 803 | ||
804 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 804 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
805 | if (bmcr & BMCR_FULLDPLX) { | 805 | if (bmcr & BMCR_FULLDPLX) { |
806 | bp->duplex = DUPLEX_FULL; | 806 | bp->duplex = DUPLEX_FULL; |
807 | } | 807 | } |
808 | else { | 808 | else { |
809 | bp->duplex = DUPLEX_HALF; | 809 | bp->duplex = DUPLEX_HALF; |
810 | } | 810 | } |
811 | 811 | ||
812 | if (!(bmcr & BMCR_ANENABLE)) { | 812 | if (!(bmcr & BMCR_ANENABLE)) { |
813 | return 0; | 813 | return 0; |
814 | } | 814 | } |
815 | 815 | ||
816 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); | 816 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); |
817 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); | 817 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); |
818 | 818 | ||
819 | common = local_adv & remote_adv; | 819 | common = local_adv & remote_adv; |
820 | if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { | 820 | if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { |
821 | 821 | ||
822 | if (common & ADVERTISE_1000XFULL) { | 822 | if (common & ADVERTISE_1000XFULL) { |
823 | bp->duplex = DUPLEX_FULL; | 823 | bp->duplex = DUPLEX_FULL; |
824 | } | 824 | } |
825 | else { | 825 | else { |
826 | bp->duplex = DUPLEX_HALF; | 826 | bp->duplex = DUPLEX_HALF; |
827 | } | 827 | } |
828 | } | 828 | } |
829 | 829 | ||
830 | return 0; | 830 | return 0; |
831 | } | 831 | } |
832 | 832 | ||
833 | static int | 833 | static int |
834 | bnx2_copper_linkup(struct bnx2 *bp) | 834 | bnx2_copper_linkup(struct bnx2 *bp) |
835 | { | 835 | { |
836 | u32 bmcr; | 836 | u32 bmcr; |
837 | 837 | ||
838 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 838 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
839 | if (bmcr & BMCR_ANENABLE) { | 839 | if (bmcr & BMCR_ANENABLE) { |
840 | u32 local_adv, remote_adv, common; | 840 | u32 local_adv, remote_adv, common; |
841 | 841 | ||
842 | bnx2_read_phy(bp, MII_CTRL1000, &local_adv); | 842 | bnx2_read_phy(bp, MII_CTRL1000, &local_adv); |
843 | bnx2_read_phy(bp, MII_STAT1000, &remote_adv); | 843 | bnx2_read_phy(bp, MII_STAT1000, &remote_adv); |
844 | 844 | ||
845 | common = local_adv & (remote_adv >> 2); | 845 | common = local_adv & (remote_adv >> 2); |
846 | if (common & ADVERTISE_1000FULL) { | 846 | if (common & ADVERTISE_1000FULL) { |
847 | bp->line_speed = SPEED_1000; | 847 | bp->line_speed = SPEED_1000; |
848 | bp->duplex = DUPLEX_FULL; | 848 | bp->duplex = DUPLEX_FULL; |
849 | } | 849 | } |
850 | else if (common & ADVERTISE_1000HALF) { | 850 | else if (common & ADVERTISE_1000HALF) { |
851 | bp->line_speed = SPEED_1000; | 851 | bp->line_speed = SPEED_1000; |
852 | bp->duplex = DUPLEX_HALF; | 852 | bp->duplex = DUPLEX_HALF; |
853 | } | 853 | } |
854 | else { | 854 | else { |
855 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); | 855 | bnx2_read_phy(bp, bp->mii_adv, &local_adv); |
856 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); | 856 | bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); |
857 | 857 | ||
858 | common = local_adv & remote_adv; | 858 | common = local_adv & remote_adv; |
859 | if (common & ADVERTISE_100FULL) { | 859 | if (common & ADVERTISE_100FULL) { |
860 | bp->line_speed = SPEED_100; | 860 | bp->line_speed = SPEED_100; |
861 | bp->duplex = DUPLEX_FULL; | 861 | bp->duplex = DUPLEX_FULL; |
862 | } | 862 | } |
863 | else if (common & ADVERTISE_100HALF) { | 863 | else if (common & ADVERTISE_100HALF) { |
864 | bp->line_speed = SPEED_100; | 864 | bp->line_speed = SPEED_100; |
865 | bp->duplex = DUPLEX_HALF; | 865 | bp->duplex = DUPLEX_HALF; |
866 | } | 866 | } |
867 | else if (common & ADVERTISE_10FULL) { | 867 | else if (common & ADVERTISE_10FULL) { |
868 | bp->line_speed = SPEED_10; | 868 | bp->line_speed = SPEED_10; |
869 | bp->duplex = DUPLEX_FULL; | 869 | bp->duplex = DUPLEX_FULL; |
870 | } | 870 | } |
871 | else if (common & ADVERTISE_10HALF) { | 871 | else if (common & ADVERTISE_10HALF) { |
872 | bp->line_speed = SPEED_10; | 872 | bp->line_speed = SPEED_10; |
873 | bp->duplex = DUPLEX_HALF; | 873 | bp->duplex = DUPLEX_HALF; |
874 | } | 874 | } |
875 | else { | 875 | else { |
876 | bp->line_speed = 0; | 876 | bp->line_speed = 0; |
877 | bp->link_up = 0; | 877 | bp->link_up = 0; |
878 | } | 878 | } |
879 | } | 879 | } |
880 | } | 880 | } |
881 | else { | 881 | else { |
882 | if (bmcr & BMCR_SPEED100) { | 882 | if (bmcr & BMCR_SPEED100) { |
883 | bp->line_speed = SPEED_100; | 883 | bp->line_speed = SPEED_100; |
884 | } | 884 | } |
885 | else { | 885 | else { |
886 | bp->line_speed = SPEED_10; | 886 | bp->line_speed = SPEED_10; |
887 | } | 887 | } |
888 | if (bmcr & BMCR_FULLDPLX) { | 888 | if (bmcr & BMCR_FULLDPLX) { |
889 | bp->duplex = DUPLEX_FULL; | 889 | bp->duplex = DUPLEX_FULL; |
890 | } | 890 | } |
891 | else { | 891 | else { |
892 | bp->duplex = DUPLEX_HALF; | 892 | bp->duplex = DUPLEX_HALF; |
893 | } | 893 | } |
894 | } | 894 | } |
895 | 895 | ||
896 | return 0; | 896 | return 0; |
897 | } | 897 | } |
898 | 898 | ||
899 | static int | 899 | static int |
900 | bnx2_set_mac_link(struct bnx2 *bp) | 900 | bnx2_set_mac_link(struct bnx2 *bp) |
901 | { | 901 | { |
902 | u32 val; | 902 | u32 val; |
903 | 903 | ||
904 | REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); | 904 | REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); |
905 | if (bp->link_up && (bp->line_speed == SPEED_1000) && | 905 | if (bp->link_up && (bp->line_speed == SPEED_1000) && |
906 | (bp->duplex == DUPLEX_HALF)) { | 906 | (bp->duplex == DUPLEX_HALF)) { |
907 | REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); | 907 | REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); |
908 | } | 908 | } |
909 | 909 | ||
910 | /* Configure the EMAC mode register. */ | 910 | /* Configure the EMAC mode register. */ |
911 | val = REG_RD(bp, BNX2_EMAC_MODE); | 911 | val = REG_RD(bp, BNX2_EMAC_MODE); |
912 | 912 | ||
913 | val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | | 913 | val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | |
914 | BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | | 914 | BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | |
915 | BNX2_EMAC_MODE_25G_MODE); | 915 | BNX2_EMAC_MODE_25G_MODE); |
916 | 916 | ||
917 | if (bp->link_up) { | 917 | if (bp->link_up) { |
918 | switch (bp->line_speed) { | 918 | switch (bp->line_speed) { |
919 | case SPEED_10: | 919 | case SPEED_10: |
920 | if (CHIP_NUM(bp) != CHIP_NUM_5706) { | 920 | if (CHIP_NUM(bp) != CHIP_NUM_5706) { |
921 | val |= BNX2_EMAC_MODE_PORT_MII_10M; | 921 | val |= BNX2_EMAC_MODE_PORT_MII_10M; |
922 | break; | 922 | break; |
923 | } | 923 | } |
924 | /* fall through */ | 924 | /* fall through */ |
925 | case SPEED_100: | 925 | case SPEED_100: |
926 | val |= BNX2_EMAC_MODE_PORT_MII; | 926 | val |= BNX2_EMAC_MODE_PORT_MII; |
927 | break; | 927 | break; |
928 | case SPEED_2500: | 928 | case SPEED_2500: |
929 | val |= BNX2_EMAC_MODE_25G_MODE; | 929 | val |= BNX2_EMAC_MODE_25G_MODE; |
930 | /* fall through */ | 930 | /* fall through */ |
931 | case SPEED_1000: | 931 | case SPEED_1000: |
932 | val |= BNX2_EMAC_MODE_PORT_GMII; | 932 | val |= BNX2_EMAC_MODE_PORT_GMII; |
933 | break; | 933 | break; |
934 | } | 934 | } |
935 | } | 935 | } |
936 | else { | 936 | else { |
937 | val |= BNX2_EMAC_MODE_PORT_GMII; | 937 | val |= BNX2_EMAC_MODE_PORT_GMII; |
938 | } | 938 | } |
939 | 939 | ||
940 | /* Set the MAC to operate in the appropriate duplex mode. */ | 940 | /* Set the MAC to operate in the appropriate duplex mode. */ |
941 | if (bp->duplex == DUPLEX_HALF) | 941 | if (bp->duplex == DUPLEX_HALF) |
942 | val |= BNX2_EMAC_MODE_HALF_DUPLEX; | 942 | val |= BNX2_EMAC_MODE_HALF_DUPLEX; |
943 | REG_WR(bp, BNX2_EMAC_MODE, val); | 943 | REG_WR(bp, BNX2_EMAC_MODE, val); |
944 | 944 | ||
945 | /* Enable/disable rx PAUSE. */ | 945 | /* Enable/disable rx PAUSE. */ |
946 | bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; | 946 | bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; |
947 | 947 | ||
948 | if (bp->flow_ctrl & FLOW_CTRL_RX) | 948 | if (bp->flow_ctrl & FLOW_CTRL_RX) |
949 | bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; | 949 | bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; |
950 | REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); | 950 | REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); |
951 | 951 | ||
952 | /* Enable/disable tx PAUSE. */ | 952 | /* Enable/disable tx PAUSE. */ |
953 | val = REG_RD(bp, BNX2_EMAC_TX_MODE); | 953 | val = REG_RD(bp, BNX2_EMAC_TX_MODE); |
954 | val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; | 954 | val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; |
955 | 955 | ||
956 | if (bp->flow_ctrl & FLOW_CTRL_TX) | 956 | if (bp->flow_ctrl & FLOW_CTRL_TX) |
957 | val |= BNX2_EMAC_TX_MODE_FLOW_EN; | 957 | val |= BNX2_EMAC_TX_MODE_FLOW_EN; |
958 | REG_WR(bp, BNX2_EMAC_TX_MODE, val); | 958 | REG_WR(bp, BNX2_EMAC_TX_MODE, val); |
959 | 959 | ||
960 | /* Acknowledge the interrupt. */ | 960 | /* Acknowledge the interrupt. */ |
961 | REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); | 961 | REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); |
962 | 962 | ||
963 | return 0; | 963 | return 0; |
964 | } | 964 | } |
965 | 965 | ||
966 | static void | 966 | static void |
967 | bnx2_enable_bmsr1(struct bnx2 *bp) | 967 | bnx2_enable_bmsr1(struct bnx2 *bp) |
968 | { | 968 | { |
969 | if ((bp->phy_flags & PHY_SERDES_FLAG) && | 969 | if ((bp->phy_flags & PHY_SERDES_FLAG) && |
970 | (CHIP_NUM(bp) == CHIP_NUM_5709)) | 970 | (CHIP_NUM(bp) == CHIP_NUM_5709)) |
971 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 971 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
972 | MII_BNX2_BLK_ADDR_GP_STATUS); | 972 | MII_BNX2_BLK_ADDR_GP_STATUS); |
973 | } | 973 | } |
974 | 974 | ||
975 | static void | 975 | static void |
976 | bnx2_disable_bmsr1(struct bnx2 *bp) | 976 | bnx2_disable_bmsr1(struct bnx2 *bp) |
977 | { | 977 | { |
978 | if ((bp->phy_flags & PHY_SERDES_FLAG) && | 978 | if ((bp->phy_flags & PHY_SERDES_FLAG) && |
979 | (CHIP_NUM(bp) == CHIP_NUM_5709)) | 979 | (CHIP_NUM(bp) == CHIP_NUM_5709)) |
980 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 980 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
981 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 981 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
982 | } | 982 | } |
983 | 983 | ||
984 | static int | 984 | static int |
985 | bnx2_test_and_enable_2g5(struct bnx2 *bp) | 985 | bnx2_test_and_enable_2g5(struct bnx2 *bp) |
986 | { | 986 | { |
987 | u32 up1; | 987 | u32 up1; |
988 | int ret = 1; | 988 | int ret = 1; |
989 | 989 | ||
990 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) | 990 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) |
991 | return 0; | 991 | return 0; |
992 | 992 | ||
993 | if (bp->autoneg & AUTONEG_SPEED) | 993 | if (bp->autoneg & AUTONEG_SPEED) |
994 | bp->advertising |= ADVERTISED_2500baseX_Full; | 994 | bp->advertising |= ADVERTISED_2500baseX_Full; |
995 | 995 | ||
996 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 996 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
997 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); | 997 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); |
998 | 998 | ||
999 | bnx2_read_phy(bp, bp->mii_up1, &up1); | 999 | bnx2_read_phy(bp, bp->mii_up1, &up1); |
1000 | if (!(up1 & BCM5708S_UP1_2G5)) { | 1000 | if (!(up1 & BCM5708S_UP1_2G5)) { |
1001 | up1 |= BCM5708S_UP1_2G5; | 1001 | up1 |= BCM5708S_UP1_2G5; |
1002 | bnx2_write_phy(bp, bp->mii_up1, up1); | 1002 | bnx2_write_phy(bp, bp->mii_up1, up1); |
1003 | ret = 0; | 1003 | ret = 0; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 1006 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
1007 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1007 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1008 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1008 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1009 | 1009 | ||
1010 | return ret; | 1010 | return ret; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | static int | 1013 | static int |
1014 | bnx2_test_and_disable_2g5(struct bnx2 *bp) | 1014 | bnx2_test_and_disable_2g5(struct bnx2 *bp) |
1015 | { | 1015 | { |
1016 | u32 up1; | 1016 | u32 up1; |
1017 | int ret = 0; | 1017 | int ret = 0; |
1018 | 1018 | ||
1019 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) | 1019 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) |
1020 | return 0; | 1020 | return 0; |
1021 | 1021 | ||
1022 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 1022 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
1023 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); | 1023 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); |
1024 | 1024 | ||
1025 | bnx2_read_phy(bp, bp->mii_up1, &up1); | 1025 | bnx2_read_phy(bp, bp->mii_up1, &up1); |
1026 | if (up1 & BCM5708S_UP1_2G5) { | 1026 | if (up1 & BCM5708S_UP1_2G5) { |
1027 | up1 &= ~BCM5708S_UP1_2G5; | 1027 | up1 &= ~BCM5708S_UP1_2G5; |
1028 | bnx2_write_phy(bp, bp->mii_up1, up1); | 1028 | bnx2_write_phy(bp, bp->mii_up1, up1); |
1029 | ret = 1; | 1029 | ret = 1; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 1032 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
1033 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1033 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1034 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1034 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1035 | 1035 | ||
1036 | return ret; | 1036 | return ret; |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | static void | 1039 | static void |
1040 | bnx2_enable_forced_2g5(struct bnx2 *bp) | 1040 | bnx2_enable_forced_2g5(struct bnx2 *bp) |
1041 | { | 1041 | { |
1042 | u32 bmcr; | 1042 | u32 bmcr; |
1043 | 1043 | ||
1044 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) | 1044 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) |
1045 | return; | 1045 | return; |
1046 | 1046 | ||
1047 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 1047 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
1048 | u32 val; | 1048 | u32 val; |
1049 | 1049 | ||
1050 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1050 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1051 | MII_BNX2_BLK_ADDR_SERDES_DIG); | 1051 | MII_BNX2_BLK_ADDR_SERDES_DIG); |
1052 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); | 1052 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); |
1053 | val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; | 1053 | val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; |
1054 | val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; | 1054 | val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; |
1055 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); | 1055 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); |
1056 | 1056 | ||
1057 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1057 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1058 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1058 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1059 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1059 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1060 | 1060 | ||
1061 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { | 1061 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { |
1062 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1062 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1063 | bmcr |= BCM5708S_BMCR_FORCE_2500; | 1063 | bmcr |= BCM5708S_BMCR_FORCE_2500; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | if (bp->autoneg & AUTONEG_SPEED) { | 1066 | if (bp->autoneg & AUTONEG_SPEED) { |
1067 | bmcr &= ~BMCR_ANENABLE; | 1067 | bmcr &= ~BMCR_ANENABLE; |
1068 | if (bp->req_duplex == DUPLEX_FULL) | 1068 | if (bp->req_duplex == DUPLEX_FULL) |
1069 | bmcr |= BMCR_FULLDPLX; | 1069 | bmcr |= BMCR_FULLDPLX; |
1070 | } | 1070 | } |
1071 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); | 1071 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | static void | 1074 | static void |
1075 | bnx2_disable_forced_2g5(struct bnx2 *bp) | 1075 | bnx2_disable_forced_2g5(struct bnx2 *bp) |
1076 | { | 1076 | { |
1077 | u32 bmcr; | 1077 | u32 bmcr; |
1078 | 1078 | ||
1079 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) | 1079 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) |
1080 | return; | 1080 | return; |
1081 | 1081 | ||
1082 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 1082 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
1083 | u32 val; | 1083 | u32 val; |
1084 | 1084 | ||
1085 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1085 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1086 | MII_BNX2_BLK_ADDR_SERDES_DIG); | 1086 | MII_BNX2_BLK_ADDR_SERDES_DIG); |
1087 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); | 1087 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); |
1088 | val &= ~MII_BNX2_SD_MISC1_FORCE; | 1088 | val &= ~MII_BNX2_SD_MISC1_FORCE; |
1089 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); | 1089 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); |
1090 | 1090 | ||
1091 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, | 1091 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, |
1092 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1092 | MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1093 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1093 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1094 | 1094 | ||
1095 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { | 1095 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { |
1096 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1096 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1097 | bmcr &= ~BCM5708S_BMCR_FORCE_2500; | 1097 | bmcr &= ~BCM5708S_BMCR_FORCE_2500; |
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | if (bp->autoneg & AUTONEG_SPEED) | 1100 | if (bp->autoneg & AUTONEG_SPEED) |
1101 | bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; | 1101 | bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; |
1102 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); | 1102 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | static int | 1105 | static int |
1106 | bnx2_set_link(struct bnx2 *bp) | 1106 | bnx2_set_link(struct bnx2 *bp) |
1107 | { | 1107 | { |
1108 | u32 bmsr; | 1108 | u32 bmsr; |
1109 | u8 link_up; | 1109 | u8 link_up; |
1110 | 1110 | ||
1111 | if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) { | 1111 | if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) { |
1112 | bp->link_up = 1; | 1112 | bp->link_up = 1; |
1113 | return 0; | 1113 | return 0; |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 1116 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
1117 | return 0; | 1117 | return 0; |
1118 | 1118 | ||
1119 | link_up = bp->link_up; | 1119 | link_up = bp->link_up; |
1120 | 1120 | ||
1121 | bnx2_enable_bmsr1(bp); | 1121 | bnx2_enable_bmsr1(bp); |
1122 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); | 1122 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); |
1123 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); | 1123 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); |
1124 | bnx2_disable_bmsr1(bp); | 1124 | bnx2_disable_bmsr1(bp); |
1125 | 1125 | ||
1126 | if ((bp->phy_flags & PHY_SERDES_FLAG) && | 1126 | if ((bp->phy_flags & PHY_SERDES_FLAG) && |
1127 | (CHIP_NUM(bp) == CHIP_NUM_5706)) { | 1127 | (CHIP_NUM(bp) == CHIP_NUM_5706)) { |
1128 | u32 val; | 1128 | u32 val; |
1129 | 1129 | ||
1130 | val = REG_RD(bp, BNX2_EMAC_STATUS); | 1130 | val = REG_RD(bp, BNX2_EMAC_STATUS); |
1131 | if (val & BNX2_EMAC_STATUS_LINK) | 1131 | if (val & BNX2_EMAC_STATUS_LINK) |
1132 | bmsr |= BMSR_LSTATUS; | 1132 | bmsr |= BMSR_LSTATUS; |
1133 | else | 1133 | else |
1134 | bmsr &= ~BMSR_LSTATUS; | 1134 | bmsr &= ~BMSR_LSTATUS; |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | if (bmsr & BMSR_LSTATUS) { | 1137 | if (bmsr & BMSR_LSTATUS) { |
1138 | bp->link_up = 1; | 1138 | bp->link_up = 1; |
1139 | 1139 | ||
1140 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1140 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1141 | if (CHIP_NUM(bp) == CHIP_NUM_5706) | 1141 | if (CHIP_NUM(bp) == CHIP_NUM_5706) |
1142 | bnx2_5706s_linkup(bp); | 1142 | bnx2_5706s_linkup(bp); |
1143 | else if (CHIP_NUM(bp) == CHIP_NUM_5708) | 1143 | else if (CHIP_NUM(bp) == CHIP_NUM_5708) |
1144 | bnx2_5708s_linkup(bp); | 1144 | bnx2_5708s_linkup(bp); |
1145 | else if (CHIP_NUM(bp) == CHIP_NUM_5709) | 1145 | else if (CHIP_NUM(bp) == CHIP_NUM_5709) |
1146 | bnx2_5709s_linkup(bp); | 1146 | bnx2_5709s_linkup(bp); |
1147 | } | 1147 | } |
1148 | else { | 1148 | else { |
1149 | bnx2_copper_linkup(bp); | 1149 | bnx2_copper_linkup(bp); |
1150 | } | 1150 | } |
1151 | bnx2_resolve_flow_ctrl(bp); | 1151 | bnx2_resolve_flow_ctrl(bp); |
1152 | } | 1152 | } |
1153 | else { | 1153 | else { |
1154 | if ((bp->phy_flags & PHY_SERDES_FLAG) && | 1154 | if ((bp->phy_flags & PHY_SERDES_FLAG) && |
1155 | (bp->autoneg & AUTONEG_SPEED)) | 1155 | (bp->autoneg & AUTONEG_SPEED)) |
1156 | bnx2_disable_forced_2g5(bp); | 1156 | bnx2_disable_forced_2g5(bp); |
1157 | 1157 | ||
1158 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; | 1158 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; |
1159 | bp->link_up = 0; | 1159 | bp->link_up = 0; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | if (bp->link_up != link_up) { | 1162 | if (bp->link_up != link_up) { |
1163 | bnx2_report_link(bp); | 1163 | bnx2_report_link(bp); |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | bnx2_set_mac_link(bp); | 1166 | bnx2_set_mac_link(bp); |
1167 | 1167 | ||
1168 | return 0; | 1168 | return 0; |
1169 | } | 1169 | } |
1170 | 1170 | ||
1171 | static int | 1171 | static int |
1172 | bnx2_reset_phy(struct bnx2 *bp) | 1172 | bnx2_reset_phy(struct bnx2 *bp) |
1173 | { | 1173 | { |
1174 | int i; | 1174 | int i; |
1175 | u32 reg; | 1175 | u32 reg; |
1176 | 1176 | ||
1177 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET); | 1177 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET); |
1178 | 1178 | ||
1179 | #define PHY_RESET_MAX_WAIT 100 | 1179 | #define PHY_RESET_MAX_WAIT 100 |
1180 | for (i = 0; i < PHY_RESET_MAX_WAIT; i++) { | 1180 | for (i = 0; i < PHY_RESET_MAX_WAIT; i++) { |
1181 | udelay(10); | 1181 | udelay(10); |
1182 | 1182 | ||
1183 | bnx2_read_phy(bp, bp->mii_bmcr, &reg); | 1183 | bnx2_read_phy(bp, bp->mii_bmcr, &reg); |
1184 | if (!(reg & BMCR_RESET)) { | 1184 | if (!(reg & BMCR_RESET)) { |
1185 | udelay(20); | 1185 | udelay(20); |
1186 | break; | 1186 | break; |
1187 | } | 1187 | } |
1188 | } | 1188 | } |
1189 | if (i == PHY_RESET_MAX_WAIT) { | 1189 | if (i == PHY_RESET_MAX_WAIT) { |
1190 | return -EBUSY; | 1190 | return -EBUSY; |
1191 | } | 1191 | } |
1192 | return 0; | 1192 | return 0; |
1193 | } | 1193 | } |
1194 | 1194 | ||
1195 | static u32 | 1195 | static u32 |
1196 | bnx2_phy_get_pause_adv(struct bnx2 *bp) | 1196 | bnx2_phy_get_pause_adv(struct bnx2 *bp) |
1197 | { | 1197 | { |
1198 | u32 adv = 0; | 1198 | u32 adv = 0; |
1199 | 1199 | ||
1200 | if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) == | 1200 | if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) == |
1201 | (FLOW_CTRL_RX | FLOW_CTRL_TX)) { | 1201 | (FLOW_CTRL_RX | FLOW_CTRL_TX)) { |
1202 | 1202 | ||
1203 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1203 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1204 | adv = ADVERTISE_1000XPAUSE; | 1204 | adv = ADVERTISE_1000XPAUSE; |
1205 | } | 1205 | } |
1206 | else { | 1206 | else { |
1207 | adv = ADVERTISE_PAUSE_CAP; | 1207 | adv = ADVERTISE_PAUSE_CAP; |
1208 | } | 1208 | } |
1209 | } | 1209 | } |
1210 | else if (bp->req_flow_ctrl & FLOW_CTRL_TX) { | 1210 | else if (bp->req_flow_ctrl & FLOW_CTRL_TX) { |
1211 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1211 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1212 | adv = ADVERTISE_1000XPSE_ASYM; | 1212 | adv = ADVERTISE_1000XPSE_ASYM; |
1213 | } | 1213 | } |
1214 | else { | 1214 | else { |
1215 | adv = ADVERTISE_PAUSE_ASYM; | 1215 | adv = ADVERTISE_PAUSE_ASYM; |
1216 | } | 1216 | } |
1217 | } | 1217 | } |
1218 | else if (bp->req_flow_ctrl & FLOW_CTRL_RX) { | 1218 | else if (bp->req_flow_ctrl & FLOW_CTRL_RX) { |
1219 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1219 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1220 | adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; | 1220 | adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; |
1221 | } | 1221 | } |
1222 | else { | 1222 | else { |
1223 | adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | 1223 | adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
1224 | } | 1224 | } |
1225 | } | 1225 | } |
1226 | return adv; | 1226 | return adv; |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | static int bnx2_fw_sync(struct bnx2 *, u32, int); | 1229 | static int bnx2_fw_sync(struct bnx2 *, u32, int); |
1230 | 1230 | ||
1231 | static int | 1231 | static int |
1232 | bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) | 1232 | bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) |
1233 | { | 1233 | { |
1234 | u32 speed_arg = 0, pause_adv; | 1234 | u32 speed_arg = 0, pause_adv; |
1235 | 1235 | ||
1236 | pause_adv = bnx2_phy_get_pause_adv(bp); | 1236 | pause_adv = bnx2_phy_get_pause_adv(bp); |
1237 | 1237 | ||
1238 | if (bp->autoneg & AUTONEG_SPEED) { | 1238 | if (bp->autoneg & AUTONEG_SPEED) { |
1239 | speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; | 1239 | speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; |
1240 | if (bp->advertising & ADVERTISED_10baseT_Half) | 1240 | if (bp->advertising & ADVERTISED_10baseT_Half) |
1241 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; | 1241 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; |
1242 | if (bp->advertising & ADVERTISED_10baseT_Full) | 1242 | if (bp->advertising & ADVERTISED_10baseT_Full) |
1243 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; | 1243 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; |
1244 | if (bp->advertising & ADVERTISED_100baseT_Half) | 1244 | if (bp->advertising & ADVERTISED_100baseT_Half) |
1245 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; | 1245 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; |
1246 | if (bp->advertising & ADVERTISED_100baseT_Full) | 1246 | if (bp->advertising & ADVERTISED_100baseT_Full) |
1247 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; | 1247 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; |
1248 | if (bp->advertising & ADVERTISED_1000baseT_Full) | 1248 | if (bp->advertising & ADVERTISED_1000baseT_Full) |
1249 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; | 1249 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; |
1250 | if (bp->advertising & ADVERTISED_2500baseX_Full) | 1250 | if (bp->advertising & ADVERTISED_2500baseX_Full) |
1251 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; | 1251 | speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; |
1252 | } else { | 1252 | } else { |
1253 | if (bp->req_line_speed == SPEED_2500) | 1253 | if (bp->req_line_speed == SPEED_2500) |
1254 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; | 1254 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; |
1255 | else if (bp->req_line_speed == SPEED_1000) | 1255 | else if (bp->req_line_speed == SPEED_1000) |
1256 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; | 1256 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; |
1257 | else if (bp->req_line_speed == SPEED_100) { | 1257 | else if (bp->req_line_speed == SPEED_100) { |
1258 | if (bp->req_duplex == DUPLEX_FULL) | 1258 | if (bp->req_duplex == DUPLEX_FULL) |
1259 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; | 1259 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; |
1260 | else | 1260 | else |
1261 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; | 1261 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; |
1262 | } else if (bp->req_line_speed == SPEED_10) { | 1262 | } else if (bp->req_line_speed == SPEED_10) { |
1263 | if (bp->req_duplex == DUPLEX_FULL) | 1263 | if (bp->req_duplex == DUPLEX_FULL) |
1264 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; | 1264 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; |
1265 | else | 1265 | else |
1266 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; | 1266 | speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; |
1267 | } | 1267 | } |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) | 1270 | if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) |
1271 | speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; | 1271 | speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; |
1272 | if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM)) | 1272 | if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM)) |
1273 | speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; | 1273 | speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; |
1274 | 1274 | ||
1275 | if (port == PORT_TP) | 1275 | if (port == PORT_TP) |
1276 | speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | | 1276 | speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | |
1277 | BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; | 1277 | BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; |
1278 | 1278 | ||
1279 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg); | 1279 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg); |
1280 | 1280 | ||
1281 | spin_unlock_bh(&bp->phy_lock); | 1281 | spin_unlock_bh(&bp->phy_lock); |
1282 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0); | 1282 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0); |
1283 | spin_lock_bh(&bp->phy_lock); | 1283 | spin_lock_bh(&bp->phy_lock); |
1284 | 1284 | ||
1285 | return 0; | 1285 | return 0; |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | static int | 1288 | static int |
1289 | bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) | 1289 | bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) |
1290 | { | 1290 | { |
1291 | u32 adv, bmcr; | 1291 | u32 adv, bmcr; |
1292 | u32 new_adv = 0; | 1292 | u32 new_adv = 0; |
1293 | 1293 | ||
1294 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 1294 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
1295 | return (bnx2_setup_remote_phy(bp, port)); | 1295 | return (bnx2_setup_remote_phy(bp, port)); |
1296 | 1296 | ||
1297 | if (!(bp->autoneg & AUTONEG_SPEED)) { | 1297 | if (!(bp->autoneg & AUTONEG_SPEED)) { |
1298 | u32 new_bmcr; | 1298 | u32 new_bmcr; |
1299 | int force_link_down = 0; | 1299 | int force_link_down = 0; |
1300 | 1300 | ||
1301 | if (bp->req_line_speed == SPEED_2500) { | 1301 | if (bp->req_line_speed == SPEED_2500) { |
1302 | if (!bnx2_test_and_enable_2g5(bp)) | 1302 | if (!bnx2_test_and_enable_2g5(bp)) |
1303 | force_link_down = 1; | 1303 | force_link_down = 1; |
1304 | } else if (bp->req_line_speed == SPEED_1000) { | 1304 | } else if (bp->req_line_speed == SPEED_1000) { |
1305 | if (bnx2_test_and_disable_2g5(bp)) | 1305 | if (bnx2_test_and_disable_2g5(bp)) |
1306 | force_link_down = 1; | 1306 | force_link_down = 1; |
1307 | } | 1307 | } |
1308 | bnx2_read_phy(bp, bp->mii_adv, &adv); | 1308 | bnx2_read_phy(bp, bp->mii_adv, &adv); |
1309 | adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF); | 1309 | adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF); |
1310 | 1310 | ||
1311 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1311 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1312 | new_bmcr = bmcr & ~BMCR_ANENABLE; | 1312 | new_bmcr = bmcr & ~BMCR_ANENABLE; |
1313 | new_bmcr |= BMCR_SPEED1000; | 1313 | new_bmcr |= BMCR_SPEED1000; |
1314 | 1314 | ||
1315 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 1315 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
1316 | if (bp->req_line_speed == SPEED_2500) | 1316 | if (bp->req_line_speed == SPEED_2500) |
1317 | bnx2_enable_forced_2g5(bp); | 1317 | bnx2_enable_forced_2g5(bp); |
1318 | else if (bp->req_line_speed == SPEED_1000) { | 1318 | else if (bp->req_line_speed == SPEED_1000) { |
1319 | bnx2_disable_forced_2g5(bp); | 1319 | bnx2_disable_forced_2g5(bp); |
1320 | new_bmcr &= ~0x2000; | 1320 | new_bmcr &= ~0x2000; |
1321 | } | 1321 | } |
1322 | 1322 | ||
1323 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { | 1323 | } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { |
1324 | if (bp->req_line_speed == SPEED_2500) | 1324 | if (bp->req_line_speed == SPEED_2500) |
1325 | new_bmcr |= BCM5708S_BMCR_FORCE_2500; | 1325 | new_bmcr |= BCM5708S_BMCR_FORCE_2500; |
1326 | else | 1326 | else |
1327 | new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500; | 1327 | new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500; |
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | if (bp->req_duplex == DUPLEX_FULL) { | 1330 | if (bp->req_duplex == DUPLEX_FULL) { |
1331 | adv |= ADVERTISE_1000XFULL; | 1331 | adv |= ADVERTISE_1000XFULL; |
1332 | new_bmcr |= BMCR_FULLDPLX; | 1332 | new_bmcr |= BMCR_FULLDPLX; |
1333 | } | 1333 | } |
1334 | else { | 1334 | else { |
1335 | adv |= ADVERTISE_1000XHALF; | 1335 | adv |= ADVERTISE_1000XHALF; |
1336 | new_bmcr &= ~BMCR_FULLDPLX; | 1336 | new_bmcr &= ~BMCR_FULLDPLX; |
1337 | } | 1337 | } |
1338 | if ((new_bmcr != bmcr) || (force_link_down)) { | 1338 | if ((new_bmcr != bmcr) || (force_link_down)) { |
1339 | /* Force a link down visible on the other side */ | 1339 | /* Force a link down visible on the other side */ |
1340 | if (bp->link_up) { | 1340 | if (bp->link_up) { |
1341 | bnx2_write_phy(bp, bp->mii_adv, adv & | 1341 | bnx2_write_phy(bp, bp->mii_adv, adv & |
1342 | ~(ADVERTISE_1000XFULL | | 1342 | ~(ADVERTISE_1000XFULL | |
1343 | ADVERTISE_1000XHALF)); | 1343 | ADVERTISE_1000XHALF)); |
1344 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | | 1344 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | |
1345 | BMCR_ANRESTART | BMCR_ANENABLE); | 1345 | BMCR_ANRESTART | BMCR_ANENABLE); |
1346 | 1346 | ||
1347 | bp->link_up = 0; | 1347 | bp->link_up = 0; |
1348 | netif_carrier_off(bp->dev); | 1348 | netif_carrier_off(bp->dev); |
1349 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); | 1349 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); |
1350 | bnx2_report_link(bp); | 1350 | bnx2_report_link(bp); |
1351 | } | 1351 | } |
1352 | bnx2_write_phy(bp, bp->mii_adv, adv); | 1352 | bnx2_write_phy(bp, bp->mii_adv, adv); |
1353 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); | 1353 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); |
1354 | } else { | 1354 | } else { |
1355 | bnx2_resolve_flow_ctrl(bp); | 1355 | bnx2_resolve_flow_ctrl(bp); |
1356 | bnx2_set_mac_link(bp); | 1356 | bnx2_set_mac_link(bp); |
1357 | } | 1357 | } |
1358 | return 0; | 1358 | return 0; |
1359 | } | 1359 | } |
1360 | 1360 | ||
1361 | bnx2_test_and_enable_2g5(bp); | 1361 | bnx2_test_and_enable_2g5(bp); |
1362 | 1362 | ||
1363 | if (bp->advertising & ADVERTISED_1000baseT_Full) | 1363 | if (bp->advertising & ADVERTISED_1000baseT_Full) |
1364 | new_adv |= ADVERTISE_1000XFULL; | 1364 | new_adv |= ADVERTISE_1000XFULL; |
1365 | 1365 | ||
1366 | new_adv |= bnx2_phy_get_pause_adv(bp); | 1366 | new_adv |= bnx2_phy_get_pause_adv(bp); |
1367 | 1367 | ||
1368 | bnx2_read_phy(bp, bp->mii_adv, &adv); | 1368 | bnx2_read_phy(bp, bp->mii_adv, &adv); |
1369 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1369 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1370 | 1370 | ||
1371 | bp->serdes_an_pending = 0; | 1371 | bp->serdes_an_pending = 0; |
1372 | if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { | 1372 | if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { |
1373 | /* Force a link down visible on the other side */ | 1373 | /* Force a link down visible on the other side */ |
1374 | if (bp->link_up) { | 1374 | if (bp->link_up) { |
1375 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); | 1375 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); |
1376 | spin_unlock_bh(&bp->phy_lock); | 1376 | spin_unlock_bh(&bp->phy_lock); |
1377 | msleep(20); | 1377 | msleep(20); |
1378 | spin_lock_bh(&bp->phy_lock); | 1378 | spin_lock_bh(&bp->phy_lock); |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | bnx2_write_phy(bp, bp->mii_adv, new_adv); | 1381 | bnx2_write_phy(bp, bp->mii_adv, new_adv); |
1382 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | | 1382 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | |
1383 | BMCR_ANENABLE); | 1383 | BMCR_ANENABLE); |
1384 | /* Speed up link-up time when the link partner | 1384 | /* Speed up link-up time when the link partner |
1385 | * does not autonegotiate which is very common | 1385 | * does not autonegotiate which is very common |
1386 | * in blade servers. Some blade servers use | 1386 | * in blade servers. Some blade servers use |
1387 | * IPMI for keyboard input and it's important | 1387 | * IPMI for keyboard input and it's important |
1388 | * to minimize link disruptions. Autoneg. involves | 1388 | * to minimize link disruptions. Autoneg. involves |
1389 | * exchanging base pages plus 3 next pages and | 1389 | * exchanging base pages plus 3 next pages and |
1390 | * normally completes in about 120 msec. | 1390 | * normally completes in about 120 msec. |
1391 | */ | 1391 | */ |
1392 | bp->current_interval = SERDES_AN_TIMEOUT; | 1392 | bp->current_interval = SERDES_AN_TIMEOUT; |
1393 | bp->serdes_an_pending = 1; | 1393 | bp->serdes_an_pending = 1; |
1394 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 1394 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
1395 | } else { | 1395 | } else { |
1396 | bnx2_resolve_flow_ctrl(bp); | 1396 | bnx2_resolve_flow_ctrl(bp); |
1397 | bnx2_set_mac_link(bp); | 1397 | bnx2_set_mac_link(bp); |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | return 0; | 1400 | return 0; |
1401 | } | 1401 | } |
1402 | 1402 | ||
1403 | #define ETHTOOL_ALL_FIBRE_SPEED \ | 1403 | #define ETHTOOL_ALL_FIBRE_SPEED \ |
1404 | (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \ | 1404 | (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \ |
1405 | (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\ | 1405 | (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\ |
1406 | (ADVERTISED_1000baseT_Full) | 1406 | (ADVERTISED_1000baseT_Full) |
1407 | 1407 | ||
1408 | #define ETHTOOL_ALL_COPPER_SPEED \ | 1408 | #define ETHTOOL_ALL_COPPER_SPEED \ |
1409 | (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ | 1409 | (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ |
1410 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ | 1410 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ |
1411 | ADVERTISED_1000baseT_Full) | 1411 | ADVERTISED_1000baseT_Full) |
1412 | 1412 | ||
1413 | #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \ | 1413 | #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \ |
1414 | ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA) | 1414 | ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA) |
1415 | 1415 | ||
1416 | #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) | 1416 | #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) |
1417 | 1417 | ||
1418 | static void | 1418 | static void |
1419 | bnx2_set_default_remote_link(struct bnx2 *bp) | 1419 | bnx2_set_default_remote_link(struct bnx2 *bp) |
1420 | { | 1420 | { |
1421 | u32 link; | 1421 | u32 link; |
1422 | 1422 | ||
1423 | if (bp->phy_port == PORT_TP) | 1423 | if (bp->phy_port == PORT_TP) |
1424 | link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK); | 1424 | link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK); |
1425 | else | 1425 | else |
1426 | link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK); | 1426 | link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK); |
1427 | 1427 | ||
1428 | if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) { | 1428 | if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) { |
1429 | bp->req_line_speed = 0; | 1429 | bp->req_line_speed = 0; |
1430 | bp->autoneg |= AUTONEG_SPEED; | 1430 | bp->autoneg |= AUTONEG_SPEED; |
1431 | bp->advertising = ADVERTISED_Autoneg; | 1431 | bp->advertising = ADVERTISED_Autoneg; |
1432 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) | 1432 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) |
1433 | bp->advertising |= ADVERTISED_10baseT_Half; | 1433 | bp->advertising |= ADVERTISED_10baseT_Half; |
1434 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL) | 1434 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL) |
1435 | bp->advertising |= ADVERTISED_10baseT_Full; | 1435 | bp->advertising |= ADVERTISED_10baseT_Full; |
1436 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) | 1436 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) |
1437 | bp->advertising |= ADVERTISED_100baseT_Half; | 1437 | bp->advertising |= ADVERTISED_100baseT_Half; |
1438 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL) | 1438 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL) |
1439 | bp->advertising |= ADVERTISED_100baseT_Full; | 1439 | bp->advertising |= ADVERTISED_100baseT_Full; |
1440 | if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) | 1440 | if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) |
1441 | bp->advertising |= ADVERTISED_1000baseT_Full; | 1441 | bp->advertising |= ADVERTISED_1000baseT_Full; |
1442 | if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) | 1442 | if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) |
1443 | bp->advertising |= ADVERTISED_2500baseX_Full; | 1443 | bp->advertising |= ADVERTISED_2500baseX_Full; |
1444 | } else { | 1444 | } else { |
1445 | bp->autoneg = 0; | 1445 | bp->autoneg = 0; |
1446 | bp->advertising = 0; | 1446 | bp->advertising = 0; |
1447 | bp->req_duplex = DUPLEX_FULL; | 1447 | bp->req_duplex = DUPLEX_FULL; |
1448 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10) { | 1448 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10) { |
1449 | bp->req_line_speed = SPEED_10; | 1449 | bp->req_line_speed = SPEED_10; |
1450 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) | 1450 | if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) |
1451 | bp->req_duplex = DUPLEX_HALF; | 1451 | bp->req_duplex = DUPLEX_HALF; |
1452 | } | 1452 | } |
1453 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100) { | 1453 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100) { |
1454 | bp->req_line_speed = SPEED_100; | 1454 | bp->req_line_speed = SPEED_100; |
1455 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) | 1455 | if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) |
1456 | bp->req_duplex = DUPLEX_HALF; | 1456 | bp->req_duplex = DUPLEX_HALF; |
1457 | } | 1457 | } |
1458 | if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) | 1458 | if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) |
1459 | bp->req_line_speed = SPEED_1000; | 1459 | bp->req_line_speed = SPEED_1000; |
1460 | if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) | 1460 | if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) |
1461 | bp->req_line_speed = SPEED_2500; | 1461 | bp->req_line_speed = SPEED_2500; |
1462 | } | 1462 | } |
1463 | } | 1463 | } |
1464 | 1464 | ||
1465 | static void | 1465 | static void |
1466 | bnx2_set_default_link(struct bnx2 *bp) | 1466 | bnx2_set_default_link(struct bnx2 *bp) |
1467 | { | 1467 | { |
1468 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 1468 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
1469 | return bnx2_set_default_remote_link(bp); | 1469 | return bnx2_set_default_remote_link(bp); |
1470 | 1470 | ||
1471 | bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; | 1471 | bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; |
1472 | bp->req_line_speed = 0; | 1472 | bp->req_line_speed = 0; |
1473 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1473 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1474 | u32 reg; | 1474 | u32 reg; |
1475 | 1475 | ||
1476 | bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; | 1476 | bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; |
1477 | 1477 | ||
1478 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG); | 1478 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG); |
1479 | reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; | 1479 | reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; |
1480 | if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { | 1480 | if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { |
1481 | bp->autoneg = 0; | 1481 | bp->autoneg = 0; |
1482 | bp->req_line_speed = bp->line_speed = SPEED_1000; | 1482 | bp->req_line_speed = bp->line_speed = SPEED_1000; |
1483 | bp->req_duplex = DUPLEX_FULL; | 1483 | bp->req_duplex = DUPLEX_FULL; |
1484 | } | 1484 | } |
1485 | } else | 1485 | } else |
1486 | bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; | 1486 | bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; |
1487 | } | 1487 | } |
1488 | 1488 | ||
1489 | static void | 1489 | static void |
1490 | bnx2_send_heart_beat(struct bnx2 *bp) | 1490 | bnx2_send_heart_beat(struct bnx2 *bp) |
1491 | { | 1491 | { |
1492 | u32 msg; | 1492 | u32 msg; |
1493 | u32 addr; | 1493 | u32 addr; |
1494 | 1494 | ||
1495 | spin_lock(&bp->indirect_lock); | 1495 | spin_lock(&bp->indirect_lock); |
1496 | msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); | 1496 | msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); |
1497 | addr = bp->shmem_base + BNX2_DRV_PULSE_MB; | 1497 | addr = bp->shmem_base + BNX2_DRV_PULSE_MB; |
1498 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); | 1498 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); |
1499 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); | 1499 | REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); |
1500 | spin_unlock(&bp->indirect_lock); | 1500 | spin_unlock(&bp->indirect_lock); |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | static void | 1503 | static void |
1504 | bnx2_remote_phy_event(struct bnx2 *bp) | 1504 | bnx2_remote_phy_event(struct bnx2 *bp) |
1505 | { | 1505 | { |
1506 | u32 msg; | 1506 | u32 msg; |
1507 | u8 link_up = bp->link_up; | 1507 | u8 link_up = bp->link_up; |
1508 | u8 old_port; | 1508 | u8 old_port; |
1509 | 1509 | ||
1510 | msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); | 1510 | msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); |
1511 | 1511 | ||
1512 | if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) | 1512 | if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) |
1513 | bnx2_send_heart_beat(bp); | 1513 | bnx2_send_heart_beat(bp); |
1514 | 1514 | ||
1515 | msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; | 1515 | msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; |
1516 | 1516 | ||
1517 | if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) | 1517 | if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) |
1518 | bp->link_up = 0; | 1518 | bp->link_up = 0; |
1519 | else { | 1519 | else { |
1520 | u32 speed; | 1520 | u32 speed; |
1521 | 1521 | ||
1522 | bp->link_up = 1; | 1522 | bp->link_up = 1; |
1523 | speed = msg & BNX2_LINK_STATUS_SPEED_MASK; | 1523 | speed = msg & BNX2_LINK_STATUS_SPEED_MASK; |
1524 | bp->duplex = DUPLEX_FULL; | 1524 | bp->duplex = DUPLEX_FULL; |
1525 | switch (speed) { | 1525 | switch (speed) { |
1526 | case BNX2_LINK_STATUS_10HALF: | 1526 | case BNX2_LINK_STATUS_10HALF: |
1527 | bp->duplex = DUPLEX_HALF; | 1527 | bp->duplex = DUPLEX_HALF; |
1528 | case BNX2_LINK_STATUS_10FULL: | 1528 | case BNX2_LINK_STATUS_10FULL: |
1529 | bp->line_speed = SPEED_10; | 1529 | bp->line_speed = SPEED_10; |
1530 | break; | 1530 | break; |
1531 | case BNX2_LINK_STATUS_100HALF: | 1531 | case BNX2_LINK_STATUS_100HALF: |
1532 | bp->duplex = DUPLEX_HALF; | 1532 | bp->duplex = DUPLEX_HALF; |
1533 | case BNX2_LINK_STATUS_100BASE_T4: | 1533 | case BNX2_LINK_STATUS_100BASE_T4: |
1534 | case BNX2_LINK_STATUS_100FULL: | 1534 | case BNX2_LINK_STATUS_100FULL: |
1535 | bp->line_speed = SPEED_100; | 1535 | bp->line_speed = SPEED_100; |
1536 | break; | 1536 | break; |
1537 | case BNX2_LINK_STATUS_1000HALF: | 1537 | case BNX2_LINK_STATUS_1000HALF: |
1538 | bp->duplex = DUPLEX_HALF; | 1538 | bp->duplex = DUPLEX_HALF; |
1539 | case BNX2_LINK_STATUS_1000FULL: | 1539 | case BNX2_LINK_STATUS_1000FULL: |
1540 | bp->line_speed = SPEED_1000; | 1540 | bp->line_speed = SPEED_1000; |
1541 | break; | 1541 | break; |
1542 | case BNX2_LINK_STATUS_2500HALF: | 1542 | case BNX2_LINK_STATUS_2500HALF: |
1543 | bp->duplex = DUPLEX_HALF; | 1543 | bp->duplex = DUPLEX_HALF; |
1544 | case BNX2_LINK_STATUS_2500FULL: | 1544 | case BNX2_LINK_STATUS_2500FULL: |
1545 | bp->line_speed = SPEED_2500; | 1545 | bp->line_speed = SPEED_2500; |
1546 | break; | 1546 | break; |
1547 | default: | 1547 | default: |
1548 | bp->line_speed = 0; | 1548 | bp->line_speed = 0; |
1549 | break; | 1549 | break; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | spin_lock(&bp->phy_lock); | 1552 | spin_lock(&bp->phy_lock); |
1553 | bp->flow_ctrl = 0; | 1553 | bp->flow_ctrl = 0; |
1554 | if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != | 1554 | if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != |
1555 | (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { | 1555 | (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { |
1556 | if (bp->duplex == DUPLEX_FULL) | 1556 | if (bp->duplex == DUPLEX_FULL) |
1557 | bp->flow_ctrl = bp->req_flow_ctrl; | 1557 | bp->flow_ctrl = bp->req_flow_ctrl; |
1558 | } else { | 1558 | } else { |
1559 | if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) | 1559 | if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) |
1560 | bp->flow_ctrl |= FLOW_CTRL_TX; | 1560 | bp->flow_ctrl |= FLOW_CTRL_TX; |
1561 | if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) | 1561 | if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) |
1562 | bp->flow_ctrl |= FLOW_CTRL_RX; | 1562 | bp->flow_ctrl |= FLOW_CTRL_RX; |
1563 | } | 1563 | } |
1564 | 1564 | ||
1565 | old_port = bp->phy_port; | 1565 | old_port = bp->phy_port; |
1566 | if (msg & BNX2_LINK_STATUS_SERDES_LINK) | 1566 | if (msg & BNX2_LINK_STATUS_SERDES_LINK) |
1567 | bp->phy_port = PORT_FIBRE; | 1567 | bp->phy_port = PORT_FIBRE; |
1568 | else | 1568 | else |
1569 | bp->phy_port = PORT_TP; | 1569 | bp->phy_port = PORT_TP; |
1570 | 1570 | ||
1571 | if (old_port != bp->phy_port) | 1571 | if (old_port != bp->phy_port) |
1572 | bnx2_set_default_link(bp); | 1572 | bnx2_set_default_link(bp); |
1573 | 1573 | ||
1574 | spin_unlock(&bp->phy_lock); | 1574 | spin_unlock(&bp->phy_lock); |
1575 | } | 1575 | } |
1576 | if (bp->link_up != link_up) | 1576 | if (bp->link_up != link_up) |
1577 | bnx2_report_link(bp); | 1577 | bnx2_report_link(bp); |
1578 | 1578 | ||
1579 | bnx2_set_mac_link(bp); | 1579 | bnx2_set_mac_link(bp); |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | static int | 1582 | static int |
1583 | bnx2_set_remote_link(struct bnx2 *bp) | 1583 | bnx2_set_remote_link(struct bnx2 *bp) |
1584 | { | 1584 | { |
1585 | u32 evt_code; | 1585 | u32 evt_code; |
1586 | 1586 | ||
1587 | evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB); | 1587 | evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB); |
1588 | switch (evt_code) { | 1588 | switch (evt_code) { |
1589 | case BNX2_FW_EVT_CODE_LINK_EVENT: | 1589 | case BNX2_FW_EVT_CODE_LINK_EVENT: |
1590 | bnx2_remote_phy_event(bp); | 1590 | bnx2_remote_phy_event(bp); |
1591 | break; | 1591 | break; |
1592 | case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: | 1592 | case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: |
1593 | default: | 1593 | default: |
1594 | bnx2_send_heart_beat(bp); | 1594 | bnx2_send_heart_beat(bp); |
1595 | break; | 1595 | break; |
1596 | } | 1596 | } |
1597 | return 0; | 1597 | return 0; |
1598 | } | 1598 | } |
1599 | 1599 | ||
1600 | static int | 1600 | static int |
1601 | bnx2_setup_copper_phy(struct bnx2 *bp) | 1601 | bnx2_setup_copper_phy(struct bnx2 *bp) |
1602 | { | 1602 | { |
1603 | u32 bmcr; | 1603 | u32 bmcr; |
1604 | u32 new_bmcr; | 1604 | u32 new_bmcr; |
1605 | 1605 | ||
1606 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 1606 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
1607 | 1607 | ||
1608 | if (bp->autoneg & AUTONEG_SPEED) { | 1608 | if (bp->autoneg & AUTONEG_SPEED) { |
1609 | u32 adv_reg, adv1000_reg; | 1609 | u32 adv_reg, adv1000_reg; |
1610 | u32 new_adv_reg = 0; | 1610 | u32 new_adv_reg = 0; |
1611 | u32 new_adv1000_reg = 0; | 1611 | u32 new_adv1000_reg = 0; |
1612 | 1612 | ||
1613 | bnx2_read_phy(bp, bp->mii_adv, &adv_reg); | 1613 | bnx2_read_phy(bp, bp->mii_adv, &adv_reg); |
1614 | adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | | 1614 | adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | |
1615 | ADVERTISE_PAUSE_ASYM); | 1615 | ADVERTISE_PAUSE_ASYM); |
1616 | 1616 | ||
1617 | bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); | 1617 | bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); |
1618 | adv1000_reg &= PHY_ALL_1000_SPEED; | 1618 | adv1000_reg &= PHY_ALL_1000_SPEED; |
1619 | 1619 | ||
1620 | if (bp->advertising & ADVERTISED_10baseT_Half) | 1620 | if (bp->advertising & ADVERTISED_10baseT_Half) |
1621 | new_adv_reg |= ADVERTISE_10HALF; | 1621 | new_adv_reg |= ADVERTISE_10HALF; |
1622 | if (bp->advertising & ADVERTISED_10baseT_Full) | 1622 | if (bp->advertising & ADVERTISED_10baseT_Full) |
1623 | new_adv_reg |= ADVERTISE_10FULL; | 1623 | new_adv_reg |= ADVERTISE_10FULL; |
1624 | if (bp->advertising & ADVERTISED_100baseT_Half) | 1624 | if (bp->advertising & ADVERTISED_100baseT_Half) |
1625 | new_adv_reg |= ADVERTISE_100HALF; | 1625 | new_adv_reg |= ADVERTISE_100HALF; |
1626 | if (bp->advertising & ADVERTISED_100baseT_Full) | 1626 | if (bp->advertising & ADVERTISED_100baseT_Full) |
1627 | new_adv_reg |= ADVERTISE_100FULL; | 1627 | new_adv_reg |= ADVERTISE_100FULL; |
1628 | if (bp->advertising & ADVERTISED_1000baseT_Full) | 1628 | if (bp->advertising & ADVERTISED_1000baseT_Full) |
1629 | new_adv1000_reg |= ADVERTISE_1000FULL; | 1629 | new_adv1000_reg |= ADVERTISE_1000FULL; |
1630 | 1630 | ||
1631 | new_adv_reg |= ADVERTISE_CSMA; | 1631 | new_adv_reg |= ADVERTISE_CSMA; |
1632 | 1632 | ||
1633 | new_adv_reg |= bnx2_phy_get_pause_adv(bp); | 1633 | new_adv_reg |= bnx2_phy_get_pause_adv(bp); |
1634 | 1634 | ||
1635 | if ((adv1000_reg != new_adv1000_reg) || | 1635 | if ((adv1000_reg != new_adv1000_reg) || |
1636 | (adv_reg != new_adv_reg) || | 1636 | (adv_reg != new_adv_reg) || |
1637 | ((bmcr & BMCR_ANENABLE) == 0)) { | 1637 | ((bmcr & BMCR_ANENABLE) == 0)) { |
1638 | 1638 | ||
1639 | bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); | 1639 | bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); |
1640 | bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); | 1640 | bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); |
1641 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | | 1641 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | |
1642 | BMCR_ANENABLE); | 1642 | BMCR_ANENABLE); |
1643 | } | 1643 | } |
1644 | else if (bp->link_up) { | 1644 | else if (bp->link_up) { |
1645 | /* Flow ctrl may have changed from auto to forced */ | 1645 | /* Flow ctrl may have changed from auto to forced */ |
1646 | /* or vice-versa. */ | 1646 | /* or vice-versa. */ |
1647 | 1647 | ||
1648 | bnx2_resolve_flow_ctrl(bp); | 1648 | bnx2_resolve_flow_ctrl(bp); |
1649 | bnx2_set_mac_link(bp); | 1649 | bnx2_set_mac_link(bp); |
1650 | } | 1650 | } |
1651 | return 0; | 1651 | return 0; |
1652 | } | 1652 | } |
1653 | 1653 | ||
1654 | new_bmcr = 0; | 1654 | new_bmcr = 0; |
1655 | if (bp->req_line_speed == SPEED_100) { | 1655 | if (bp->req_line_speed == SPEED_100) { |
1656 | new_bmcr |= BMCR_SPEED100; | 1656 | new_bmcr |= BMCR_SPEED100; |
1657 | } | 1657 | } |
1658 | if (bp->req_duplex == DUPLEX_FULL) { | 1658 | if (bp->req_duplex == DUPLEX_FULL) { |
1659 | new_bmcr |= BMCR_FULLDPLX; | 1659 | new_bmcr |= BMCR_FULLDPLX; |
1660 | } | 1660 | } |
1661 | if (new_bmcr != bmcr) { | 1661 | if (new_bmcr != bmcr) { |
1662 | u32 bmsr; | 1662 | u32 bmsr; |
1663 | 1663 | ||
1664 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 1664 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
1665 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 1665 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
1666 | 1666 | ||
1667 | if (bmsr & BMSR_LSTATUS) { | 1667 | if (bmsr & BMSR_LSTATUS) { |
1668 | /* Force link down */ | 1668 | /* Force link down */ |
1669 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); | 1669 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); |
1670 | spin_unlock_bh(&bp->phy_lock); | 1670 | spin_unlock_bh(&bp->phy_lock); |
1671 | msleep(50); | 1671 | msleep(50); |
1672 | spin_lock_bh(&bp->phy_lock); | 1672 | spin_lock_bh(&bp->phy_lock); |
1673 | 1673 | ||
1674 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 1674 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
1675 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); | 1675 | bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); |
1676 | } | 1676 | } |
1677 | 1677 | ||
1678 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); | 1678 | bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); |
1679 | 1679 | ||
1680 | /* Normally, the new speed is set up after the link has | 1680 | /* Normally, the new speed is set up after the link has |
1681 | * gone down and up again. In some cases, link will not go | 1681 | * gone down and up again. In some cases, link will not go |
1682 | * down so we need to set up the new speed here. | 1682 | * down so we need to set up the new speed here. |
1683 | */ | 1683 | */ |
1684 | if (bmsr & BMSR_LSTATUS) { | 1684 | if (bmsr & BMSR_LSTATUS) { |
1685 | bp->line_speed = bp->req_line_speed; | 1685 | bp->line_speed = bp->req_line_speed; |
1686 | bp->duplex = bp->req_duplex; | 1686 | bp->duplex = bp->req_duplex; |
1687 | bnx2_resolve_flow_ctrl(bp); | 1687 | bnx2_resolve_flow_ctrl(bp); |
1688 | bnx2_set_mac_link(bp); | 1688 | bnx2_set_mac_link(bp); |
1689 | } | 1689 | } |
1690 | } else { | 1690 | } else { |
1691 | bnx2_resolve_flow_ctrl(bp); | 1691 | bnx2_resolve_flow_ctrl(bp); |
1692 | bnx2_set_mac_link(bp); | 1692 | bnx2_set_mac_link(bp); |
1693 | } | 1693 | } |
1694 | return 0; | 1694 | return 0; |
1695 | } | 1695 | } |
1696 | 1696 | ||
1697 | static int | 1697 | static int |
1698 | bnx2_setup_phy(struct bnx2 *bp, u8 port) | 1698 | bnx2_setup_phy(struct bnx2 *bp, u8 port) |
1699 | { | 1699 | { |
1700 | if (bp->loopback == MAC_LOOPBACK) | 1700 | if (bp->loopback == MAC_LOOPBACK) |
1701 | return 0; | 1701 | return 0; |
1702 | 1702 | ||
1703 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1703 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1704 | return (bnx2_setup_serdes_phy(bp, port)); | 1704 | return (bnx2_setup_serdes_phy(bp, port)); |
1705 | } | 1705 | } |
1706 | else { | 1706 | else { |
1707 | return (bnx2_setup_copper_phy(bp)); | 1707 | return (bnx2_setup_copper_phy(bp)); |
1708 | } | 1708 | } |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | static int | 1711 | static int |
1712 | bnx2_init_5709s_phy(struct bnx2 *bp) | 1712 | bnx2_init_5709s_phy(struct bnx2 *bp) |
1713 | { | 1713 | { |
1714 | u32 val; | 1714 | u32 val; |
1715 | 1715 | ||
1716 | bp->mii_bmcr = MII_BMCR + 0x10; | 1716 | bp->mii_bmcr = MII_BMCR + 0x10; |
1717 | bp->mii_bmsr = MII_BMSR + 0x10; | 1717 | bp->mii_bmsr = MII_BMSR + 0x10; |
1718 | bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; | 1718 | bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; |
1719 | bp->mii_adv = MII_ADVERTISE + 0x10; | 1719 | bp->mii_adv = MII_ADVERTISE + 0x10; |
1720 | bp->mii_lpa = MII_LPA + 0x10; | 1720 | bp->mii_lpa = MII_LPA + 0x10; |
1721 | bp->mii_up1 = MII_BNX2_OVER1G_UP1; | 1721 | bp->mii_up1 = MII_BNX2_OVER1G_UP1; |
1722 | 1722 | ||
1723 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); | 1723 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); |
1724 | bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); | 1724 | bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); |
1725 | 1725 | ||
1726 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1726 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1727 | bnx2_reset_phy(bp); | 1727 | bnx2_reset_phy(bp); |
1728 | 1728 | ||
1729 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); | 1729 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); |
1730 | 1730 | ||
1731 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); | 1731 | bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); |
1732 | val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; | 1732 | val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; |
1733 | val |= MII_BNX2_SD_1000XCTL1_FIBER; | 1733 | val |= MII_BNX2_SD_1000XCTL1_FIBER; |
1734 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); | 1734 | bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); |
1735 | 1735 | ||
1736 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); | 1736 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); |
1737 | bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); | 1737 | bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); |
1738 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) | 1738 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) |
1739 | val |= BCM5708S_UP1_2G5; | 1739 | val |= BCM5708S_UP1_2G5; |
1740 | else | 1740 | else |
1741 | val &= ~BCM5708S_UP1_2G5; | 1741 | val &= ~BCM5708S_UP1_2G5; |
1742 | bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); | 1742 | bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); |
1743 | 1743 | ||
1744 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); | 1744 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); |
1745 | bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); | 1745 | bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); |
1746 | val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; | 1746 | val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; |
1747 | bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); | 1747 | bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); |
1748 | 1748 | ||
1749 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); | 1749 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); |
1750 | 1750 | ||
1751 | val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | | 1751 | val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | |
1752 | MII_BNX2_CL73_BAM_NP_AFT_BP_EN; | 1752 | MII_BNX2_CL73_BAM_NP_AFT_BP_EN; |
1753 | bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); | 1753 | bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); |
1754 | 1754 | ||
1755 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); | 1755 | bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); |
1756 | 1756 | ||
1757 | return 0; | 1757 | return 0; |
1758 | } | 1758 | } |
1759 | 1759 | ||
1760 | static int | 1760 | static int |
1761 | bnx2_init_5708s_phy(struct bnx2 *bp) | 1761 | bnx2_init_5708s_phy(struct bnx2 *bp) |
1762 | { | 1762 | { |
1763 | u32 val; | 1763 | u32 val; |
1764 | 1764 | ||
1765 | bnx2_reset_phy(bp); | 1765 | bnx2_reset_phy(bp); |
1766 | 1766 | ||
1767 | bp->mii_up1 = BCM5708S_UP1; | 1767 | bp->mii_up1 = BCM5708S_UP1; |
1768 | 1768 | ||
1769 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); | 1769 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); |
1770 | bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); | 1770 | bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); |
1771 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); | 1771 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); |
1772 | 1772 | ||
1773 | bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val); | 1773 | bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val); |
1774 | val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN; | 1774 | val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN; |
1775 | bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); | 1775 | bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); |
1776 | 1776 | ||
1777 | bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); | 1777 | bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); |
1778 | val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN; | 1778 | val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN; |
1779 | bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); | 1779 | bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); |
1780 | 1780 | ||
1781 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) { | 1781 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) { |
1782 | bnx2_read_phy(bp, BCM5708S_UP1, &val); | 1782 | bnx2_read_phy(bp, BCM5708S_UP1, &val); |
1783 | val |= BCM5708S_UP1_2G5; | 1783 | val |= BCM5708S_UP1_2G5; |
1784 | bnx2_write_phy(bp, BCM5708S_UP1, val); | 1784 | bnx2_write_phy(bp, BCM5708S_UP1, val); |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || | 1787 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || |
1788 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || | 1788 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || |
1789 | (CHIP_ID(bp) == CHIP_ID_5708_B1)) { | 1789 | (CHIP_ID(bp) == CHIP_ID_5708_B1)) { |
1790 | /* increase tx signal amplitude */ | 1790 | /* increase tx signal amplitude */ |
1791 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, | 1791 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, |
1792 | BCM5708S_BLK_ADDR_TX_MISC); | 1792 | BCM5708S_BLK_ADDR_TX_MISC); |
1793 | bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); | 1793 | bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); |
1794 | val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM; | 1794 | val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM; |
1795 | bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); | 1795 | bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); |
1796 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); | 1796 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); |
1797 | } | 1797 | } |
1798 | 1798 | ||
1799 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) & | 1799 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) & |
1800 | BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK; | 1800 | BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK; |
1801 | 1801 | ||
1802 | if (val) { | 1802 | if (val) { |
1803 | u32 is_backplane; | 1803 | u32 is_backplane; |
1804 | 1804 | ||
1805 | is_backplane = REG_RD_IND(bp, bp->shmem_base + | 1805 | is_backplane = REG_RD_IND(bp, bp->shmem_base + |
1806 | BNX2_SHARED_HW_CFG_CONFIG); | 1806 | BNX2_SHARED_HW_CFG_CONFIG); |
1807 | if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) { | 1807 | if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) { |
1808 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, | 1808 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, |
1809 | BCM5708S_BLK_ADDR_TX_MISC); | 1809 | BCM5708S_BLK_ADDR_TX_MISC); |
1810 | bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); | 1810 | bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); |
1811 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, | 1811 | bnx2_write_phy(bp, BCM5708S_BLK_ADDR, |
1812 | BCM5708S_BLK_ADDR_DIG); | 1812 | BCM5708S_BLK_ADDR_DIG); |
1813 | } | 1813 | } |
1814 | } | 1814 | } |
1815 | return 0; | 1815 | return 0; |
1816 | } | 1816 | } |
1817 | 1817 | ||
1818 | static int | 1818 | static int |
1819 | bnx2_init_5706s_phy(struct bnx2 *bp) | 1819 | bnx2_init_5706s_phy(struct bnx2 *bp) |
1820 | { | 1820 | { |
1821 | bnx2_reset_phy(bp); | 1821 | bnx2_reset_phy(bp); |
1822 | 1822 | ||
1823 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; | 1823 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; |
1824 | 1824 | ||
1825 | if (CHIP_NUM(bp) == CHIP_NUM_5706) | 1825 | if (CHIP_NUM(bp) == CHIP_NUM_5706) |
1826 | REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); | 1826 | REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); |
1827 | 1827 | ||
1828 | if (bp->dev->mtu > 1500) { | 1828 | if (bp->dev->mtu > 1500) { |
1829 | u32 val; | 1829 | u32 val; |
1830 | 1830 | ||
1831 | /* Set extended packet length bit */ | 1831 | /* Set extended packet length bit */ |
1832 | bnx2_write_phy(bp, 0x18, 0x7); | 1832 | bnx2_write_phy(bp, 0x18, 0x7); |
1833 | bnx2_read_phy(bp, 0x18, &val); | 1833 | bnx2_read_phy(bp, 0x18, &val); |
1834 | bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); | 1834 | bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); |
1835 | 1835 | ||
1836 | bnx2_write_phy(bp, 0x1c, 0x6c00); | 1836 | bnx2_write_phy(bp, 0x1c, 0x6c00); |
1837 | bnx2_read_phy(bp, 0x1c, &val); | 1837 | bnx2_read_phy(bp, 0x1c, &val); |
1838 | bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); | 1838 | bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); |
1839 | } | 1839 | } |
1840 | else { | 1840 | else { |
1841 | u32 val; | 1841 | u32 val; |
1842 | 1842 | ||
1843 | bnx2_write_phy(bp, 0x18, 0x7); | 1843 | bnx2_write_phy(bp, 0x18, 0x7); |
1844 | bnx2_read_phy(bp, 0x18, &val); | 1844 | bnx2_read_phy(bp, 0x18, &val); |
1845 | bnx2_write_phy(bp, 0x18, val & ~0x4007); | 1845 | bnx2_write_phy(bp, 0x18, val & ~0x4007); |
1846 | 1846 | ||
1847 | bnx2_write_phy(bp, 0x1c, 0x6c00); | 1847 | bnx2_write_phy(bp, 0x1c, 0x6c00); |
1848 | bnx2_read_phy(bp, 0x1c, &val); | 1848 | bnx2_read_phy(bp, 0x1c, &val); |
1849 | bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); | 1849 | bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); |
1850 | } | 1850 | } |
1851 | 1851 | ||
1852 | return 0; | 1852 | return 0; |
1853 | } | 1853 | } |
1854 | 1854 | ||
1855 | static int | 1855 | static int |
1856 | bnx2_init_copper_phy(struct bnx2 *bp) | 1856 | bnx2_init_copper_phy(struct bnx2 *bp) |
1857 | { | 1857 | { |
1858 | u32 val; | 1858 | u32 val; |
1859 | 1859 | ||
1860 | bnx2_reset_phy(bp); | 1860 | bnx2_reset_phy(bp); |
1861 | 1861 | ||
1862 | if (bp->phy_flags & PHY_CRC_FIX_FLAG) { | 1862 | if (bp->phy_flags & PHY_CRC_FIX_FLAG) { |
1863 | bnx2_write_phy(bp, 0x18, 0x0c00); | 1863 | bnx2_write_phy(bp, 0x18, 0x0c00); |
1864 | bnx2_write_phy(bp, 0x17, 0x000a); | 1864 | bnx2_write_phy(bp, 0x17, 0x000a); |
1865 | bnx2_write_phy(bp, 0x15, 0x310b); | 1865 | bnx2_write_phy(bp, 0x15, 0x310b); |
1866 | bnx2_write_phy(bp, 0x17, 0x201f); | 1866 | bnx2_write_phy(bp, 0x17, 0x201f); |
1867 | bnx2_write_phy(bp, 0x15, 0x9506); | 1867 | bnx2_write_phy(bp, 0x15, 0x9506); |
1868 | bnx2_write_phy(bp, 0x17, 0x401f); | 1868 | bnx2_write_phy(bp, 0x17, 0x401f); |
1869 | bnx2_write_phy(bp, 0x15, 0x14e2); | 1869 | bnx2_write_phy(bp, 0x15, 0x14e2); |
1870 | bnx2_write_phy(bp, 0x18, 0x0400); | 1870 | bnx2_write_phy(bp, 0x18, 0x0400); |
1871 | } | 1871 | } |
1872 | 1872 | ||
1873 | if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) { | 1873 | if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) { |
1874 | bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, | 1874 | bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, |
1875 | MII_BNX2_DSP_EXPAND_REG | 0x8); | 1875 | MII_BNX2_DSP_EXPAND_REG | 0x8); |
1876 | bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); | 1876 | bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); |
1877 | val &= ~(1 << 8); | 1877 | val &= ~(1 << 8); |
1878 | bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); | 1878 | bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); |
1879 | } | 1879 | } |
1880 | 1880 | ||
1881 | if (bp->dev->mtu > 1500) { | 1881 | if (bp->dev->mtu > 1500) { |
1882 | /* Set extended packet length bit */ | 1882 | /* Set extended packet length bit */ |
1883 | bnx2_write_phy(bp, 0x18, 0x7); | 1883 | bnx2_write_phy(bp, 0x18, 0x7); |
1884 | bnx2_read_phy(bp, 0x18, &val); | 1884 | bnx2_read_phy(bp, 0x18, &val); |
1885 | bnx2_write_phy(bp, 0x18, val | 0x4000); | 1885 | bnx2_write_phy(bp, 0x18, val | 0x4000); |
1886 | 1886 | ||
1887 | bnx2_read_phy(bp, 0x10, &val); | 1887 | bnx2_read_phy(bp, 0x10, &val); |
1888 | bnx2_write_phy(bp, 0x10, val | 0x1); | 1888 | bnx2_write_phy(bp, 0x10, val | 0x1); |
1889 | } | 1889 | } |
1890 | else { | 1890 | else { |
1891 | bnx2_write_phy(bp, 0x18, 0x7); | 1891 | bnx2_write_phy(bp, 0x18, 0x7); |
1892 | bnx2_read_phy(bp, 0x18, &val); | 1892 | bnx2_read_phy(bp, 0x18, &val); |
1893 | bnx2_write_phy(bp, 0x18, val & ~0x4007); | 1893 | bnx2_write_phy(bp, 0x18, val & ~0x4007); |
1894 | 1894 | ||
1895 | bnx2_read_phy(bp, 0x10, &val); | 1895 | bnx2_read_phy(bp, 0x10, &val); |
1896 | bnx2_write_phy(bp, 0x10, val & ~0x1); | 1896 | bnx2_write_phy(bp, 0x10, val & ~0x1); |
1897 | } | 1897 | } |
1898 | 1898 | ||
1899 | /* ethernet@wirespeed */ | 1899 | /* ethernet@wirespeed */ |
1900 | bnx2_write_phy(bp, 0x18, 0x7007); | 1900 | bnx2_write_phy(bp, 0x18, 0x7007); |
1901 | bnx2_read_phy(bp, 0x18, &val); | 1901 | bnx2_read_phy(bp, 0x18, &val); |
1902 | bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); | 1902 | bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); |
1903 | return 0; | 1903 | return 0; |
1904 | } | 1904 | } |
1905 | 1905 | ||
1906 | 1906 | ||
1907 | static int | 1907 | static int |
1908 | bnx2_init_phy(struct bnx2 *bp) | 1908 | bnx2_init_phy(struct bnx2 *bp) |
1909 | { | 1909 | { |
1910 | u32 val; | 1910 | u32 val; |
1911 | int rc = 0; | 1911 | int rc = 0; |
1912 | 1912 | ||
1913 | bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG; | 1913 | bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG; |
1914 | bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG; | 1914 | bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG; |
1915 | 1915 | ||
1916 | bp->mii_bmcr = MII_BMCR; | 1916 | bp->mii_bmcr = MII_BMCR; |
1917 | bp->mii_bmsr = MII_BMSR; | 1917 | bp->mii_bmsr = MII_BMSR; |
1918 | bp->mii_bmsr1 = MII_BMSR; | 1918 | bp->mii_bmsr1 = MII_BMSR; |
1919 | bp->mii_adv = MII_ADVERTISE; | 1919 | bp->mii_adv = MII_ADVERTISE; |
1920 | bp->mii_lpa = MII_LPA; | 1920 | bp->mii_lpa = MII_LPA; |
1921 | 1921 | ||
1922 | REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); | 1922 | REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); |
1923 | 1923 | ||
1924 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 1924 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
1925 | goto setup_phy; | 1925 | goto setup_phy; |
1926 | 1926 | ||
1927 | bnx2_read_phy(bp, MII_PHYSID1, &val); | 1927 | bnx2_read_phy(bp, MII_PHYSID1, &val); |
1928 | bp->phy_id = val << 16; | 1928 | bp->phy_id = val << 16; |
1929 | bnx2_read_phy(bp, MII_PHYSID2, &val); | 1929 | bnx2_read_phy(bp, MII_PHYSID2, &val); |
1930 | bp->phy_id |= val & 0xffff; | 1930 | bp->phy_id |= val & 0xffff; |
1931 | 1931 | ||
1932 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 1932 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
1933 | if (CHIP_NUM(bp) == CHIP_NUM_5706) | 1933 | if (CHIP_NUM(bp) == CHIP_NUM_5706) |
1934 | rc = bnx2_init_5706s_phy(bp); | 1934 | rc = bnx2_init_5706s_phy(bp); |
1935 | else if (CHIP_NUM(bp) == CHIP_NUM_5708) | 1935 | else if (CHIP_NUM(bp) == CHIP_NUM_5708) |
1936 | rc = bnx2_init_5708s_phy(bp); | 1936 | rc = bnx2_init_5708s_phy(bp); |
1937 | else if (CHIP_NUM(bp) == CHIP_NUM_5709) | 1937 | else if (CHIP_NUM(bp) == CHIP_NUM_5709) |
1938 | rc = bnx2_init_5709s_phy(bp); | 1938 | rc = bnx2_init_5709s_phy(bp); |
1939 | } | 1939 | } |
1940 | else { | 1940 | else { |
1941 | rc = bnx2_init_copper_phy(bp); | 1941 | rc = bnx2_init_copper_phy(bp); |
1942 | } | 1942 | } |
1943 | 1943 | ||
1944 | setup_phy: | 1944 | setup_phy: |
1945 | if (!rc) | 1945 | if (!rc) |
1946 | rc = bnx2_setup_phy(bp, bp->phy_port); | 1946 | rc = bnx2_setup_phy(bp, bp->phy_port); |
1947 | 1947 | ||
1948 | return rc; | 1948 | return rc; |
1949 | } | 1949 | } |
1950 | 1950 | ||
1951 | static int | 1951 | static int |
1952 | bnx2_set_mac_loopback(struct bnx2 *bp) | 1952 | bnx2_set_mac_loopback(struct bnx2 *bp) |
1953 | { | 1953 | { |
1954 | u32 mac_mode; | 1954 | u32 mac_mode; |
1955 | 1955 | ||
1956 | mac_mode = REG_RD(bp, BNX2_EMAC_MODE); | 1956 | mac_mode = REG_RD(bp, BNX2_EMAC_MODE); |
1957 | mac_mode &= ~BNX2_EMAC_MODE_PORT; | 1957 | mac_mode &= ~BNX2_EMAC_MODE_PORT; |
1958 | mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; | 1958 | mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; |
1959 | REG_WR(bp, BNX2_EMAC_MODE, mac_mode); | 1959 | REG_WR(bp, BNX2_EMAC_MODE, mac_mode); |
1960 | bp->link_up = 1; | 1960 | bp->link_up = 1; |
1961 | return 0; | 1961 | return 0; |
1962 | } | 1962 | } |
1963 | 1963 | ||
1964 | static int bnx2_test_link(struct bnx2 *); | 1964 | static int bnx2_test_link(struct bnx2 *); |
1965 | 1965 | ||
1966 | static int | 1966 | static int |
1967 | bnx2_set_phy_loopback(struct bnx2 *bp) | 1967 | bnx2_set_phy_loopback(struct bnx2 *bp) |
1968 | { | 1968 | { |
1969 | u32 mac_mode; | 1969 | u32 mac_mode; |
1970 | int rc, i; | 1970 | int rc, i; |
1971 | 1971 | ||
1972 | spin_lock_bh(&bp->phy_lock); | 1972 | spin_lock_bh(&bp->phy_lock); |
1973 | rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | | 1973 | rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | |
1974 | BMCR_SPEED1000); | 1974 | BMCR_SPEED1000); |
1975 | spin_unlock_bh(&bp->phy_lock); | 1975 | spin_unlock_bh(&bp->phy_lock); |
1976 | if (rc) | 1976 | if (rc) |
1977 | return rc; | 1977 | return rc; |
1978 | 1978 | ||
1979 | for (i = 0; i < 10; i++) { | 1979 | for (i = 0; i < 10; i++) { |
1980 | if (bnx2_test_link(bp) == 0) | 1980 | if (bnx2_test_link(bp) == 0) |
1981 | break; | 1981 | break; |
1982 | msleep(100); | 1982 | msleep(100); |
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | mac_mode = REG_RD(bp, BNX2_EMAC_MODE); | 1985 | mac_mode = REG_RD(bp, BNX2_EMAC_MODE); |
1986 | mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | | 1986 | mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | |
1987 | BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | | 1987 | BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | |
1988 | BNX2_EMAC_MODE_25G_MODE); | 1988 | BNX2_EMAC_MODE_25G_MODE); |
1989 | 1989 | ||
1990 | mac_mode |= BNX2_EMAC_MODE_PORT_GMII; | 1990 | mac_mode |= BNX2_EMAC_MODE_PORT_GMII; |
1991 | REG_WR(bp, BNX2_EMAC_MODE, mac_mode); | 1991 | REG_WR(bp, BNX2_EMAC_MODE, mac_mode); |
1992 | bp->link_up = 1; | 1992 | bp->link_up = 1; |
1993 | return 0; | 1993 | return 0; |
1994 | } | 1994 | } |
1995 | 1995 | ||
1996 | static int | 1996 | static int |
1997 | bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent) | 1997 | bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent) |
1998 | { | 1998 | { |
1999 | int i; | 1999 | int i; |
2000 | u32 val; | 2000 | u32 val; |
2001 | 2001 | ||
2002 | bp->fw_wr_seq++; | 2002 | bp->fw_wr_seq++; |
2003 | msg_data |= bp->fw_wr_seq; | 2003 | msg_data |= bp->fw_wr_seq; |
2004 | 2004 | ||
2005 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data); | 2005 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data); |
2006 | 2006 | ||
2007 | /* wait for an acknowledgement. */ | 2007 | /* wait for an acknowledgement. */ |
2008 | for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) { | 2008 | for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) { |
2009 | msleep(10); | 2009 | msleep(10); |
2010 | 2010 | ||
2011 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB); | 2011 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB); |
2012 | 2012 | ||
2013 | if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)) | 2013 | if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)) |
2014 | break; | 2014 | break; |
2015 | } | 2015 | } |
2016 | if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0) | 2016 | if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0) |
2017 | return 0; | 2017 | return 0; |
2018 | 2018 | ||
2019 | /* If we timed out, inform the firmware that this is the case. */ | 2019 | /* If we timed out, inform the firmware that this is the case. */ |
2020 | if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { | 2020 | if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { |
2021 | if (!silent) | 2021 | if (!silent) |
2022 | printk(KERN_ERR PFX "fw sync timeout, reset code = " | 2022 | printk(KERN_ERR PFX "fw sync timeout, reset code = " |
2023 | "%x\n", msg_data); | 2023 | "%x\n", msg_data); |
2024 | 2024 | ||
2025 | msg_data &= ~BNX2_DRV_MSG_CODE; | 2025 | msg_data &= ~BNX2_DRV_MSG_CODE; |
2026 | msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; | 2026 | msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; |
2027 | 2027 | ||
2028 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data); | 2028 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data); |
2029 | 2029 | ||
2030 | return -EBUSY; | 2030 | return -EBUSY; |
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK) | 2033 | if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK) |
2034 | return -EIO; | 2034 | return -EIO; |
2035 | 2035 | ||
2036 | return 0; | 2036 | return 0; |
2037 | } | 2037 | } |
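bnx2_fw_sync() above hands a command to the bootcode by writing it to the BNX2_DRV_MB mailbox with a rolling sequence number in the low bits, then polls BNX2_FW_MB until the ack field echoes that sequence, falling back to a timeout notification if it never does. A minimal userspace sketch of the same handshake pattern follows; the write_drv_mb()/read_fw_mb() helpers and the mask values are stand-ins for illustration, not the driver's real constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the shared-memory mailbox registers (assumed helpers,
     * not the driver's REG_WR_IND/REG_RD_IND).  This toy "firmware" acks
     * immediately by echoing the sequence bits back.
     */
    static uint32_t fw_mb;
    static void write_drv_mb(uint32_t v) { fw_mb = v & 0xffff; }
    static uint32_t read_fw_mb(void)     { return fw_mb; }

    #define MSG_SEQ_MASK  0x0000ffffu   /* low bits: rolling sequence */
    #define MSG_CODE_MASK 0xffff0000u   /* high bits: command code    */

    static uint32_t fw_wr_seq;

    /* 0 on ack, -1 on timeout; same shape as bnx2_fw_sync()'s -EBUSY path. */
    static int fw_sync(uint32_t code, int max_polls)
    {
            uint32_t msg = (code & MSG_CODE_MASK) | (++fw_wr_seq & MSG_SEQ_MASK);

            write_drv_mb(msg);
            for (int i = 0; i < max_polls; i++) {
                    if ((read_fw_mb() & MSG_SEQ_MASK) == (msg & MSG_SEQ_MASK))
                            return 0;   /* firmware echoed our sequence */
                    /* the real driver msleep()s ~10 ms per iteration */
            }
            return -1;                  /* no ack: report the timeout   */
    }

    int main(void)
    {
            printf("fw_sync -> %d\n", fw_sync(0x00010000u, 10));
            return 0;
    }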
2038 | 2038 | ||
2039 | static int | 2039 | static int |
2040 | bnx2_init_5709_context(struct bnx2 *bp) | 2040 | bnx2_init_5709_context(struct bnx2 *bp) |
2041 | { | 2041 | { |
2042 | int i, ret = 0; | 2042 | int i, ret = 0; |
2043 | u32 val; | 2043 | u32 val; |
2044 | 2044 | ||
2045 | val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); | 2045 | val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); |
2046 | val |= (BCM_PAGE_BITS - 8) << 16; | 2046 | val |= (BCM_PAGE_BITS - 8) << 16; |
2047 | REG_WR(bp, BNX2_CTX_COMMAND, val); | 2047 | REG_WR(bp, BNX2_CTX_COMMAND, val); |
2048 | for (i = 0; i < 10; i++) { | 2048 | for (i = 0; i < 10; i++) { |
2049 | val = REG_RD(bp, BNX2_CTX_COMMAND); | 2049 | val = REG_RD(bp, BNX2_CTX_COMMAND); |
2050 | if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) | 2050 | if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) |
2051 | break; | 2051 | break; |
2052 | udelay(2); | 2052 | udelay(2); |
2053 | } | 2053 | } |
2054 | if (val & BNX2_CTX_COMMAND_MEM_INIT) | 2054 | if (val & BNX2_CTX_COMMAND_MEM_INIT) |
2055 | return -EBUSY; | 2055 | return -EBUSY; |
2056 | 2056 | ||
2057 | for (i = 0; i < bp->ctx_pages; i++) { | 2057 | for (i = 0; i < bp->ctx_pages; i++) { |
2058 | int j; | 2058 | int j; |
2059 | 2059 | ||
2060 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, | 2060 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, |
2061 | (bp->ctx_blk_mapping[i] & 0xffffffff) | | 2061 | (bp->ctx_blk_mapping[i] & 0xffffffff) | |
2062 | BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); | 2062 | BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); |
2063 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, | 2063 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, |
2064 | (u64) bp->ctx_blk_mapping[i] >> 32); | 2064 | (u64) bp->ctx_blk_mapping[i] >> 32); |
2065 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | | 2065 | REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | |
2066 | BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); | 2066 | BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); |
2067 | for (j = 0; j < 10; j++) { | 2067 | for (j = 0; j < 10; j++) { |
2068 | 2068 | ||
2069 | val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); | 2069 | val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); |
2070 | if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) | 2070 | if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) |
2071 | break; | 2071 | break; |
2072 | udelay(5); | 2072 | udelay(5); |
2073 | } | 2073 | } |
2074 | if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { | 2074 | if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { |
2075 | ret = -EBUSY; | 2075 | ret = -EBUSY; |
2076 | break; | 2076 | break; |
2077 | } | 2077 | } |
2078 | } | 2078 | } |
2079 | return ret; | 2079 | return ret; |
2080 | } | 2080 | } |
2081 | 2081 | ||
2082 | static void | 2082 | static void |
2083 | bnx2_init_context(struct bnx2 *bp) | 2083 | bnx2_init_context(struct bnx2 *bp) |
2084 | { | 2084 | { |
2085 | u32 vcid; | 2085 | u32 vcid; |
2086 | 2086 | ||
2087 | vcid = 96; | 2087 | vcid = 96; |
2088 | while (vcid) { | 2088 | while (vcid) { |
2089 | u32 vcid_addr, pcid_addr, offset; | 2089 | u32 vcid_addr, pcid_addr, offset; |
2090 | int i; | 2090 | int i; |
2091 | 2091 | ||
2092 | vcid--; | 2092 | vcid--; |
2093 | 2093 | ||
2094 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { | 2094 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { |
2095 | u32 new_vcid; | 2095 | u32 new_vcid; |
2096 | 2096 | ||
2097 | vcid_addr = GET_PCID_ADDR(vcid); | 2097 | vcid_addr = GET_PCID_ADDR(vcid); |
2098 | if (vcid & 0x8) { | 2098 | if (vcid & 0x8) { |
2099 | new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7); | 2099 | new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7); |
2100 | } | 2100 | } |
2101 | else { | 2101 | else { |
2102 | new_vcid = vcid; | 2102 | new_vcid = vcid; |
2103 | } | 2103 | } |
2104 | pcid_addr = GET_PCID_ADDR(new_vcid); | 2104 | pcid_addr = GET_PCID_ADDR(new_vcid); |
2105 | } | 2105 | } |
2106 | else { | 2106 | else { |
2107 | vcid_addr = GET_CID_ADDR(vcid); | 2107 | vcid_addr = GET_CID_ADDR(vcid); |
2108 | pcid_addr = vcid_addr; | 2108 | pcid_addr = vcid_addr; |
2109 | } | 2109 | } |
2110 | 2110 | ||
2111 | for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { | 2111 | for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { |
2112 | vcid_addr += (i << PHY_CTX_SHIFT); | 2112 | vcid_addr += (i << PHY_CTX_SHIFT); |
2113 | pcid_addr += (i << PHY_CTX_SHIFT); | 2113 | pcid_addr += (i << PHY_CTX_SHIFT); |
2114 | 2114 | ||
2115 | REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00); | 2115 | REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00); |
2116 | REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); | 2116 | REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); |
2117 | 2117 | ||
2118 | /* Zero out the context. */ | 2118 | /* Zero out the context. */ |
2119 | for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) | 2119 | for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) |
2120 | CTX_WR(bp, 0x00, offset, 0); | 2120 | CTX_WR(bp, 0x00, offset, 0); |
2121 | 2121 | ||
2122 | REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); | 2122 | REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); |
2123 | REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); | 2123 | REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); |
2124 | } | 2124 | } |
2125 | } | 2125 | } |
2126 | } | 2126 | } |
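On 5706 A0 silicon the loop above remaps some virtual context IDs before computing the physical context address: a vcid with bit 3 set is relocated into the 0x60-based range while its high nibble and low three bits are preserved. A tiny sketch of that remap as a pure function; the constants are copied from the code above, everything else is illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* 5706 A0 vcid remap as done in bnx2_init_context(): vcids with bit 3
     * set move into the 0x60-based range, keeping bits 7-4 and bits 2-0.
     */
    static uint32_t remap_vcid_5706_a0(uint32_t vcid)
    {
            if (vcid & 0x8)
                    return 0x60 + (vcid & 0xf0) + (vcid & 0x7);
            return vcid;
    }

    int main(void)
    {
            for (uint32_t v = 0; v < 16; v++)
                    printf("vcid %2u -> %#x\n", (unsigned)v,
                           (unsigned)remap_vcid_5706_a0(v));
            return 0;
    }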
2127 | 2127 | ||
2128 | static int | 2128 | static int |
2129 | bnx2_alloc_bad_rbuf(struct bnx2 *bp) | 2129 | bnx2_alloc_bad_rbuf(struct bnx2 *bp) |
2130 | { | 2130 | { |
2131 | u16 *good_mbuf; | 2131 | u16 *good_mbuf; |
2132 | u32 good_mbuf_cnt; | 2132 | u32 good_mbuf_cnt; |
2133 | u32 val; | 2133 | u32 val; |
2134 | 2134 | ||
2135 | good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); | 2135 | good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); |
2136 | if (good_mbuf == NULL) { | 2136 | if (good_mbuf == NULL) { |
2137 | printk(KERN_ERR PFX "Failed to allocate memory in " | 2137 | printk(KERN_ERR PFX "Failed to allocate memory in " |
2138 | "bnx2_alloc_bad_rbuf\n"); | 2138 | "bnx2_alloc_bad_rbuf\n"); |
2139 | return -ENOMEM; | 2139 | return -ENOMEM; |
2140 | } | 2140 | } |
2141 | 2141 | ||
2142 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, | 2142 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, |
2143 | BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); | 2143 | BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); |
2144 | 2144 | ||
2145 | good_mbuf_cnt = 0; | 2145 | good_mbuf_cnt = 0; |
2146 | 2146 | ||
2147 | /* Allocate a bunch of mbufs and save the good ones in an array. */ | 2147 | /* Allocate a bunch of mbufs and save the good ones in an array. */ |
2148 | val = REG_RD_IND(bp, BNX2_RBUF_STATUS1); | 2148 | val = REG_RD_IND(bp, BNX2_RBUF_STATUS1); |
2149 | while (val & BNX2_RBUF_STATUS1_FREE_COUNT) { | 2149 | while (val & BNX2_RBUF_STATUS1_FREE_COUNT) { |
2150 | REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ); | 2150 | REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ); |
2151 | 2151 | ||
2152 | val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC); | 2152 | val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC); |
2153 | 2153 | ||
2154 | val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE; | 2154 | val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE; |
2155 | 2155 | ||
2156 | /* The addresses with Bit 9 set are bad memory blocks. */ | 2156 | /* The addresses with Bit 9 set are bad memory blocks. */ |
2157 | if (!(val & (1 << 9))) { | 2157 | if (!(val & (1 << 9))) { |
2158 | good_mbuf[good_mbuf_cnt] = (u16) val; | 2158 | good_mbuf[good_mbuf_cnt] = (u16) val; |
2159 | good_mbuf_cnt++; | 2159 | good_mbuf_cnt++; |
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | val = REG_RD_IND(bp, BNX2_RBUF_STATUS1); | 2162 | val = REG_RD_IND(bp, BNX2_RBUF_STATUS1); |
2163 | } | 2163 | } |
2164 | 2164 | ||
2165 | /* Free the good ones back to the mbuf pool thus discarding | 2165 | /* Free the good ones back to the mbuf pool thus discarding |
2166 | * all the bad ones. */ | 2166 | * all the bad ones. */ |
2167 | while (good_mbuf_cnt) { | 2167 | while (good_mbuf_cnt) { |
2168 | good_mbuf_cnt--; | 2168 | good_mbuf_cnt--; |
2169 | 2169 | ||
2170 | val = good_mbuf[good_mbuf_cnt]; | 2170 | val = good_mbuf[good_mbuf_cnt]; |
2171 | val = (val << 9) | val | 1; | 2171 | val = (val << 9) | val | 1; |
2172 | 2172 | ||
2173 | REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val); | 2173 | REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val); |
2174 | } | 2174 | } |
2175 | kfree(good_mbuf); | 2175 | kfree(good_mbuf); |
2176 | return 0; | 2176 | return 0; |
2177 | } | 2177 | } |
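bnx2_alloc_bad_rbuf() above drains the chip's RX buffer pool, records only the buffer indices whose bit 9 is clear, and then frees just those back, so the bad blocks stay permanently allocated and out of circulation; the free command packs the index twice plus a valid bit, mirroring (val << 9) | val | 1. A compact sketch of that quarantine idea, with a toy pool standing in for the RBUF alloc/free registers (the stubs and index range are assumptions for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define BAD_BLOCK_BIT (1u << 9)     /* addresses with bit 9 set are bad */

    /* Toy pool: indices 505..520, so a few of them carry the bad bit. */
    static uint32_t next_idx = 505;
    static uint32_t rbuf_alloc(void)
    {
            return (next_idx <= 520) ? next_idx++ : 0;  /* 0 = pool empty */
    }
    static void rbuf_free(uint32_t cmd)
    {
            printf("free cmd %#x\n", (unsigned)cmd);
    }

    static void quarantine_bad_buffers(void)
    {
            uint16_t good[512];
            uint32_t n = 0, idx;

            /* Allocate everything; keep only indices without the bad bit. */
            while ((idx = rbuf_alloc()) != 0)
                    if (!(idx & BAD_BLOCK_BIT) && n < 512)
                            good[n++] = (uint16_t)idx;

            /* Free the good ones; bad blocks are never returned to the pool.
             * The encoding mirrors the driver: index in two fields plus bit 0.
             */
            while (n--) {
                    uint32_t v = good[n];
                    rbuf_free((v << 9) | v | 1u);
            }
    }

    int main(void) { quarantine_bad_buffers(); return 0; }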
2178 | 2178 | ||
2179 | static void | 2179 | static void |
2180 | bnx2_set_mac_addr(struct bnx2 *bp) | 2180 | bnx2_set_mac_addr(struct bnx2 *bp) |
2181 | { | 2181 | { |
2182 | u32 val; | 2182 | u32 val; |
2183 | u8 *mac_addr = bp->dev->dev_addr; | 2183 | u8 *mac_addr = bp->dev->dev_addr; |
2184 | 2184 | ||
2185 | val = (mac_addr[0] << 8) | mac_addr[1]; | 2185 | val = (mac_addr[0] << 8) | mac_addr[1]; |
2186 | 2186 | ||
2187 | REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val); | 2187 | REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val); |
2188 | 2188 | ||
2189 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | | 2189 | val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | |
2190 | (mac_addr[4] << 8) | mac_addr[5]; | 2190 | (mac_addr[4] << 8) | mac_addr[5]; |
2191 | 2191 | ||
2192 | REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val); | 2192 | REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val); |
2193 | } | 2193 | } |
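bnx2_set_mac_addr() splits the six-byte station address across two registers: bytes 0-1 go into EMAC_MAC_MATCH0 and bytes 2-5 into EMAC_MAC_MATCH1, most significant byte first. The packing itself is just shifts and ORs, shown here as a standalone sketch (pure computation, no register access; the example address is arbitrary):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    static void pack_mac(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
    {
            /* hi holds bytes 0-1, lo holds bytes 2-5, MSB first */
            *hi = ((uint32_t)mac[0] << 8) | mac[1];
            *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                  ((uint32_t)mac[4] << 8)  |  mac[5];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
            uint32_t hi, lo;

            pack_mac(mac, &hi, &lo);
            printf("MATCH0=0x%04" PRIx32 " MATCH1=0x%08" PRIx32 "\n", hi, lo);
            return 0;
    }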
2194 | 2194 | ||
2195 | static inline int | 2195 | static inline int |
2196 | bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) | 2196 | bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) |
2197 | { | 2197 | { |
2198 | struct sk_buff *skb; | 2198 | struct sk_buff *skb; |
2199 | struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; | 2199 | struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; |
2200 | dma_addr_t mapping; | 2200 | dma_addr_t mapping; |
2201 | struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; | 2201 | struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; |
2202 | unsigned long align; | 2202 | unsigned long align; |
2203 | 2203 | ||
2204 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | 2204 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); |
2205 | if (skb == NULL) { | 2205 | if (skb == NULL) { |
2206 | return -ENOMEM; | 2206 | return -ENOMEM; |
2207 | } | 2207 | } |
2208 | 2208 | ||
2209 | if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) | 2209 | if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) |
2210 | skb_reserve(skb, BNX2_RX_ALIGN - align); | 2210 | skb_reserve(skb, BNX2_RX_ALIGN - align); |
2211 | 2211 | ||
2212 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 2212 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
2213 | PCI_DMA_FROMDEVICE); | 2213 | PCI_DMA_FROMDEVICE); |
2214 | 2214 | ||
2215 | rx_buf->skb = skb; | 2215 | rx_buf->skb = skb; |
2216 | pci_unmap_addr_set(rx_buf, mapping, mapping); | 2216 | pci_unmap_addr_set(rx_buf, mapping, mapping); |
2217 | 2217 | ||
2218 | rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; | 2218 | rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; |
2219 | rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; | 2219 | rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; |
2220 | 2220 | ||
2221 | bp->rx_prod_bseq += bp->rx_buf_use_size; | 2221 | bp->rx_prod_bseq += bp->rx_buf_use_size; |
2222 | 2222 | ||
2223 | return 0; | 2223 | return 0; |
2224 | } | 2224 | } |
2225 | 2225 | ||
2226 | static int | 2226 | static int |
2227 | bnx2_phy_event_is_set(struct bnx2 *bp, u32 event) | 2227 | bnx2_phy_event_is_set(struct bnx2 *bp, u32 event) |
2228 | { | 2228 | { |
2229 | struct status_block *sblk = bp->status_blk; | 2229 | struct status_block *sblk = bp->status_blk; |
2230 | u32 new_link_state, old_link_state; | 2230 | u32 new_link_state, old_link_state; |
2231 | int is_set = 1; | 2231 | int is_set = 1; |
2232 | 2232 | ||
2233 | new_link_state = sblk->status_attn_bits & event; | 2233 | new_link_state = sblk->status_attn_bits & event; |
2234 | old_link_state = sblk->status_attn_bits_ack & event; | 2234 | old_link_state = sblk->status_attn_bits_ack & event; |
2235 | if (new_link_state != old_link_state) { | 2235 | if (new_link_state != old_link_state) { |
2236 | if (new_link_state) | 2236 | if (new_link_state) |
2237 | REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); | 2237 | REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); |
2238 | else | 2238 | else |
2239 | REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); | 2239 | REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); |
2240 | } else | 2240 | } else |
2241 | is_set = 0; | 2241 | is_set = 0; |
2242 | 2242 | ||
2243 | return is_set; | 2243 | return is_set; |
2244 | } | 2244 | } |
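bnx2_phy_event_is_set() treats an event as pending only when the raw attention bit and its acknowledged copy disagree, then acknowledges it by setting or clearing the bit so the next pass sees the two copies equal again. A sketch of that edge-detect idea with plain variables standing in for the status block and for the SET/CLEAR command registers (these stand-ins are assumptions, not the driver's types):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t attn_bits;      /* raw attention state from hardware */
    static uint32_t attn_bits_ack;  /* what the driver has acknowledged  */

    static bool event_is_set(uint32_t event)
    {
            uint32_t now   = attn_bits & event;
            uint32_t acked = attn_bits_ack & event;

            if (now == acked)
                    return false;           /* no change since last ack */

            /* Acknowledge: bring the ack copy in line with the current
             * state.  The driver does this through the PCICFG status
             * bit SET/CLEAR command registers.
             */
            if (now)
                    attn_bits_ack |= event;
            else
                    attn_bits_ack &= ~event;
            return true;
    }

    int main(void)
    {
            attn_bits |= 1;                 /* e.g. link-state bit rises   */
            return event_is_set(1) ? 0 : 1; /* first call sees the change  */
    }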
2245 | 2245 | ||
2246 | static void | 2246 | static void |
2247 | bnx2_phy_int(struct bnx2 *bp) | 2247 | bnx2_phy_int(struct bnx2 *bp) |
2248 | { | 2248 | { |
2249 | if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) { | 2249 | if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) { |
2250 | spin_lock(&bp->phy_lock); | 2250 | spin_lock(&bp->phy_lock); |
2251 | bnx2_set_link(bp); | 2251 | bnx2_set_link(bp); |
2252 | spin_unlock(&bp->phy_lock); | 2252 | spin_unlock(&bp->phy_lock); |
2253 | } | 2253 | } |
2254 | if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT)) | 2254 | if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT)) |
2255 | bnx2_set_remote_link(bp); | 2255 | bnx2_set_remote_link(bp); |
2256 | 2256 | ||
2257 | } | 2257 | } |
2258 | 2258 | ||
2259 | static void | 2259 | static void |
2260 | bnx2_tx_int(struct bnx2 *bp) | 2260 | bnx2_tx_int(struct bnx2 *bp) |
2261 | { | 2261 | { |
2262 | struct status_block *sblk = bp->status_blk; | 2262 | struct status_block *sblk = bp->status_blk; |
2263 | u16 hw_cons, sw_cons, sw_ring_cons; | 2263 | u16 hw_cons, sw_cons, sw_ring_cons; |
2264 | int tx_free_bd = 0; | 2264 | int tx_free_bd = 0; |
2265 | 2265 | ||
2266 | hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0; | 2266 | hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0; |
2267 | if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) { | 2267 | if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) { |
2268 | hw_cons++; | 2268 | hw_cons++; |
2269 | } | 2269 | } |
2270 | sw_cons = bp->tx_cons; | 2270 | sw_cons = bp->tx_cons; |
2271 | 2271 | ||
2272 | while (sw_cons != hw_cons) { | 2272 | while (sw_cons != hw_cons) { |
2273 | struct sw_bd *tx_buf; | 2273 | struct sw_bd *tx_buf; |
2274 | struct sk_buff *skb; | 2274 | struct sk_buff *skb; |
2275 | int i, last; | 2275 | int i, last; |
2276 | 2276 | ||
2277 | sw_ring_cons = TX_RING_IDX(sw_cons); | 2277 | sw_ring_cons = TX_RING_IDX(sw_cons); |
2278 | 2278 | ||
2279 | tx_buf = &bp->tx_buf_ring[sw_ring_cons]; | 2279 | tx_buf = &bp->tx_buf_ring[sw_ring_cons]; |
2280 | skb = tx_buf->skb; | 2280 | skb = tx_buf->skb; |
2281 | 2281 | ||
2282 | /* partial BD completions possible with TSO packets */ | 2282 | /* partial BD completions possible with TSO packets */ |
2283 | if (skb_is_gso(skb)) { | 2283 | if (skb_is_gso(skb)) { |
2284 | u16 last_idx, last_ring_idx; | 2284 | u16 last_idx, last_ring_idx; |
2285 | 2285 | ||
2286 | last_idx = sw_cons + | 2286 | last_idx = sw_cons + |
2287 | skb_shinfo(skb)->nr_frags + 1; | 2287 | skb_shinfo(skb)->nr_frags + 1; |
2288 | last_ring_idx = sw_ring_cons + | 2288 | last_ring_idx = sw_ring_cons + |
2289 | skb_shinfo(skb)->nr_frags + 1; | 2289 | skb_shinfo(skb)->nr_frags + 1; |
2290 | if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { | 2290 | if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { |
2291 | last_idx++; | 2291 | last_idx++; |
2292 | } | 2292 | } |
2293 | if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { | 2293 | if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { |
2294 | break; | 2294 | break; |
2295 | } | 2295 | } |
2296 | } | 2296 | } |
2297 | 2297 | ||
2298 | pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), | 2298 | pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), |
2299 | skb_headlen(skb), PCI_DMA_TODEVICE); | 2299 | skb_headlen(skb), PCI_DMA_TODEVICE); |
2300 | 2300 | ||
2301 | tx_buf->skb = NULL; | 2301 | tx_buf->skb = NULL; |
2302 | last = skb_shinfo(skb)->nr_frags; | 2302 | last = skb_shinfo(skb)->nr_frags; |
2303 | 2303 | ||
2304 | for (i = 0; i < last; i++) { | 2304 | for (i = 0; i < last; i++) { |
2305 | sw_cons = NEXT_TX_BD(sw_cons); | 2305 | sw_cons = NEXT_TX_BD(sw_cons); |
2306 | 2306 | ||
2307 | pci_unmap_page(bp->pdev, | 2307 | pci_unmap_page(bp->pdev, |
2308 | pci_unmap_addr( | 2308 | pci_unmap_addr( |
2309 | &bp->tx_buf_ring[TX_RING_IDX(sw_cons)], | 2309 | &bp->tx_buf_ring[TX_RING_IDX(sw_cons)], |
2310 | mapping), | 2310 | mapping), |
2311 | skb_shinfo(skb)->frags[i].size, | 2311 | skb_shinfo(skb)->frags[i].size, |
2312 | PCI_DMA_TODEVICE); | 2312 | PCI_DMA_TODEVICE); |
2313 | } | 2313 | } |
2314 | 2314 | ||
2315 | sw_cons = NEXT_TX_BD(sw_cons); | 2315 | sw_cons = NEXT_TX_BD(sw_cons); |
2316 | 2316 | ||
2317 | tx_free_bd += last + 1; | 2317 | tx_free_bd += last + 1; |
2318 | 2318 | ||
2319 | dev_kfree_skb(skb); | 2319 | dev_kfree_skb(skb); |
2320 | 2320 | ||
2321 | hw_cons = bp->hw_tx_cons = | 2321 | hw_cons = bp->hw_tx_cons = |
2322 | sblk->status_tx_quick_consumer_index0; | 2322 | sblk->status_tx_quick_consumer_index0; |
2323 | 2323 | ||
2324 | if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) { | 2324 | if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) { |
2325 | hw_cons++; | 2325 | hw_cons++; |
2326 | } | 2326 | } |
2327 | } | 2327 | } |
2328 | 2328 | ||
2329 | bp->tx_cons = sw_cons; | 2329 | bp->tx_cons = sw_cons; |
2330 | /* Need to make the tx_cons update visible to bnx2_start_xmit() | 2330 | /* Need to make the tx_cons update visible to bnx2_start_xmit() |
2331 | * before checking for netif_queue_stopped(). Without the | 2331 | * before checking for netif_queue_stopped(). Without the |
2332 | * memory barrier, there is a small possibility that bnx2_start_xmit() | 2332 | * memory barrier, there is a small possibility that bnx2_start_xmit() |
2333 | * will miss it and cause the queue to be stopped forever. | 2333 | * will miss it and cause the queue to be stopped forever. |
2334 | */ | 2334 | */ |
2335 | smp_mb(); | 2335 | smp_mb(); |
2336 | 2336 | ||
2337 | if (unlikely(netif_queue_stopped(bp->dev)) && | 2337 | if (unlikely(netif_queue_stopped(bp->dev)) && |
2338 | (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) { | 2338 | (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) { |
2339 | netif_tx_lock(bp->dev); | 2339 | netif_tx_lock(bp->dev); |
2340 | if ((netif_queue_stopped(bp->dev)) && | 2340 | if ((netif_queue_stopped(bp->dev)) && |
2341 | (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) | 2341 | (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) |
2342 | netif_wake_queue(bp->dev); | 2342 | netif_wake_queue(bp->dev); |
2343 | netif_tx_unlock(bp->dev); | 2343 | netif_tx_unlock(bp->dev); |
2344 | } | 2344 | } |
2345 | } | 2345 | } |
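The comment above the bp->tx_cons update is the key ordering constraint in this function: the consumer-index store must become visible before the queue-stopped check, pairing with the transmit path, or a race could leave the queue stopped forever. Below is a self-contained userspace model of that pattern using C11 atomics; the struct and helper names are assumptions for illustration, and the real driver additionally re-checks the condition under netif_tx_lock() before waking the queue.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Userspace model of the producer/consumer indices and the queue flag. */
    struct txq {
            atomic_uint  prod;          /* advanced by the transmit path    */
            atomic_uint  cons;          /* advanced by the completion path  */
            atomic_bool  stopped;       /* queue flow-control flag          */
            unsigned int size;          /* ring size                        */
            unsigned int wake_thresh;   /* wake when this many slots free   */
    };

    unsigned int tx_avail(struct txq *q)
    {
            return q->size - (atomic_load(&q->prod) - atomic_load(&q->cons));
    }

    /* Completion path: publish cons first, full barrier, then check the
     * stopped flag; this mirrors the smp_mb() comment in bnx2_tx_int().
     */
    void tx_complete(struct txq *q, unsigned int new_cons)
    {
            atomic_store(&q->cons, new_cons);
            atomic_thread_fence(memory_order_seq_cst);  /* role of smp_mb() */

            if (atomic_load(&q->stopped) && tx_avail(q) > q->wake_thresh)
                    atomic_store(&q->stopped, false);   /* wake the queue   */
    }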
2346 | 2346 | ||
2347 | static inline void | 2347 | static inline void |
2348 | bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, | 2348 | bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, |
2349 | u16 cons, u16 prod) | 2349 | u16 cons, u16 prod) |
2350 | { | 2350 | { |
2351 | struct sw_bd *cons_rx_buf, *prod_rx_buf; | 2351 | struct sw_bd *cons_rx_buf, *prod_rx_buf; |
2352 | struct rx_bd *cons_bd, *prod_bd; | 2352 | struct rx_bd *cons_bd, *prod_bd; |
2353 | 2353 | ||
2354 | cons_rx_buf = &bp->rx_buf_ring[cons]; | 2354 | cons_rx_buf = &bp->rx_buf_ring[cons]; |
2355 | prod_rx_buf = &bp->rx_buf_ring[prod]; | 2355 | prod_rx_buf = &bp->rx_buf_ring[prod]; |
2356 | 2356 | ||
2357 | pci_dma_sync_single_for_device(bp->pdev, | 2357 | pci_dma_sync_single_for_device(bp->pdev, |
2358 | pci_unmap_addr(cons_rx_buf, mapping), | 2358 | pci_unmap_addr(cons_rx_buf, mapping), |
2359 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); | 2359 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); |
2360 | 2360 | ||
2361 | bp->rx_prod_bseq += bp->rx_buf_use_size; | 2361 | bp->rx_prod_bseq += bp->rx_buf_use_size; |
2362 | 2362 | ||
2363 | prod_rx_buf->skb = skb; | 2363 | prod_rx_buf->skb = skb; |
2364 | 2364 | ||
2365 | if (cons == prod) | 2365 | if (cons == prod) |
2366 | return; | 2366 | return; |
2367 | 2367 | ||
2368 | pci_unmap_addr_set(prod_rx_buf, mapping, | 2368 | pci_unmap_addr_set(prod_rx_buf, mapping, |
2369 | pci_unmap_addr(cons_rx_buf, mapping)); | 2369 | pci_unmap_addr(cons_rx_buf, mapping)); |
2370 | 2370 | ||
2371 | cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; | 2371 | cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; |
2372 | prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; | 2372 | prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; |
2373 | prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; | 2373 | prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; |
2374 | prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; | 2374 | prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; |
2375 | } | 2375 | } |
2376 | 2376 | ||
2377 | static int | 2377 | static int |
2378 | bnx2_rx_int(struct bnx2 *bp, int budget) | 2378 | bnx2_rx_int(struct bnx2 *bp, int budget) |
2379 | { | 2379 | { |
2380 | struct status_block *sblk = bp->status_blk; | 2380 | struct status_block *sblk = bp->status_blk; |
2381 | u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; | 2381 | u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; |
2382 | struct l2_fhdr *rx_hdr; | 2382 | struct l2_fhdr *rx_hdr; |
2383 | int rx_pkt = 0; | 2383 | int rx_pkt = 0; |
2384 | 2384 | ||
2385 | hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0; | 2385 | hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0; |
2386 | if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) { | 2386 | if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) { |
2387 | hw_cons++; | 2387 | hw_cons++; |
2388 | } | 2388 | } |
2389 | sw_cons = bp->rx_cons; | 2389 | sw_cons = bp->rx_cons; |
2390 | sw_prod = bp->rx_prod; | 2390 | sw_prod = bp->rx_prod; |
2391 | 2391 | ||
2392 | /* Memory barrier necessary as speculative reads of the rx | 2392 | /* Memory barrier necessary as speculative reads of the rx |
2393 | * buffer can be ahead of the index in the status block | 2393 | * buffer can be ahead of the index in the status block |
2394 | */ | 2394 | */ |
2395 | rmb(); | 2395 | rmb(); |
2396 | while (sw_cons != hw_cons) { | 2396 | while (sw_cons != hw_cons) { |
2397 | unsigned int len; | 2397 | unsigned int len; |
2398 | u32 status; | 2398 | u32 status; |
2399 | struct sw_bd *rx_buf; | 2399 | struct sw_bd *rx_buf; |
2400 | struct sk_buff *skb; | 2400 | struct sk_buff *skb; |
2401 | dma_addr_t dma_addr; | 2401 | dma_addr_t dma_addr; |
2402 | 2402 | ||
2403 | sw_ring_cons = RX_RING_IDX(sw_cons); | 2403 | sw_ring_cons = RX_RING_IDX(sw_cons); |
2404 | sw_ring_prod = RX_RING_IDX(sw_prod); | 2404 | sw_ring_prod = RX_RING_IDX(sw_prod); |
2405 | 2405 | ||
2406 | rx_buf = &bp->rx_buf_ring[sw_ring_cons]; | 2406 | rx_buf = &bp->rx_buf_ring[sw_ring_cons]; |
2407 | skb = rx_buf->skb; | 2407 | skb = rx_buf->skb; |
2408 | 2408 | ||
2409 | rx_buf->skb = NULL; | 2409 | rx_buf->skb = NULL; |
2410 | 2410 | ||
2411 | dma_addr = pci_unmap_addr(rx_buf, mapping); | 2411 | dma_addr = pci_unmap_addr(rx_buf, mapping); |
2412 | 2412 | ||
2413 | pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, | 2413 | pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, |
2414 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); | 2414 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); |
2415 | 2415 | ||
2416 | rx_hdr = (struct l2_fhdr *) skb->data; | 2416 | rx_hdr = (struct l2_fhdr *) skb->data; |
2417 | len = rx_hdr->l2_fhdr_pkt_len - 4; | 2417 | len = rx_hdr->l2_fhdr_pkt_len - 4; |
2418 | 2418 | ||
2419 | if ((status = rx_hdr->l2_fhdr_status) & | 2419 | if ((status = rx_hdr->l2_fhdr_status) & |
2420 | (L2_FHDR_ERRORS_BAD_CRC | | 2420 | (L2_FHDR_ERRORS_BAD_CRC | |
2421 | L2_FHDR_ERRORS_PHY_DECODE | | 2421 | L2_FHDR_ERRORS_PHY_DECODE | |
2422 | L2_FHDR_ERRORS_ALIGNMENT | | 2422 | L2_FHDR_ERRORS_ALIGNMENT | |
2423 | L2_FHDR_ERRORS_TOO_SHORT | | 2423 | L2_FHDR_ERRORS_TOO_SHORT | |
2424 | L2_FHDR_ERRORS_GIANT_FRAME)) { | 2424 | L2_FHDR_ERRORS_GIANT_FRAME)) { |
2425 | 2425 | ||
2426 | goto reuse_rx; | 2426 | goto reuse_rx; |
2427 | } | 2427 | } |
2428 | 2428 | ||
2429 | /* Since we don't have a jumbo ring, copy small packets | 2429 | /* Since we don't have a jumbo ring, copy small packets |
2430 | * if mtu > 1500 | 2430 | * if mtu > 1500 |
2431 | */ | 2431 | */ |
2432 | if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) { | 2432 | if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) { |
2433 | struct sk_buff *new_skb; | 2433 | struct sk_buff *new_skb; |
2434 | 2434 | ||
2435 | new_skb = netdev_alloc_skb(bp->dev, len + 2); | 2435 | new_skb = netdev_alloc_skb(bp->dev, len + 2); |
2436 | if (new_skb == NULL) | 2436 | if (new_skb == NULL) |
2437 | goto reuse_rx; | 2437 | goto reuse_rx; |
2438 | 2438 | ||
2439 | /* aligned copy */ | 2439 | /* aligned copy */ |
2440 | skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, | 2440 | skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, |
2441 | new_skb->data, len + 2); | 2441 | new_skb->data, len + 2); |
2442 | skb_reserve(new_skb, 2); | 2442 | skb_reserve(new_skb, 2); |
2443 | skb_put(new_skb, len); | 2443 | skb_put(new_skb, len); |
2444 | 2444 | ||
2445 | bnx2_reuse_rx_skb(bp, skb, | 2445 | bnx2_reuse_rx_skb(bp, skb, |
2446 | sw_ring_cons, sw_ring_prod); | 2446 | sw_ring_cons, sw_ring_prod); |
2447 | 2447 | ||
2448 | skb = new_skb; | 2448 | skb = new_skb; |
2449 | } | 2449 | } |
2450 | else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { | 2450 | else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { |
2451 | pci_unmap_single(bp->pdev, dma_addr, | 2451 | pci_unmap_single(bp->pdev, dma_addr, |
2452 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); | 2452 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); |
2453 | 2453 | ||
2454 | skb_reserve(skb, bp->rx_offset); | 2454 | skb_reserve(skb, bp->rx_offset); |
2455 | skb_put(skb, len); | 2455 | skb_put(skb, len); |
2456 | } | 2456 | } |
2457 | else { | 2457 | else { |
2458 | reuse_rx: | 2458 | reuse_rx: |
2459 | bnx2_reuse_rx_skb(bp, skb, | 2459 | bnx2_reuse_rx_skb(bp, skb, |
2460 | sw_ring_cons, sw_ring_prod); | 2460 | sw_ring_cons, sw_ring_prod); |
2461 | goto next_rx; | 2461 | goto next_rx; |
2462 | } | 2462 | } |
2463 | 2463 | ||
2464 | skb->protocol = eth_type_trans(skb, bp->dev); | 2464 | skb->protocol = eth_type_trans(skb, bp->dev); |
2465 | 2465 | ||
2466 | if ((len > (bp->dev->mtu + ETH_HLEN)) && | 2466 | if ((len > (bp->dev->mtu + ETH_HLEN)) && |
2467 | (ntohs(skb->protocol) != 0x8100)) { | 2467 | (ntohs(skb->protocol) != 0x8100)) { |
2468 | 2468 | ||
2469 | dev_kfree_skb(skb); | 2469 | dev_kfree_skb(skb); |
2470 | goto next_rx; | 2470 | goto next_rx; |
2471 | 2471 | ||
2472 | } | 2472 | } |
2473 | 2473 | ||
2474 | skb->ip_summed = CHECKSUM_NONE; | 2474 | skb->ip_summed = CHECKSUM_NONE; |
2475 | if (bp->rx_csum && | 2475 | if (bp->rx_csum && |
2476 | (status & (L2_FHDR_STATUS_TCP_SEGMENT | | 2476 | (status & (L2_FHDR_STATUS_TCP_SEGMENT | |
2477 | L2_FHDR_STATUS_UDP_DATAGRAM))) { | 2477 | L2_FHDR_STATUS_UDP_DATAGRAM))) { |
2478 | 2478 | ||
2479 | if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM | | 2479 | if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM | |
2480 | L2_FHDR_ERRORS_UDP_XSUM)) == 0)) | 2480 | L2_FHDR_ERRORS_UDP_XSUM)) == 0)) |
2481 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2481 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2482 | } | 2482 | } |
2483 | 2483 | ||
2484 | #ifdef BCM_VLAN | 2484 | #ifdef BCM_VLAN |
2485 | if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) { | 2485 | if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) { |
2486 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 2486 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, |
2487 | rx_hdr->l2_fhdr_vlan_tag); | 2487 | rx_hdr->l2_fhdr_vlan_tag); |
2488 | } | 2488 | } |
2489 | else | 2489 | else |
2490 | #endif | 2490 | #endif |
2491 | netif_receive_skb(skb); | 2491 | netif_receive_skb(skb); |
2492 | 2492 | ||
2493 | bp->dev->last_rx = jiffies; | 2493 | bp->dev->last_rx = jiffies; |
2494 | rx_pkt++; | 2494 | rx_pkt++; |
2495 | 2495 | ||
2496 | next_rx: | 2496 | next_rx: |
2497 | sw_cons = NEXT_RX_BD(sw_cons); | 2497 | sw_cons = NEXT_RX_BD(sw_cons); |
2498 | sw_prod = NEXT_RX_BD(sw_prod); | 2498 | sw_prod = NEXT_RX_BD(sw_prod); |
2499 | 2499 | ||
2500 | if ((rx_pkt == budget)) | 2500 | if ((rx_pkt == budget)) |
2501 | break; | 2501 | break; |
2502 | 2502 | ||
2503 | /* Refresh hw_cons to see if there is new work */ | 2503 | /* Refresh hw_cons to see if there is new work */ |
2504 | if (sw_cons == hw_cons) { | 2504 | if (sw_cons == hw_cons) { |
2505 | hw_cons = bp->hw_rx_cons = | 2505 | hw_cons = bp->hw_rx_cons = |
2506 | sblk->status_rx_quick_consumer_index0; | 2506 | sblk->status_rx_quick_consumer_index0; |
2507 | if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) | 2507 | if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) |
2508 | hw_cons++; | 2508 | hw_cons++; |
2509 | rmb(); | 2509 | rmb(); |
2510 | } | 2510 | } |
2511 | } | 2511 | } |
2512 | bp->rx_cons = sw_cons; | 2512 | bp->rx_cons = sw_cons; |
2513 | bp->rx_prod = sw_prod; | 2513 | bp->rx_prod = sw_prod; |
2514 | 2514 | ||
2515 | REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod); | 2515 | REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod); |
2516 | 2516 | ||
2517 | REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq); | 2517 | REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq); |
2518 | 2518 | ||
2519 | mmiowb(); | 2519 | mmiowb(); |
2520 | 2520 | ||
2521 | return rx_pkt; | 2521 | return rx_pkt; |
2522 | 2522 | ||
2523 | } | 2523 | } |
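The receive loop above implements a copy-break policy for jumbo MTUs: frames with error bits set are recycled in place via bnx2_reuse_rx_skb(), frames at or below RX_COPY_THRESH are copied into a small fresh skb so the large mapped buffer can be reused, and anything bigger is handed up whole with a replacement buffer allocated behind it. The decision tree, reduced to a pure function with placeholder names and a guessed threshold value (both are assumptions for illustration):

    #include <stdbool.h>
    #include <stddef.h>

    #define COPY_THRESH 92      /* placeholder for the driver's RX_COPY_THRESH */

    enum rx_action {
            RX_COPY,        /* copy into a small skb, recycle the big buffer   */
            RX_PASS_UP,     /* hand the mapped buffer up, map a fresh one      */
            RX_RECYCLE,     /* error or allocation failure: reuse in place     */
    };

    /* "had_error" stands for the l2_fhdr error-bits check; "alloc_ok" for
     * whether a replacement skb could be allocated.
     */
    enum rx_action classify_rx(size_t len, unsigned int mtu,
                               bool had_error, bool alloc_ok)
    {
            if (had_error)
                    return RX_RECYCLE;
            if (mtu > 1500 && len <= COPY_THRESH)
                    return alloc_ok ? RX_COPY : RX_RECYCLE;
            return alloc_ok ? RX_PASS_UP : RX_RECYCLE;
    }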
2524 | 2524 | ||
2525 | /* MSI ISR - The only difference between this and the INTx ISR | 2525 | /* MSI ISR - The only difference between this and the INTx ISR |
2526 | * is that the MSI interrupt is always serviced. | 2526 | * is that the MSI interrupt is always serviced. |
2527 | */ | 2527 | */ |
2528 | static irqreturn_t | 2528 | static irqreturn_t |
2529 | bnx2_msi(int irq, void *dev_instance) | 2529 | bnx2_msi(int irq, void *dev_instance) |
2530 | { | 2530 | { |
2531 | struct net_device *dev = dev_instance; | 2531 | struct net_device *dev = dev_instance; |
2532 | struct bnx2 *bp = netdev_priv(dev); | 2532 | struct bnx2 *bp = netdev_priv(dev); |
2533 | 2533 | ||
2534 | prefetch(bp->status_blk); | 2534 | prefetch(bp->status_blk); |
2535 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2535 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2536 | BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | | 2536 | BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | |
2537 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); | 2537 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); |
2538 | 2538 | ||
2539 | /* Return here if interrupt is disabled. */ | 2539 | /* Return here if interrupt is disabled. */ |
2540 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2540 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2541 | return IRQ_HANDLED; | 2541 | return IRQ_HANDLED; |
2542 | 2542 | ||
2543 | netif_rx_schedule(dev); | 2543 | netif_rx_schedule(dev); |
2544 | 2544 | ||
2545 | return IRQ_HANDLED; | 2545 | return IRQ_HANDLED; |
2546 | } | 2546 | } |
2547 | 2547 | ||
2548 | static irqreturn_t | 2548 | static irqreturn_t |
2549 | bnx2_msi_1shot(int irq, void *dev_instance) | 2549 | bnx2_msi_1shot(int irq, void *dev_instance) |
2550 | { | 2550 | { |
2551 | struct net_device *dev = dev_instance; | 2551 | struct net_device *dev = dev_instance; |
2552 | struct bnx2 *bp = netdev_priv(dev); | 2552 | struct bnx2 *bp = netdev_priv(dev); |
2553 | 2553 | ||
2554 | prefetch(bp->status_blk); | 2554 | prefetch(bp->status_blk); |
2555 | 2555 | ||
2556 | /* Return here if interrupt is disabled. */ | 2556 | /* Return here if interrupt is disabled. */ |
2557 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2557 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2558 | return IRQ_HANDLED; | 2558 | return IRQ_HANDLED; |
2559 | 2559 | ||
2560 | netif_rx_schedule(dev); | 2560 | netif_rx_schedule(dev); |
2561 | 2561 | ||
2562 | return IRQ_HANDLED; | 2562 | return IRQ_HANDLED; |
2563 | } | 2563 | } |
2564 | 2564 | ||
2565 | static irqreturn_t | 2565 | static irqreturn_t |
2566 | bnx2_interrupt(int irq, void *dev_instance) | 2566 | bnx2_interrupt(int irq, void *dev_instance) |
2567 | { | 2567 | { |
2568 | struct net_device *dev = dev_instance; | 2568 | struct net_device *dev = dev_instance; |
2569 | struct bnx2 *bp = netdev_priv(dev); | 2569 | struct bnx2 *bp = netdev_priv(dev); |
2570 | struct status_block *sblk = bp->status_blk; | 2570 | struct status_block *sblk = bp->status_blk; |
2571 | 2571 | ||
2572 | /* When using INTx, it is possible for the interrupt to arrive | 2572 | /* When using INTx, it is possible for the interrupt to arrive |
2573 | * at the CPU before the status block posted prior to the | 2573 | * at the CPU before the status block posted prior to the |
2574 | * interrupt. Reading a register will flush the status block. | 2574 | * interrupt. Reading a register will flush the status block. |
2575 | * When using MSI, the MSI message will always complete after | 2575 | * When using MSI, the MSI message will always complete after |
2576 | * the status block write. | 2576 | * the status block write. |
2577 | */ | 2577 | */ |
2578 | if ((sblk->status_idx == bp->last_status_idx) && | 2578 | if ((sblk->status_idx == bp->last_status_idx) && |
2579 | (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & | 2579 | (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & |
2580 | BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) | 2580 | BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) |
2581 | return IRQ_NONE; | 2581 | return IRQ_NONE; |
2582 | 2582 | ||
2583 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2583 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2584 | BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | | 2584 | BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | |
2585 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); | 2585 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); |
2586 | 2586 | ||
2587 | /* Read back to deassert IRQ immediately to avoid too many | 2587 | /* Read back to deassert IRQ immediately to avoid too many |
2588 | * spurious interrupts. | 2588 | * spurious interrupts. |
2589 | */ | 2589 | */ |
2590 | REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); | 2590 | REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); |
2591 | 2591 | ||
2592 | /* Return here if interrupt is shared and is disabled. */ | 2592 | /* Return here if interrupt is shared and is disabled. */ |
2593 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) | 2593 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) |
2594 | return IRQ_HANDLED; | 2594 | return IRQ_HANDLED; |
2595 | 2595 | ||
2596 | if (netif_rx_schedule_prep(dev)) { | 2596 | if (netif_rx_schedule_prep(dev)) { |
2597 | bp->last_status_idx = sblk->status_idx; | 2597 | bp->last_status_idx = sblk->status_idx; |
2598 | __netif_rx_schedule(dev); | 2598 | __netif_rx_schedule(dev); |
2599 | } | 2599 | } |
2600 | 2600 | ||
2601 | return IRQ_HANDLED; | 2601 | return IRQ_HANDLED; |
2602 | } | 2602 | } |
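With a shared INTx line, bnx2_interrupt() first has to decide whether the interrupt is really ours: if the status block index has not advanced and the INTA value bit in BNX2_PCICFG_MISC_STATUS reads set (which, as I read the code, means the chip is not driving the active-low INTA line), it returns IRQ_NONE so another device sharing the line can claim it; the register read also flushes any status block write still in flight. A condensed sketch of just that claim decision, with the register accesses modeled as parameters and local enum names rather than the kernel's IRQ_NONE/IRQ_HANDLED:

    #include <stdbool.h>
    #include <stdint.h>

    enum irq_result { IRQ_NONE_, IRQ_HANDLED_ };  /* local names only */

    /* status_idx comes from the DMA'd status block; inta_value_bit models
     * the MISC_STATUS INTA value bit, set when this device is not
     * asserting the line.
     */
    enum irq_result intx_claim(uint32_t status_idx, uint32_t last_status_idx,
                               bool inta_value_bit)
    {
            if (status_idx == last_status_idx && inta_value_bit)
                    return IRQ_NONE_;   /* nothing new and line idle: not ours */
            return IRQ_HANDLED_;        /* ack, mask, and schedule the poll    */
    }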
2603 | 2603 | ||
2604 | #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ | 2604 | #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ |
2605 | STATUS_ATTN_BITS_TIMER_ABORT) | 2605 | STATUS_ATTN_BITS_TIMER_ABORT) |
2606 | 2606 | ||
2607 | static inline int | 2607 | static inline int |
2608 | bnx2_has_work(struct bnx2 *bp) | 2608 | bnx2_has_work(struct bnx2 *bp) |
2609 | { | 2609 | { |
2610 | struct status_block *sblk = bp->status_blk; | 2610 | struct status_block *sblk = bp->status_blk; |
2611 | 2611 | ||
2612 | if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) || | 2612 | if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) || |
2613 | (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)) | 2613 | (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)) |
2614 | return 1; | 2614 | return 1; |
2615 | 2615 | ||
2616 | if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != | 2616 | if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != |
2617 | (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) | 2617 | (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) |
2618 | return 1; | 2618 | return 1; |
2619 | 2619 | ||
2620 | return 0; | 2620 | return 0; |
2621 | } | 2621 | } |
2622 | 2622 | ||
2623 | static int | 2623 | static int |
2624 | bnx2_poll(struct net_device *dev, int *budget) | 2624 | bnx2_poll(struct net_device *dev, int *budget) |
2625 | { | 2625 | { |
2626 | struct bnx2 *bp = netdev_priv(dev); | 2626 | struct bnx2 *bp = netdev_priv(dev); |
2627 | struct status_block *sblk = bp->status_blk; | 2627 | struct status_block *sblk = bp->status_blk; |
2628 | u32 status_attn_bits = sblk->status_attn_bits; | 2628 | u32 status_attn_bits = sblk->status_attn_bits; |
2629 | u32 status_attn_bits_ack = sblk->status_attn_bits_ack; | 2629 | u32 status_attn_bits_ack = sblk->status_attn_bits_ack; |
2630 | 2630 | ||
2631 | if ((status_attn_bits & STATUS_ATTN_EVENTS) != | 2631 | if ((status_attn_bits & STATUS_ATTN_EVENTS) != |
2632 | (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { | 2632 | (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { |
2633 | 2633 | ||
2634 | bnx2_phy_int(bp); | 2634 | bnx2_phy_int(bp); |
2635 | 2635 | ||
2636 | /* This is needed to take care of transient status | 2636 | /* This is needed to take care of transient status |
2637 | * during link changes. | 2637 | * during link changes. |
2638 | */ | 2638 | */ |
2639 | REG_WR(bp, BNX2_HC_COMMAND, | 2639 | REG_WR(bp, BNX2_HC_COMMAND, |
2640 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); | 2640 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); |
2641 | REG_RD(bp, BNX2_HC_COMMAND); | 2641 | REG_RD(bp, BNX2_HC_COMMAND); |
2642 | } | 2642 | } |
2643 | 2643 | ||
2644 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) | 2644 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) |
2645 | bnx2_tx_int(bp); | 2645 | bnx2_tx_int(bp); |
2646 | 2646 | ||
2647 | if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) { | 2647 | if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) { |
2648 | int orig_budget = *budget; | 2648 | int orig_budget = *budget; |
2649 | int work_done; | 2649 | int work_done; |
2650 | 2650 | ||
2651 | if (orig_budget > dev->quota) | 2651 | if (orig_budget > dev->quota) |
2652 | orig_budget = dev->quota; | 2652 | orig_budget = dev->quota; |
2653 | 2653 | ||
2654 | work_done = bnx2_rx_int(bp, orig_budget); | 2654 | work_done = bnx2_rx_int(bp, orig_budget); |
2655 | *budget -= work_done; | 2655 | *budget -= work_done; |
2656 | dev->quota -= work_done; | 2656 | dev->quota -= work_done; |
2657 | } | 2657 | } |
2658 | 2658 | ||
2659 | bp->last_status_idx = bp->status_blk->status_idx; | 2659 | bp->last_status_idx = bp->status_blk->status_idx; |
2660 | rmb(); | 2660 | rmb(); |
2661 | 2661 | ||
2662 | if (!bnx2_has_work(bp)) { | 2662 | if (!bnx2_has_work(bp)) { |
2663 | netif_rx_complete(dev); | 2663 | netif_rx_complete(dev); |
2664 | if (likely(bp->flags & USING_MSI_FLAG)) { | 2664 | if (likely(bp->flags & USING_MSI_FLAG)) { |
2665 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2665 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2666 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 2666 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
2667 | bp->last_status_idx); | 2667 | bp->last_status_idx); |
2668 | return 0; | 2668 | return 0; |
2669 | } | 2669 | } |
2670 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2670 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2671 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 2671 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
2672 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | | 2672 | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | |
2673 | bp->last_status_idx); | 2673 | bp->last_status_idx); |
2674 | 2674 | ||
2675 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, | 2675 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, |
2676 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | | 2676 | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | |
2677 | bp->last_status_idx); | 2677 | bp->last_status_idx); |
2678 | return 0; | 2678 | return 0; |
2679 | } | 2679 | } |
2680 | 2680 | ||
2681 | return 1; | 2681 | return 1; |
2682 | } | 2682 | } |
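bnx2_poll() follows the old two-counter NAPI contract: the RX work limit is the smaller of *budget and dev->quota, the work actually done is subtracted from both, and the function returns 1 while bnx2_has_work() still reports pending events or 0 after netif_rx_complete() re-arms interrupts at the last seen status index. The budget bookkeeping in isolation, as a sketch; rx_work() is a placeholder for bnx2_rx_int(), and the final keep-polling decision is left to the caller as in the driver.

    /* Old-style (two-counter) NAPI budget accounting, as used above. */
    int poll_budget(int *budget, int *quota, int (*rx_work)(int))
    {
            int limit = (*budget < *quota) ? *budget : *quota;
            int done  = rx_work(limit);

            *budget -= done;    /* softirq-wide budget for this round */
            *quota  -= done;    /* per-device quota                   */

            return done;        /* caller: more work pending -> return 1,
                                 * otherwise complete and re-enable IRQs */
    }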
2683 | 2683 | ||
2684 | /* Called with rtnl_lock from vlan functions and also netif_tx_lock | 2684 | /* Called with rtnl_lock from vlan functions and also netif_tx_lock |
2685 | * from set_multicast. | 2685 | * from set_multicast. |
2686 | */ | 2686 | */ |
2687 | static void | 2687 | static void |
2688 | bnx2_set_rx_mode(struct net_device *dev) | 2688 | bnx2_set_rx_mode(struct net_device *dev) |
2689 | { | 2689 | { |
2690 | struct bnx2 *bp = netdev_priv(dev); | 2690 | struct bnx2 *bp = netdev_priv(dev); |
2691 | u32 rx_mode, sort_mode; | 2691 | u32 rx_mode, sort_mode; |
2692 | int i; | 2692 | int i; |
2693 | 2693 | ||
2694 | spin_lock_bh(&bp->phy_lock); | 2694 | spin_lock_bh(&bp->phy_lock); |
2695 | 2695 | ||
2696 | rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | | 2696 | rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | |
2697 | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); | 2697 | BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); |
2698 | sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; | 2698 | sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; |
2699 | #ifdef BCM_VLAN | 2699 | #ifdef BCM_VLAN |
2700 | if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG)) | 2700 | if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG)) |
2701 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; | 2701 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; |
2702 | #else | 2702 | #else |
2703 | if (!(bp->flags & ASF_ENABLE_FLAG)) | 2703 | if (!(bp->flags & ASF_ENABLE_FLAG)) |
2704 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; | 2704 | rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; |
2705 | #endif | 2705 | #endif |
2706 | if (dev->flags & IFF_PROMISC) { | 2706 | if (dev->flags & IFF_PROMISC) { |
2707 | /* Promiscuous mode. */ | 2707 | /* Promiscuous mode. */ |
2708 | rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; | 2708 | rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; |
2709 | sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | | 2709 | sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | |
2710 | BNX2_RPM_SORT_USER0_PROM_VLAN; | 2710 | BNX2_RPM_SORT_USER0_PROM_VLAN; |
2711 | } | 2711 | } |
2712 | else if (dev->flags & IFF_ALLMULTI) { | 2712 | else if (dev->flags & IFF_ALLMULTI) { |
2713 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { | 2713 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { |
2714 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), | 2714 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), |
2715 | 0xffffffff); | 2715 | 0xffffffff); |
2716 | } | 2716 | } |
2717 | sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; | 2717 | sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; |
2718 | } | 2718 | } |
2719 | else { | 2719 | else { |
2720 | /* Accept one or more multicast(s). */ | 2720 | /* Accept one or more multicast(s). */ |
2721 | struct dev_mc_list *mclist; | 2721 | struct dev_mc_list *mclist; |
2722 | u32 mc_filter[NUM_MC_HASH_REGISTERS]; | 2722 | u32 mc_filter[NUM_MC_HASH_REGISTERS]; |
2723 | u32 regidx; | 2723 | u32 regidx; |
2724 | u32 bit; | 2724 | u32 bit; |
2725 | u32 crc; | 2725 | u32 crc; |
2726 | 2726 | ||
2727 | memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); | 2727 | memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); |
2728 | 2728 | ||
2729 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | 2729 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; |
2730 | i++, mclist = mclist->next) { | 2730 | i++, mclist = mclist->next) { |
2731 | 2731 | ||
2732 | crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); | 2732 | crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); |
2733 | bit = crc & 0xff; | 2733 | bit = crc & 0xff; |
2734 | regidx = (bit & 0xe0) >> 5; | 2734 | regidx = (bit & 0xe0) >> 5; |
2735 | bit &= 0x1f; | 2735 | bit &= 0x1f; |
2736 | mc_filter[regidx] |= (1 << bit); | 2736 | mc_filter[regidx] |= (1 << bit); |
2737 | } | 2737 | } |
2738 | 2738 | ||
2739 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { | 2739 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { |
2740 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), | 2740 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), |
2741 | mc_filter[i]); | 2741 | mc_filter[i]); |
2742 | } | 2742 | } |
2743 | 2743 | ||
2744 | sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; | 2744 | sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; |
2745 | } | 2745 | } |
2746 | 2746 | ||
2747 | if (rx_mode != bp->rx_mode) { | 2747 | if (rx_mode != bp->rx_mode) { |
2748 | bp->rx_mode = rx_mode; | 2748 | bp->rx_mode = rx_mode; |
2749 | REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); | 2749 | REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); |
2750 | } | 2750 | } |
2751 | 2751 | ||
2752 | REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); | 2752 | REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); |
2753 | REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); | 2753 | REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); |
2754 | REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); | 2754 | REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); |
2755 | 2755 | ||
2756 | spin_unlock_bh(&bp->phy_lock); | 2756 | spin_unlock_bh(&bp->phy_lock); |
2757 | } | 2757 | } |
2758 | 2758 | ||
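
For reference, the multicast filter above hashes each address with ether_crc_le() and uses only the low 8 bits of the CRC: bits 7:5 pick one of the eight 32-bit BNX2_EMAC_MULTICAST_HASH registers and bits 4:0 pick the bit within it. A minimal user-space sketch of that mapping (crc32_le_bytes() is a hypothetical stand-in, assuming ether_crc_le() is the reflected CRC-32 with polynomial 0xedb88320, seeded with ~0 and taken without a final inversion):

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32 (poly 0xedb88320), seeded with ~0, no final inversion --
     * assumed here to behave like the kernel's ether_crc_le(). */
    static uint32_t crc32_le_bytes(uint32_t crc, const uint8_t *p, int len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
    }

    int main(void)
    {
        uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* example group */
        uint32_t crc = crc32_le_bytes(0xffffffffu, addr, 6);
        uint32_t bit = crc & 0xff;
        uint32_t regidx = (bit & 0xe0) >> 5;   /* which of MULTICAST_HASH0..7 */

        bit &= 0x1f;                           /* which bit inside that register */
        printf("hash reg %u, bit %u (mask 0x%08x)\n",
               (unsigned)regidx, (unsigned)bit, 1u << bit);
        return 0;
    }
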
2759 | #define FW_BUF_SIZE 0x8000 | 2759 | #define FW_BUF_SIZE 0x8000 |
2760 | 2760 | ||
2761 | static int | 2761 | static int |
2762 | bnx2_gunzip_init(struct bnx2 *bp) | 2762 | bnx2_gunzip_init(struct bnx2 *bp) |
2763 | { | 2763 | { |
2764 | if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL) | 2764 | if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL) |
2765 | goto gunzip_nomem1; | 2765 | goto gunzip_nomem1; |
2766 | 2766 | ||
2767 | if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL) | 2767 | if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL) |
2768 | goto gunzip_nomem2; | 2768 | goto gunzip_nomem2; |
2769 | 2769 | ||
2770 | bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); | 2770 | bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); |
2771 | if (bp->strm->workspace == NULL) | 2771 | if (bp->strm->workspace == NULL) |
2772 | goto gunzip_nomem3; | 2772 | goto gunzip_nomem3; |
2773 | 2773 | ||
2774 | return 0; | 2774 | return 0; |
2775 | 2775 | ||
2776 | gunzip_nomem3: | 2776 | gunzip_nomem3: |
2777 | kfree(bp->strm); | 2777 | kfree(bp->strm); |
2778 | bp->strm = NULL; | 2778 | bp->strm = NULL; |
2779 | 2779 | ||
2780 | gunzip_nomem2: | 2780 | gunzip_nomem2: |
2781 | vfree(bp->gunzip_buf); | 2781 | vfree(bp->gunzip_buf); |
2782 | bp->gunzip_buf = NULL; | 2782 | bp->gunzip_buf = NULL; |
2783 | 2783 | ||
2784 | gunzip_nomem1: | 2784 | gunzip_nomem1: |
2785 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for " | 2785 | printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for " |
2786 | "uncompression.\n", bp->dev->name); | 2786 | "uncompression.\n", bp->dev->name); |
2787 | return -ENOMEM; | 2787 | return -ENOMEM; |
2788 | } | 2788 | } |
2789 | 2789 | ||
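
bnx2_gunzip_init() above follows the usual goto-unwind style: each allocation that fails jumps to a label that releases only what was already allocated, in reverse order, behind a single -ENOMEM exit. A stripped-down user-space sketch of the same shape, with hypothetical names and malloc/free standing in for vmalloc/kmalloc:

    #include <stdlib.h>

    struct three_bufs { void *a; void *b; void *c; };

    /* Sketch of the goto-unwind allocation pattern used by bnx2_gunzip_init():
     * a failing allocation jumps past the frees for buffers that were never
     * obtained, so cleanup happens in reverse order of allocation. */
    int three_bufs_init(struct three_bufs *t)
    {
        if ((t->a = malloc(0x8000)) == NULL)
            goto nomem1;
        if ((t->b = malloc(256)) == NULL)
            goto nomem2;
        if ((t->c = malloc(4096)) == NULL)
            goto nomem3;
        return 0;

    nomem3:
        free(t->b);
        t->b = NULL;
    nomem2:
        free(t->a);
        t->a = NULL;
    nomem1:
        return -1;    /* the driver returns -ENOMEM here */
    }
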
2790 | static void | 2790 | static void |
2791 | bnx2_gunzip_end(struct bnx2 *bp) | 2791 | bnx2_gunzip_end(struct bnx2 *bp) |
2792 | { | 2792 | { |
2793 | kfree(bp->strm->workspace); | 2793 | kfree(bp->strm->workspace); |
2794 | 2794 | ||
2795 | kfree(bp->strm); | 2795 | kfree(bp->strm); |
2796 | bp->strm = NULL; | 2796 | bp->strm = NULL; |
2797 | 2797 | ||
2798 | if (bp->gunzip_buf) { | 2798 | if (bp->gunzip_buf) { |
2799 | vfree(bp->gunzip_buf); | 2799 | vfree(bp->gunzip_buf); |
2800 | bp->gunzip_buf = NULL; | 2800 | bp->gunzip_buf = NULL; |
2801 | } | 2801 | } |
2802 | } | 2802 | } |
2803 | 2803 | ||
2804 | static int | 2804 | static int |
2805 | bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen) | 2805 | bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen) |
2806 | { | 2806 | { |
2807 | int n, rc; | 2807 | int n, rc; |
2808 | 2808 | ||
2809 | /* check gzip header */ | 2809 | /* check gzip header */ |
2810 | if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) | 2810 | if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) |
2811 | return -EINVAL; | 2811 | return -EINVAL; |
2812 | 2812 | ||
2813 | n = 10; | 2813 | n = 10; |
2814 | 2814 | ||
2815 | #define FNAME 0x8 | 2815 | #define FNAME 0x8 |
2816 | if (zbuf[3] & FNAME) | 2816 | if (zbuf[3] & FNAME) |
2817 | while ((zbuf[n++] != 0) && (n < len)); | 2817 | while ((zbuf[n++] != 0) && (n < len)); |
2818 | 2818 | ||
2819 | bp->strm->next_in = zbuf + n; | 2819 | bp->strm->next_in = zbuf + n; |
2820 | bp->strm->avail_in = len - n; | 2820 | bp->strm->avail_in = len - n; |
2821 | bp->strm->next_out = bp->gunzip_buf; | 2821 | bp->strm->next_out = bp->gunzip_buf; |
2822 | bp->strm->avail_out = FW_BUF_SIZE; | 2822 | bp->strm->avail_out = FW_BUF_SIZE; |
2823 | 2823 | ||
2824 | rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); | 2824 | rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); |
2825 | if (rc != Z_OK) | 2825 | if (rc != Z_OK) |
2826 | return rc; | 2826 | return rc; |
2827 | 2827 | ||
2828 | rc = zlib_inflate(bp->strm, Z_FINISH); | 2828 | rc = zlib_inflate(bp->strm, Z_FINISH); |
2829 | 2829 | ||
2830 | *outlen = FW_BUF_SIZE - bp->strm->avail_out; | 2830 | *outlen = FW_BUF_SIZE - bp->strm->avail_out; |
2831 | *outbuf = bp->gunzip_buf; | 2831 | *outbuf = bp->gunzip_buf; |
2832 | 2832 | ||
2833 | if ((rc != Z_OK) && (rc != Z_STREAM_END)) | 2833 | if ((rc != Z_OK) && (rc != Z_STREAM_END)) |
2834 | printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n", | 2834 | printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n", |
2835 | bp->dev->name, bp->strm->msg); | 2835 | bp->dev->name, bp->strm->msg); |
2836 | 2836 | ||
2837 | zlib_inflateEnd(bp->strm); | 2837 | zlib_inflateEnd(bp->strm); |
2838 | 2838 | ||
2839 | if (rc == Z_STREAM_END) | 2839 | if (rc == Z_STREAM_END) |
2840 | return 0; | 2840 | return 0; |
2841 | 2841 | ||
2842 | return rc; | 2842 | return rc; |
2843 | } | 2843 | } |
2844 | 2844 | ||
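
bnx2_gunzip() does not hand the whole image to zlib: it checks the gzip magic and method bytes itself (0x1f, 0x8b, and 8 == Z_DEFLATED), skips the fixed 10-byte header plus the NUL-terminated name when the FNAME flag (0x8) is set, and then inflates the raw deflate stream via -MAX_WBITS. A standalone sketch of that header walk, with gzip_payload_offset() as a hypothetical helper that, like the code above, handles only the FNAME optional field:

    #define GZ_FNAME 0x08    /* FLG bit: original file name present */

    /* Return the offset of the raw deflate data inside a gzip image, or -1 if
     * the header is not the 0x1f 0x8b <deflate> form handled by the driver. */
    int gzip_payload_offset(const unsigned char *zbuf, int len)
    {
        int n = 10;                     /* fixed gzip header */

        if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
            return -1;                  /* 8 == Z_DEFLATED */

        if (zbuf[3] & GZ_FNAME)         /* skip NUL-terminated file name */
            while (n < len && zbuf[n++] != 0)
                ;
        return n;
    }
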
2845 | static void | 2845 | static void |
2846 | load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len, | 2846 | load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len, |
2847 | u32 rv2p_proc) | 2847 | u32 rv2p_proc) |
2848 | { | 2848 | { |
2849 | int i; | 2849 | int i; |
2850 | u32 val; | 2850 | u32 val; |
2851 | 2851 | ||
2852 | 2852 | ||
2853 | for (i = 0; i < rv2p_code_len; i += 8) { | 2853 | for (i = 0; i < rv2p_code_len; i += 8) { |
2854 | REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code)); | 2854 | REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code)); |
2855 | rv2p_code++; | 2855 | rv2p_code++; |
2856 | REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code)); | 2856 | REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code)); |
2857 | rv2p_code++; | 2857 | rv2p_code++; |
2858 | 2858 | ||
2859 | if (rv2p_proc == RV2P_PROC1) { | 2859 | if (rv2p_proc == RV2P_PROC1) { |
2860 | val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR; | 2860 | val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR; |
2861 | REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val); | 2861 | REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val); |
2862 | } | 2862 | } |
2863 | else { | 2863 | else { |
2864 | val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR; | 2864 | val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR; |
2865 | REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val); | 2865 | REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val); |
2866 | } | 2866 | } |
2867 | } | 2867 | } |
2868 | 2868 | ||
2869 | /* Reset the processor, un-stall is done later. */ | 2869 | /* Reset the processor, un-stall is done later. */ |
2870 | if (rv2p_proc == RV2P_PROC1) { | 2870 | if (rv2p_proc == RV2P_PROC1) { |
2871 | REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); | 2871 | REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); |
2872 | } | 2872 | } |
2873 | else { | 2873 | else { |
2874 | REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); | 2874 | REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); |
2875 | } | 2875 | } |
2876 | } | 2876 | } |
2877 | 2877 | ||
2878 | static int | 2878 | static int |
2879 | load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) | 2879 | load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) |
2880 | { | 2880 | { |
2881 | u32 offset; | 2881 | u32 offset; |
2882 | u32 val; | 2882 | u32 val; |
2883 | int rc; | 2883 | int rc; |
2884 | 2884 | ||
2885 | /* Halt the CPU. */ | 2885 | /* Halt the CPU. */ |
2886 | val = REG_RD_IND(bp, cpu_reg->mode); | 2886 | val = REG_RD_IND(bp, cpu_reg->mode); |
2887 | val |= cpu_reg->mode_value_halt; | 2887 | val |= cpu_reg->mode_value_halt; |
2888 | REG_WR_IND(bp, cpu_reg->mode, val); | 2888 | REG_WR_IND(bp, cpu_reg->mode, val); |
2889 | REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear); | 2889 | REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear); |
2890 | 2890 | ||
2891 | /* Load the Text area. */ | 2891 | /* Load the Text area. */ |
2892 | offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); | 2892 | offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); |
2893 | if (fw->gz_text) { | 2893 | if (fw->gz_text) { |
2894 | u32 text_len; | 2894 | u32 text_len; |
2895 | void *text; | 2895 | void *text; |
2896 | 2896 | ||
2897 | rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text, | 2897 | rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text, |
2898 | &text_len); | 2898 | &text_len); |
2899 | if (rc) | 2899 | if (rc) |
2900 | return rc; | 2900 | return rc; |
2901 | 2901 | ||
2902 | fw->text = text; | 2902 | fw->text = text; |
2903 | } | 2903 | } |
2904 | if (fw->gz_text) { | 2904 | if (fw->gz_text) { |
2905 | int j; | 2905 | int j; |
2906 | 2906 | ||
2907 | for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { | 2907 | for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { |
2908 | REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j])); | 2908 | REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j])); |
2909 | } | 2909 | } |
2910 | } | 2910 | } |
2911 | 2911 | ||
2912 | /* Load the Data area. */ | 2912 | /* Load the Data area. */ |
2913 | offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); | 2913 | offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); |
2914 | if (fw->data) { | 2914 | if (fw->data) { |
2915 | int j; | 2915 | int j; |
2916 | 2916 | ||
2917 | for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { | 2917 | for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { |
2918 | REG_WR_IND(bp, offset, fw->data[j]); | 2918 | REG_WR_IND(bp, offset, fw->data[j]); |
2919 | } | 2919 | } |
2920 | } | 2920 | } |
2921 | 2921 | ||
2922 | /* Load the SBSS area. */ | 2922 | /* Load the SBSS area. */ |
2923 | offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); | 2923 | offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); |
2924 | if (fw->sbss) { | 2924 | if (fw->sbss) { |
2925 | int j; | 2925 | int j; |
2926 | 2926 | ||
2927 | for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { | 2927 | for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { |
2928 | REG_WR_IND(bp, offset, fw->sbss[j]); | 2928 | REG_WR_IND(bp, offset, fw->sbss[j]); |
2929 | } | 2929 | } |
2930 | } | 2930 | } |
2931 | 2931 | ||
2932 | /* Load the BSS area. */ | 2932 | /* Load the BSS area. */ |
2933 | offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); | 2933 | offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); |
2934 | if (fw->bss) { | 2934 | if (fw->bss) { |
2935 | int j; | 2935 | int j; |
2936 | 2936 | ||
2937 | for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { | 2937 | for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { |
2938 | REG_WR_IND(bp, offset, fw->bss[j]); | 2938 | REG_WR_IND(bp, offset, fw->bss[j]); |
2939 | } | 2939 | } |
2940 | } | 2940 | } |
2941 | 2941 | ||
2942 | /* Load the Read-Only area. */ | 2942 | /* Load the Read-Only area. */ |
2943 | offset = cpu_reg->spad_base + | 2943 | offset = cpu_reg->spad_base + |
2944 | (fw->rodata_addr - cpu_reg->mips_view_base); | 2944 | (fw->rodata_addr - cpu_reg->mips_view_base); |
2945 | if (fw->rodata) { | 2945 | if (fw->rodata) { |
2946 | int j; | 2946 | int j; |
2947 | 2947 | ||
2948 | for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { | 2948 | for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { |
2949 | REG_WR_IND(bp, offset, fw->rodata[j]); | 2949 | REG_WR_IND(bp, offset, fw->rodata[j]); |
2950 | } | 2950 | } |
2951 | } | 2951 | } |
2952 | 2952 | ||
2953 | /* Clear the pre-fetch instruction. */ | 2953 | /* Clear the pre-fetch instruction. */ |
2954 | REG_WR_IND(bp, cpu_reg->inst, 0); | 2954 | REG_WR_IND(bp, cpu_reg->inst, 0); |
2955 | REG_WR_IND(bp, cpu_reg->pc, fw->start_addr); | 2955 | REG_WR_IND(bp, cpu_reg->pc, fw->start_addr); |
2956 | 2956 | ||
2957 | /* Start the CPU. */ | 2957 | /* Start the CPU. */ |
2958 | val = REG_RD_IND(bp, cpu_reg->mode); | 2958 | val = REG_RD_IND(bp, cpu_reg->mode); |
2959 | val &= ~cpu_reg->mode_value_halt; | 2959 | val &= ~cpu_reg->mode_value_halt; |
2960 | REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear); | 2960 | REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear); |
2961 | REG_WR_IND(bp, cpu_reg->mode, val); | 2961 | REG_WR_IND(bp, cpu_reg->mode, val); |
2962 | 2962 | ||
2963 | return 0; | 2963 | return 0; |
2964 | } | 2964 | } |
2965 | 2965 | ||
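
Every section in load_cpu_fw() is placed with the same arithmetic: the firmware's link address lives in the on-chip CPU's MIPS view (mips_view_base, 0x8000000 here) and is written through the scratchpad window starting at spad_base. A hypothetical helper that makes the mapping explicit; for the RX processor, for instance, a text word linked at 0x8000000 + N lands at BNX2_RXP_SCRATCH + N:

    #include <stdint.h>

    /* Hypothetical helper mirroring the section placement in load_cpu_fw():
     * translate a firmware link address into the indirect-register offset
     * inside the processor's scratchpad window. */
    uint32_t cpu_scratch_offset(uint32_t spad_base, uint32_t mips_view_base,
                                uint32_t section_addr)
    {
        return spad_base + (section_addr - mips_view_base);
    }
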
2966 | static int | 2966 | static int |
2967 | bnx2_init_cpus(struct bnx2 *bp) | 2967 | bnx2_init_cpus(struct bnx2 *bp) |
2968 | { | 2968 | { |
2969 | struct cpu_reg cpu_reg; | 2969 | struct cpu_reg cpu_reg; |
2970 | struct fw_info *fw; | 2970 | struct fw_info *fw; |
2971 | int rc = 0; | 2971 | int rc = 0; |
2972 | void *text; | 2972 | void *text; |
2973 | u32 text_len; | 2973 | u32 text_len; |
2974 | 2974 | ||
2975 | if ((rc = bnx2_gunzip_init(bp)) != 0) | 2975 | if ((rc = bnx2_gunzip_init(bp)) != 0) |
2976 | return rc; | 2976 | return rc; |
2977 | 2977 | ||
2978 | /* Initialize the RV2P processor. */ | 2978 | /* Initialize the RV2P processor. */ |
2979 | rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text, | 2979 | rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text, |
2980 | &text_len); | 2980 | &text_len); |
2981 | if (rc) | 2981 | if (rc) |
2982 | goto init_cpu_err; | 2982 | goto init_cpu_err; |
2983 | 2983 | ||
2984 | load_rv2p_fw(bp, text, text_len, RV2P_PROC1); | 2984 | load_rv2p_fw(bp, text, text_len, RV2P_PROC1); |
2985 | 2985 | ||
2986 | rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text, | 2986 | rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text, |
2987 | &text_len); | 2987 | &text_len); |
2988 | if (rc) | 2988 | if (rc) |
2989 | goto init_cpu_err; | 2989 | goto init_cpu_err; |
2990 | 2990 | ||
2991 | load_rv2p_fw(bp, text, text_len, RV2P_PROC2); | 2991 | load_rv2p_fw(bp, text, text_len, RV2P_PROC2); |
2992 | 2992 | ||
2993 | /* Initialize the RX Processor. */ | 2993 | /* Initialize the RX Processor. */ |
2994 | cpu_reg.mode = BNX2_RXP_CPU_MODE; | 2994 | cpu_reg.mode = BNX2_RXP_CPU_MODE; |
2995 | cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT; | 2995 | cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT; |
2996 | cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA; | 2996 | cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA; |
2997 | cpu_reg.state = BNX2_RXP_CPU_STATE; | 2997 | cpu_reg.state = BNX2_RXP_CPU_STATE; |
2998 | cpu_reg.state_value_clear = 0xffffff; | 2998 | cpu_reg.state_value_clear = 0xffffff; |
2999 | cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE; | 2999 | cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE; |
3000 | cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK; | 3000 | cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK; |
3001 | cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER; | 3001 | cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER; |
3002 | cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION; | 3002 | cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION; |
3003 | cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT; | 3003 | cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT; |
3004 | cpu_reg.spad_base = BNX2_RXP_SCRATCH; | 3004 | cpu_reg.spad_base = BNX2_RXP_SCRATCH; |
3005 | cpu_reg.mips_view_base = 0x8000000; | 3005 | cpu_reg.mips_view_base = 0x8000000; |
3006 | 3006 | ||
3007 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 3007 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
3008 | fw = &bnx2_rxp_fw_09; | 3008 | fw = &bnx2_rxp_fw_09; |
3009 | else | 3009 | else |
3010 | fw = &bnx2_rxp_fw_06; | 3010 | fw = &bnx2_rxp_fw_06; |
3011 | 3011 | ||
3012 | rc = load_cpu_fw(bp, &cpu_reg, fw); | 3012 | rc = load_cpu_fw(bp, &cpu_reg, fw); |
3013 | if (rc) | 3013 | if (rc) |
3014 | goto init_cpu_err; | 3014 | goto init_cpu_err; |
3015 | 3015 | ||
3016 | /* Initialize the TX Processor. */ | 3016 | /* Initialize the TX Processor. */ |
3017 | cpu_reg.mode = BNX2_TXP_CPU_MODE; | 3017 | cpu_reg.mode = BNX2_TXP_CPU_MODE; |
3018 | cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT; | 3018 | cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT; |
3019 | cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA; | 3019 | cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA; |
3020 | cpu_reg.state = BNX2_TXP_CPU_STATE; | 3020 | cpu_reg.state = BNX2_TXP_CPU_STATE; |
3021 | cpu_reg.state_value_clear = 0xffffff; | 3021 | cpu_reg.state_value_clear = 0xffffff; |
3022 | cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE; | 3022 | cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE; |
3023 | cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK; | 3023 | cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK; |
3024 | cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER; | 3024 | cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER; |
3025 | cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION; | 3025 | cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION; |
3026 | cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT; | 3026 | cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT; |
3027 | cpu_reg.spad_base = BNX2_TXP_SCRATCH; | 3027 | cpu_reg.spad_base = BNX2_TXP_SCRATCH; |
3028 | cpu_reg.mips_view_base = 0x8000000; | 3028 | cpu_reg.mips_view_base = 0x8000000; |
3029 | 3029 | ||
3030 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 3030 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
3031 | fw = &bnx2_txp_fw_09; | 3031 | fw = &bnx2_txp_fw_09; |
3032 | else | 3032 | else |
3033 | fw = &bnx2_txp_fw_06; | 3033 | fw = &bnx2_txp_fw_06; |
3034 | 3034 | ||
3035 | rc = load_cpu_fw(bp, &cpu_reg, fw); | 3035 | rc = load_cpu_fw(bp, &cpu_reg, fw); |
3036 | if (rc) | 3036 | if (rc) |
3037 | goto init_cpu_err; | 3037 | goto init_cpu_err; |
3038 | 3038 | ||
3039 | /* Initialize the TX Patch-up Processor. */ | 3039 | /* Initialize the TX Patch-up Processor. */ |
3040 | cpu_reg.mode = BNX2_TPAT_CPU_MODE; | 3040 | cpu_reg.mode = BNX2_TPAT_CPU_MODE; |
3041 | cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT; | 3041 | cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT; |
3042 | cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA; | 3042 | cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA; |
3043 | cpu_reg.state = BNX2_TPAT_CPU_STATE; | 3043 | cpu_reg.state = BNX2_TPAT_CPU_STATE; |
3044 | cpu_reg.state_value_clear = 0xffffff; | 3044 | cpu_reg.state_value_clear = 0xffffff; |
3045 | cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE; | 3045 | cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE; |
3046 | cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK; | 3046 | cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK; |
3047 | cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER; | 3047 | cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER; |
3048 | cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION; | 3048 | cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION; |
3049 | cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT; | 3049 | cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT; |
3050 | cpu_reg.spad_base = BNX2_TPAT_SCRATCH; | 3050 | cpu_reg.spad_base = BNX2_TPAT_SCRATCH; |
3051 | cpu_reg.mips_view_base = 0x8000000; | 3051 | cpu_reg.mips_view_base = 0x8000000; |
3052 | 3052 | ||
3053 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 3053 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
3054 | fw = &bnx2_tpat_fw_09; | 3054 | fw = &bnx2_tpat_fw_09; |
3055 | else | 3055 | else |
3056 | fw = &bnx2_tpat_fw_06; | 3056 | fw = &bnx2_tpat_fw_06; |
3057 | 3057 | ||
3058 | rc = load_cpu_fw(bp, &cpu_reg, fw); | 3058 | rc = load_cpu_fw(bp, &cpu_reg, fw); |
3059 | if (rc) | 3059 | if (rc) |
3060 | goto init_cpu_err; | 3060 | goto init_cpu_err; |
3061 | 3061 | ||
3062 | /* Initialize the Completion Processor. */ | 3062 | /* Initialize the Completion Processor. */ |
3063 | cpu_reg.mode = BNX2_COM_CPU_MODE; | 3063 | cpu_reg.mode = BNX2_COM_CPU_MODE; |
3064 | cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT; | 3064 | cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT; |
3065 | cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA; | 3065 | cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA; |
3066 | cpu_reg.state = BNX2_COM_CPU_STATE; | 3066 | cpu_reg.state = BNX2_COM_CPU_STATE; |
3067 | cpu_reg.state_value_clear = 0xffffff; | 3067 | cpu_reg.state_value_clear = 0xffffff; |
3068 | cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE; | 3068 | cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE; |
3069 | cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK; | 3069 | cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK; |
3070 | cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER; | 3070 | cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER; |
3071 | cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION; | 3071 | cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION; |
3072 | cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT; | 3072 | cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT; |
3073 | cpu_reg.spad_base = BNX2_COM_SCRATCH; | 3073 | cpu_reg.spad_base = BNX2_COM_SCRATCH; |
3074 | cpu_reg.mips_view_base = 0x8000000; | 3074 | cpu_reg.mips_view_base = 0x8000000; |
3075 | 3075 | ||
3076 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 3076 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
3077 | fw = &bnx2_com_fw_09; | 3077 | fw = &bnx2_com_fw_09; |
3078 | else | 3078 | else |
3079 | fw = &bnx2_com_fw_06; | 3079 | fw = &bnx2_com_fw_06; |
3080 | 3080 | ||
3081 | rc = load_cpu_fw(bp, &cpu_reg, fw); | 3081 | rc = load_cpu_fw(bp, &cpu_reg, fw); |
3082 | if (rc) | 3082 | if (rc) |
3083 | goto init_cpu_err; | 3083 | goto init_cpu_err; |
3084 | 3084 | ||
3085 | /* Initialize the Command Processor. */ | 3085 | /* Initialize the Command Processor. */ |
3086 | cpu_reg.mode = BNX2_CP_CPU_MODE; | 3086 | cpu_reg.mode = BNX2_CP_CPU_MODE; |
3087 | cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT; | 3087 | cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT; |
3088 | cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA; | 3088 | cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA; |
3089 | cpu_reg.state = BNX2_CP_CPU_STATE; | 3089 | cpu_reg.state = BNX2_CP_CPU_STATE; |
3090 | cpu_reg.state_value_clear = 0xffffff; | 3090 | cpu_reg.state_value_clear = 0xffffff; |
3091 | cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE; | 3091 | cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE; |
3092 | cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK; | 3092 | cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK; |
3093 | cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER; | 3093 | cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER; |
3094 | cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION; | 3094 | cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION; |
3095 | cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT; | 3095 | cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT; |
3096 | cpu_reg.spad_base = BNX2_CP_SCRATCH; | 3096 | cpu_reg.spad_base = BNX2_CP_SCRATCH; |
3097 | cpu_reg.mips_view_base = 0x8000000; | 3097 | cpu_reg.mips_view_base = 0x8000000; |
3098 | 3098 | ||
3099 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 3099 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
3100 | fw = &bnx2_cp_fw_09; | 3100 | fw = &bnx2_cp_fw_09; |
3101 | 3101 | ||
3102 | rc = load_cpu_fw(bp, &cpu_reg, fw); | 3102 | rc = load_cpu_fw(bp, &cpu_reg, fw); |
3103 | if (rc) | 3103 | if (rc) |
3104 | goto init_cpu_err; | 3104 | goto init_cpu_err; |
3105 | } | 3105 | } |
3106 | init_cpu_err: | 3106 | init_cpu_err: |
3107 | bnx2_gunzip_end(bp); | 3107 | bnx2_gunzip_end(bp); |
3108 | return rc; | 3108 | return rc; |
3109 | } | 3109 | } |
3110 | 3110 | ||
3111 | static int | 3111 | static int |
3112 | bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) | 3112 | bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) |
3113 | { | 3113 | { |
3114 | u16 pmcsr; | 3114 | u16 pmcsr; |
3115 | 3115 | ||
3116 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); | 3116 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); |
3117 | 3117 | ||
3118 | switch (state) { | 3118 | switch (state) { |
3119 | case PCI_D0: { | 3119 | case PCI_D0: { |
3120 | u32 val; | 3120 | u32 val; |
3121 | 3121 | ||
3122 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | 3122 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, |
3123 | (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | 3123 | (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | |
3124 | PCI_PM_CTRL_PME_STATUS); | 3124 | PCI_PM_CTRL_PME_STATUS); |
3125 | 3125 | ||
3126 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) | 3126 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) |
3127 | /* delay required during transition out of D3hot */ | 3127 | /* delay required during transition out of D3hot */ |
3128 | msleep(20); | 3128 | msleep(20); |
3129 | 3129 | ||
3130 | val = REG_RD(bp, BNX2_EMAC_MODE); | 3130 | val = REG_RD(bp, BNX2_EMAC_MODE); |
3131 | val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; | 3131 | val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; |
3132 | val &= ~BNX2_EMAC_MODE_MPKT; | 3132 | val &= ~BNX2_EMAC_MODE_MPKT; |
3133 | REG_WR(bp, BNX2_EMAC_MODE, val); | 3133 | REG_WR(bp, BNX2_EMAC_MODE, val); |
3134 | 3134 | ||
3135 | val = REG_RD(bp, BNX2_RPM_CONFIG); | 3135 | val = REG_RD(bp, BNX2_RPM_CONFIG); |
3136 | val &= ~BNX2_RPM_CONFIG_ACPI_ENA; | 3136 | val &= ~BNX2_RPM_CONFIG_ACPI_ENA; |
3137 | REG_WR(bp, BNX2_RPM_CONFIG, val); | 3137 | REG_WR(bp, BNX2_RPM_CONFIG, val); |
3138 | break; | 3138 | break; |
3139 | } | 3139 | } |
3140 | case PCI_D3hot: { | 3140 | case PCI_D3hot: { |
3141 | int i; | 3141 | int i; |
3142 | u32 val, wol_msg; | 3142 | u32 val, wol_msg; |
3143 | 3143 | ||
3144 | if (bp->wol) { | 3144 | if (bp->wol) { |
3145 | u32 advertising; | 3145 | u32 advertising; |
3146 | u8 autoneg; | 3146 | u8 autoneg; |
3147 | 3147 | ||
3148 | autoneg = bp->autoneg; | 3148 | autoneg = bp->autoneg; |
3149 | advertising = bp->advertising; | 3149 | advertising = bp->advertising; |
3150 | 3150 | ||
3151 | bp->autoneg = AUTONEG_SPEED; | 3151 | bp->autoneg = AUTONEG_SPEED; |
3152 | bp->advertising = ADVERTISED_10baseT_Half | | 3152 | bp->advertising = ADVERTISED_10baseT_Half | |
3153 | ADVERTISED_10baseT_Full | | 3153 | ADVERTISED_10baseT_Full | |
3154 | ADVERTISED_100baseT_Half | | 3154 | ADVERTISED_100baseT_Half | |
3155 | ADVERTISED_100baseT_Full | | 3155 | ADVERTISED_100baseT_Full | |
3156 | ADVERTISED_Autoneg; | 3156 | ADVERTISED_Autoneg; |
3157 | 3157 | ||
3158 | bnx2_setup_copper_phy(bp); | 3158 | bnx2_setup_copper_phy(bp); |
3159 | 3159 | ||
3160 | bp->autoneg = autoneg; | 3160 | bp->autoneg = autoneg; |
3161 | bp->advertising = advertising; | 3161 | bp->advertising = advertising; |
3162 | 3162 | ||
3163 | bnx2_set_mac_addr(bp); | 3163 | bnx2_set_mac_addr(bp); |
3164 | 3164 | ||
3165 | val = REG_RD(bp, BNX2_EMAC_MODE); | 3165 | val = REG_RD(bp, BNX2_EMAC_MODE); |
3166 | 3166 | ||
3167 | /* Enable port mode. */ | 3167 | /* Enable port mode. */ |
3168 | val &= ~BNX2_EMAC_MODE_PORT; | 3168 | val &= ~BNX2_EMAC_MODE_PORT; |
3169 | val |= BNX2_EMAC_MODE_PORT_MII | | 3169 | val |= BNX2_EMAC_MODE_PORT_MII | |
3170 | BNX2_EMAC_MODE_MPKT_RCVD | | 3170 | BNX2_EMAC_MODE_MPKT_RCVD | |
3171 | BNX2_EMAC_MODE_ACPI_RCVD | | 3171 | BNX2_EMAC_MODE_ACPI_RCVD | |
3172 | BNX2_EMAC_MODE_MPKT; | 3172 | BNX2_EMAC_MODE_MPKT; |
3173 | 3173 | ||
3174 | REG_WR(bp, BNX2_EMAC_MODE, val); | 3174 | REG_WR(bp, BNX2_EMAC_MODE, val); |
3175 | 3175 | ||
3176 | /* receive all multicast */ | 3176 | /* receive all multicast */ |
3177 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { | 3177 | for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { |
3178 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), | 3178 | REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), |
3179 | 0xffffffff); | 3179 | 0xffffffff); |
3180 | } | 3180 | } |
3181 | REG_WR(bp, BNX2_EMAC_RX_MODE, | 3181 | REG_WR(bp, BNX2_EMAC_RX_MODE, |
3182 | BNX2_EMAC_RX_MODE_SORT_MODE); | 3182 | BNX2_EMAC_RX_MODE_SORT_MODE); |
3183 | 3183 | ||
3184 | val = 1 | BNX2_RPM_SORT_USER0_BC_EN | | 3184 | val = 1 | BNX2_RPM_SORT_USER0_BC_EN | |
3185 | BNX2_RPM_SORT_USER0_MC_EN; | 3185 | BNX2_RPM_SORT_USER0_MC_EN; |
3186 | REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); | 3186 | REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); |
3187 | REG_WR(bp, BNX2_RPM_SORT_USER0, val); | 3187 | REG_WR(bp, BNX2_RPM_SORT_USER0, val); |
3188 | REG_WR(bp, BNX2_RPM_SORT_USER0, val | | 3188 | REG_WR(bp, BNX2_RPM_SORT_USER0, val | |
3189 | BNX2_RPM_SORT_USER0_ENA); | 3189 | BNX2_RPM_SORT_USER0_ENA); |
3190 | 3190 | ||
3191 | /* Need to enable EMAC and RPM for WOL. */ | 3191 | /* Need to enable EMAC and RPM for WOL. */ |
3192 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, | 3192 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, |
3193 | BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | | 3193 | BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | |
3194 | BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | | 3194 | BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | |
3195 | BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); | 3195 | BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); |
3196 | 3196 | ||
3197 | val = REG_RD(bp, BNX2_RPM_CONFIG); | 3197 | val = REG_RD(bp, BNX2_RPM_CONFIG); |
3198 | val &= ~BNX2_RPM_CONFIG_ACPI_ENA; | 3198 | val &= ~BNX2_RPM_CONFIG_ACPI_ENA; |
3199 | REG_WR(bp, BNX2_RPM_CONFIG, val); | 3199 | REG_WR(bp, BNX2_RPM_CONFIG, val); |
3200 | 3200 | ||
3201 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; | 3201 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; |
3202 | } | 3202 | } |
3203 | else { | 3203 | else { |
3204 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; | 3204 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; |
3205 | } | 3205 | } |
3206 | 3206 | ||
3207 | if (!(bp->flags & NO_WOL_FLAG)) | 3207 | if (!(bp->flags & NO_WOL_FLAG)) |
3208 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0); | 3208 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0); |
3209 | 3209 | ||
3210 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 3210 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
3211 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || | 3211 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || |
3212 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) { | 3212 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) { |
3213 | 3213 | ||
3214 | if (bp->wol) | 3214 | if (bp->wol) |
3215 | pmcsr |= 3; | 3215 | pmcsr |= 3; |
3216 | } | 3216 | } |
3217 | else { | 3217 | else { |
3218 | pmcsr |= 3; | 3218 | pmcsr |= 3; |
3219 | } | 3219 | } |
3220 | if (bp->wol) { | 3220 | if (bp->wol) { |
3221 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; | 3221 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; |
3222 | } | 3222 | } |
3223 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | 3223 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, |
3224 | pmcsr); | 3224 | pmcsr); |
3225 | 3225 | ||
3226 | /* No more memory access after this point until | 3226 | /* No more memory access after this point until |
3227 | * device is brought back to D0. | 3227 | * device is brought back to D0. |
3228 | */ | 3228 | */ |
3229 | udelay(50); | 3229 | udelay(50); |
3230 | break; | 3230 | break; |
3231 | } | 3231 | } |
3232 | default: | 3232 | default: |
3233 | return -EINVAL; | 3233 | return -EINVAL; |
3234 | } | 3234 | } |
3235 | return 0; | 3235 | return 0; |
3236 | } | 3236 | } |
3237 | 3237 | ||
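
The D3hot branch ends by programming the PCI power-management control/status register directly: `pmcsr |= 3` selects state D3hot within PCI_PM_CTRL_STATE_MASK (older 5706 A0/A1 parts are left in D0 unless Wake-on-LAN is armed), and PCI_PM_CTRL_PME_ENABLE is set only when WoL is on. A small sketch of the value being built, with pmcsr_for_d3hot() as a hypothetical helper and the bit positions taken from the standard PCI PM capability layout:

    #include <stdint.h>

    #define PCI_PM_CTRL_STATE_MASK  0x0003  /* D-state select, 3 == D3hot */
    #define PCI_PM_CTRL_PME_ENABLE  0x0100

    /* Sketch of the PMCSR value written at the end of the D3hot branch. */
    uint16_t pmcsr_for_d3hot(uint16_t pmcsr, int wol, int chip_is_5706_a0_or_a1)
    {
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;

        /* 5706 A0/A1 stay in D0 unless WoL is armed; other chips always
         * move to D3hot. */
        if (!chip_is_5706_a0_or_a1 || wol)
            pmcsr |= 3;
        if (wol)
            pmcsr |= PCI_PM_CTRL_PME_ENABLE;
        return pmcsr;
    }
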
3238 | static int | 3238 | static int |
3239 | bnx2_acquire_nvram_lock(struct bnx2 *bp) | 3239 | bnx2_acquire_nvram_lock(struct bnx2 *bp) |
3240 | { | 3240 | { |
3241 | u32 val; | 3241 | u32 val; |
3242 | int j; | 3242 | int j; |
3243 | 3243 | ||
3244 | /* Request access to the flash interface. */ | 3244 | /* Request access to the flash interface. */ |
3245 | REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); | 3245 | REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); |
3246 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3246 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3247 | val = REG_RD(bp, BNX2_NVM_SW_ARB); | 3247 | val = REG_RD(bp, BNX2_NVM_SW_ARB); |
3248 | if (val & BNX2_NVM_SW_ARB_ARB_ARB2) | 3248 | if (val & BNX2_NVM_SW_ARB_ARB_ARB2) |
3249 | break; | 3249 | break; |
3250 | 3250 | ||
3251 | udelay(5); | 3251 | udelay(5); |
3252 | } | 3252 | } |
3253 | 3253 | ||
3254 | if (j >= NVRAM_TIMEOUT_COUNT) | 3254 | if (j >= NVRAM_TIMEOUT_COUNT) |
3255 | return -EBUSY; | 3255 | return -EBUSY; |
3256 | 3256 | ||
3257 | return 0; | 3257 | return 0; |
3258 | } | 3258 | } |
3259 | 3259 | ||
3260 | static int | 3260 | static int |
3261 | bnx2_release_nvram_lock(struct bnx2 *bp) | 3261 | bnx2_release_nvram_lock(struct bnx2 *bp) |
3262 | { | 3262 | { |
3263 | int j; | 3263 | int j; |
3264 | u32 val; | 3264 | u32 val; |
3265 | 3265 | ||
3266 | /* Relinquish nvram interface. */ | 3266 | /* Relinquish nvram interface. */ |
3267 | REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); | 3267 | REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); |
3268 | 3268 | ||
3269 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3269 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3270 | val = REG_RD(bp, BNX2_NVM_SW_ARB); | 3270 | val = REG_RD(bp, BNX2_NVM_SW_ARB); |
3271 | if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) | 3271 | if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) |
3272 | break; | 3272 | break; |
3273 | 3273 | ||
3274 | udelay(5); | 3274 | udelay(5); |
3275 | } | 3275 | } |
3276 | 3276 | ||
3277 | if (j >= NVRAM_TIMEOUT_COUNT) | 3277 | if (j >= NVRAM_TIMEOUT_COUNT) |
3278 | return -EBUSY; | 3278 | return -EBUSY; |
3279 | 3279 | ||
3280 | return 0; | 3280 | return 0; |
3281 | } | 3281 | } |
3282 | 3282 | ||
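
Both arbitration helpers use the bounded-polling idiom that recurs throughout the NVRAM code: write a request (or clear) bit, then re-read the status register up to NVRAM_TIMEOUT_COUNT times with a 5 us delay, returning -EBUSY on timeout. A generic user-space sketch of that pattern; poll_reg_bit() is hypothetical, and the register accessor is passed in because REG_RD() needs the device context:

    #include <stdint.h>

    /* Sketch of the bounded-polling idiom shared by the NVRAM lock, write-enable
     * and command-completion loops. 'read_reg' and 'ctx' stand in for
     * REG_RD(bp, reg); the driver delays 5us between reads. */
    int poll_reg_bit(uint32_t (*read_reg)(void *ctx, uint32_t reg), void *ctx,
                     uint32_t reg, uint32_t bit, int want_set, int timeout_count)
    {
        for (int j = 0; j < timeout_count; j++) {
            uint32_t val = read_reg(ctx, reg);

            if (want_set ? (val & bit) != 0 : (val & bit) == 0)
                return 0;
            /* udelay(5) in the driver */
        }
        return -1;    /* mapped to -EBUSY by the callers */
    }
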
3283 | 3283 | ||
3284 | static int | 3284 | static int |
3285 | bnx2_enable_nvram_write(struct bnx2 *bp) | 3285 | bnx2_enable_nvram_write(struct bnx2 *bp) |
3286 | { | 3286 | { |
3287 | u32 val; | 3287 | u32 val; |
3288 | 3288 | ||
3289 | val = REG_RD(bp, BNX2_MISC_CFG); | 3289 | val = REG_RD(bp, BNX2_MISC_CFG); |
3290 | REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); | 3290 | REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); |
3291 | 3291 | ||
3292 | if (!bp->flash_info->buffered) { | 3292 | if (!bp->flash_info->buffered) { |
3293 | int j; | 3293 | int j; |
3294 | 3294 | ||
3295 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); | 3295 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); |
3296 | REG_WR(bp, BNX2_NVM_COMMAND, | 3296 | REG_WR(bp, BNX2_NVM_COMMAND, |
3297 | BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); | 3297 | BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); |
3298 | 3298 | ||
3299 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3299 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3300 | udelay(5); | 3300 | udelay(5); |
3301 | 3301 | ||
3302 | val = REG_RD(bp, BNX2_NVM_COMMAND); | 3302 | val = REG_RD(bp, BNX2_NVM_COMMAND); |
3303 | if (val & BNX2_NVM_COMMAND_DONE) | 3303 | if (val & BNX2_NVM_COMMAND_DONE) |
3304 | break; | 3304 | break; |
3305 | } | 3305 | } |
3306 | 3306 | ||
3307 | if (j >= NVRAM_TIMEOUT_COUNT) | 3307 | if (j >= NVRAM_TIMEOUT_COUNT) |
3308 | return -EBUSY; | 3308 | return -EBUSY; |
3309 | } | 3309 | } |
3310 | return 0; | 3310 | return 0; |
3311 | } | 3311 | } |
3312 | 3312 | ||
3313 | static void | 3313 | static void |
3314 | bnx2_disable_nvram_write(struct bnx2 *bp) | 3314 | bnx2_disable_nvram_write(struct bnx2 *bp) |
3315 | { | 3315 | { |
3316 | u32 val; | 3316 | u32 val; |
3317 | 3317 | ||
3318 | val = REG_RD(bp, BNX2_MISC_CFG); | 3318 | val = REG_RD(bp, BNX2_MISC_CFG); |
3319 | REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); | 3319 | REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); |
3320 | } | 3320 | } |
3321 | 3321 | ||
3322 | 3322 | ||
3323 | static void | 3323 | static void |
3324 | bnx2_enable_nvram_access(struct bnx2 *bp) | 3324 | bnx2_enable_nvram_access(struct bnx2 *bp) |
3325 | { | 3325 | { |
3326 | u32 val; | 3326 | u32 val; |
3327 | 3327 | ||
3328 | val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); | 3328 | val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); |
3329 | /* Enable both bits, even on read. */ | 3329 | /* Enable both bits, even on read. */ |
3330 | REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, | 3330 | REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, |
3331 | val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); | 3331 | val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); |
3332 | } | 3332 | } |
3333 | 3333 | ||
3334 | static void | 3334 | static void |
3335 | bnx2_disable_nvram_access(struct bnx2 *bp) | 3335 | bnx2_disable_nvram_access(struct bnx2 *bp) |
3336 | { | 3336 | { |
3337 | u32 val; | 3337 | u32 val; |
3338 | 3338 | ||
3339 | val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); | 3339 | val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); |
3340 | /* Disable both bits, even after read. */ | 3340 | /* Disable both bits, even after read. */ |
3341 | REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, | 3341 | REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, |
3342 | val & ~(BNX2_NVM_ACCESS_ENABLE_EN | | 3342 | val & ~(BNX2_NVM_ACCESS_ENABLE_EN | |
3343 | BNX2_NVM_ACCESS_ENABLE_WR_EN)); | 3343 | BNX2_NVM_ACCESS_ENABLE_WR_EN)); |
3344 | } | 3344 | } |
3345 | 3345 | ||
3346 | static int | 3346 | static int |
3347 | bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) | 3347 | bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) |
3348 | { | 3348 | { |
3349 | u32 cmd; | 3349 | u32 cmd; |
3350 | int j; | 3350 | int j; |
3351 | 3351 | ||
3352 | if (bp->flash_info->buffered) | 3352 | if (bp->flash_info->buffered) |
3353 | /* Buffered flash, no erase needed */ | 3353 | /* Buffered flash, no erase needed */ |
3354 | return 0; | 3354 | return 0; |
3355 | 3355 | ||
3356 | /* Build an erase command */ | 3356 | /* Build an erase command */ |
3357 | cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR | | 3357 | cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR | |
3358 | BNX2_NVM_COMMAND_DOIT; | 3358 | BNX2_NVM_COMMAND_DOIT; |
3359 | 3359 | ||
3360 | /* Need to clear DONE bit separately. */ | 3360 | /* Need to clear DONE bit separately. */ |
3361 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); | 3361 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); |
3362 | 3362 | ||
3363 | /* Address of the NVRAM page to erase. */ | 3363 | /* Address of the NVRAM page to erase. */ |
3364 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); | 3364 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); |
3365 | 3365 | ||
3366 | /* Issue an erase command. */ | 3366 | /* Issue an erase command. */ |
3367 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); | 3367 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); |
3368 | 3368 | ||
3369 | /* Wait for completion. */ | 3369 | /* Wait for completion. */ |
3370 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3370 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3371 | u32 val; | 3371 | u32 val; |
3372 | 3372 | ||
3373 | udelay(5); | 3373 | udelay(5); |
3374 | 3374 | ||
3375 | val = REG_RD(bp, BNX2_NVM_COMMAND); | 3375 | val = REG_RD(bp, BNX2_NVM_COMMAND); |
3376 | if (val & BNX2_NVM_COMMAND_DONE) | 3376 | if (val & BNX2_NVM_COMMAND_DONE) |
3377 | break; | 3377 | break; |
3378 | } | 3378 | } |
3379 | 3379 | ||
3380 | if (j >= NVRAM_TIMEOUT_COUNT) | 3380 | if (j >= NVRAM_TIMEOUT_COUNT) |
3381 | return -EBUSY; | 3381 | return -EBUSY; |
3382 | 3382 | ||
3383 | return 0; | 3383 | return 0; |
3384 | } | 3384 | } |
3385 | 3385 | ||
3386 | static int | 3386 | static int |
3387 | bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) | 3387 | bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) |
3388 | { | 3388 | { |
3389 | u32 cmd; | 3389 | u32 cmd; |
3390 | int j; | 3390 | int j; |
3391 | 3391 | ||
3392 | /* Build the command word. */ | 3392 | /* Build the command word. */ |
3393 | cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; | 3393 | cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; |
3394 | 3394 | ||
3395 | /* Calculate an offset of a buffered flash. */ | 3395 | /* Calculate an offset of a buffered flash. */ |
3396 | if (bp->flash_info->buffered) { | 3396 | if (bp->flash_info->buffered) { |
3397 | offset = ((offset / bp->flash_info->page_size) << | 3397 | offset = ((offset / bp->flash_info->page_size) << |
3398 | bp->flash_info->page_bits) + | 3398 | bp->flash_info->page_bits) + |
3399 | (offset % bp->flash_info->page_size); | 3399 | (offset % bp->flash_info->page_size); |
3400 | } | 3400 | } |
3401 | 3401 | ||
3402 | /* Need to clear DONE bit separately. */ | 3402 | /* Need to clear DONE bit separately. */ |
3403 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); | 3403 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); |
3404 | 3404 | ||
3405 | /* Address of the NVRAM to read from. */ | 3405 | /* Address of the NVRAM to read from. */ |
3406 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); | 3406 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); |
3407 | 3407 | ||
3408 | /* Issue a read command. */ | 3408 | /* Issue a read command. */ |
3409 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); | 3409 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); |
3410 | 3410 | ||
3411 | /* Wait for completion. */ | 3411 | /* Wait for completion. */ |
3412 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3412 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3413 | u32 val; | 3413 | u32 val; |
3414 | 3414 | ||
3415 | udelay(5); | 3415 | udelay(5); |
3416 | 3416 | ||
3417 | val = REG_RD(bp, BNX2_NVM_COMMAND); | 3417 | val = REG_RD(bp, BNX2_NVM_COMMAND); |
3418 | if (val & BNX2_NVM_COMMAND_DONE) { | 3418 | if (val & BNX2_NVM_COMMAND_DONE) { |
3419 | val = REG_RD(bp, BNX2_NVM_READ); | 3419 | val = REG_RD(bp, BNX2_NVM_READ); |
3420 | 3420 | ||
3421 | val = be32_to_cpu(val); | 3421 | val = be32_to_cpu(val); |
3422 | memcpy(ret_val, &val, 4); | 3422 | memcpy(ret_val, &val, 4); |
3423 | break; | 3423 | break; |
3424 | } | 3424 | } |
3425 | } | 3425 | } |
3426 | if (j >= NVRAM_TIMEOUT_COUNT) | 3426 | if (j >= NVRAM_TIMEOUT_COUNT) |
3427 | return -EBUSY; | 3427 | return -EBUSY; |
3428 | 3428 | ||
3429 | return 0; | 3429 | return 0; |
3430 | } | 3430 | } |
3431 | 3431 | ||
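
For buffered flash parts the linear NVRAM offset is re-encoded before it reaches BNX2_NVM_ADDR: the page number is shifted up by page_bits and the in-page remainder is appended, so pages sit on power-of-two boundaries even when page_size itself is not a power of two. The write path below performs the same translation. A small sketch of the arithmetic, with buffered_flash_offset() as a hypothetical helper (page_size and page_bits come from the flash_info table, not shown here):

    #include <stdint.h>

    /* Sketch of the buffered-flash address translation used by both
     * bnx2_nvram_read_dword() and bnx2_nvram_write_dword(). */
    uint32_t buffered_flash_offset(uint32_t offset, uint32_t page_size,
                                   uint32_t page_bits)
    {
        return ((offset / page_size) << page_bits) + (offset % page_size);
    }

    /* Example: with a hypothetical 264-byte page and page_bits = 9, linear
     * byte 300 maps to (1 << 9) + 36 = 548 in the device's address space. */
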
3432 | 3432 | ||
3433 | static int | 3433 | static int |
3434 | bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) | 3434 | bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) |
3435 | { | 3435 | { |
3436 | u32 cmd, val32; | 3436 | u32 cmd, val32; |
3437 | int j; | 3437 | int j; |
3438 | 3438 | ||
3439 | /* Build the command word. */ | 3439 | /* Build the command word. */ |
3440 | cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; | 3440 | cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; |
3441 | 3441 | ||
3442 | /* Calculate an offset of a buffered flash. */ | 3442 | /* Calculate an offset of a buffered flash. */ |
3443 | if (bp->flash_info->buffered) { | 3443 | if (bp->flash_info->buffered) { |
3444 | offset = ((offset / bp->flash_info->page_size) << | 3444 | offset = ((offset / bp->flash_info->page_size) << |
3445 | bp->flash_info->page_bits) + | 3445 | bp->flash_info->page_bits) + |
3446 | (offset % bp->flash_info->page_size); | 3446 | (offset % bp->flash_info->page_size); |
3447 | } | 3447 | } |
3448 | 3448 | ||
3449 | /* Need to clear DONE bit separately. */ | 3449 | /* Need to clear DONE bit separately. */ |
3450 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); | 3450 | REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); |
3451 | 3451 | ||
3452 | memcpy(&val32, val, 4); | 3452 | memcpy(&val32, val, 4); |
3453 | val32 = cpu_to_be32(val32); | 3453 | val32 = cpu_to_be32(val32); |
3454 | 3454 | ||
3455 | /* Write the data. */ | 3455 | /* Write the data. */ |
3456 | REG_WR(bp, BNX2_NVM_WRITE, val32); | 3456 | REG_WR(bp, BNX2_NVM_WRITE, val32); |
3457 | 3457 | ||
3458 | /* Address of the NVRAM to write to. */ | 3458 | /* Address of the NVRAM to write to. */ |
3459 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); | 3459 | REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); |
3460 | 3460 | ||
3461 | /* Issue the write command. */ | 3461 | /* Issue the write command. */ |
3462 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); | 3462 | REG_WR(bp, BNX2_NVM_COMMAND, cmd); |
3463 | 3463 | ||
3464 | /* Wait for completion. */ | 3464 | /* Wait for completion. */ |
3465 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { | 3465 | for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { |
3466 | udelay(5); | 3466 | udelay(5); |
3467 | 3467 | ||
3468 | if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) | 3468 | if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) |
3469 | break; | 3469 | break; |
3470 | } | 3470 | } |
3471 | if (j >= NVRAM_TIMEOUT_COUNT) | 3471 | if (j >= NVRAM_TIMEOUT_COUNT) |
3472 | return -EBUSY; | 3472 | return -EBUSY; |
3473 | 3473 | ||
3474 | return 0; | 3474 | return 0; |
3475 | } | 3475 | } |
3476 | 3476 | ||
3477 | static int | 3477 | static int |
3478 | bnx2_init_nvram(struct bnx2 *bp) | 3478 | bnx2_init_nvram(struct bnx2 *bp) |
3479 | { | 3479 | { |
3480 | u32 val; | 3480 | u32 val; |
3481 | int j, entry_count, rc; | 3481 | int j, entry_count, rc; |
3482 | struct flash_spec *flash; | 3482 | struct flash_spec *flash; |
3483 | 3483 | ||
3484 | /* Determine the selected interface. */ | 3484 | /* Determine the selected interface. */ |
3485 | val = REG_RD(bp, BNX2_NVM_CFG1); | 3485 | val = REG_RD(bp, BNX2_NVM_CFG1); |
3486 | 3486 | ||
3487 | entry_count = sizeof(flash_table) / sizeof(struct flash_spec); | 3487 | entry_count = sizeof(flash_table) / sizeof(struct flash_spec); |
3488 | 3488 | ||
3489 | rc = 0; | 3489 | rc = 0; |
3490 | if (val & 0x40000000) { | 3490 | if (val & 0x40000000) { |
3491 | 3491 | ||
3492 | /* Flash interface has been reconfigured */ | 3492 | /* Flash interface has been reconfigured */ |
3493 | for (j = 0, flash = &flash_table[0]; j < entry_count; | 3493 | for (j = 0, flash = &flash_table[0]; j < entry_count; |
3494 | j++, flash++) { | 3494 | j++, flash++) { |
3495 | if ((val & FLASH_BACKUP_STRAP_MASK) == | 3495 | if ((val & FLASH_BACKUP_STRAP_MASK) == |
3496 | (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { | 3496 | (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { |
3497 | bp->flash_info = flash; | 3497 | bp->flash_info = flash; |
3498 | break; | 3498 | break; |
3499 | } | 3499 | } |
3500 | } | 3500 | } |
3501 | } | 3501 | } |
3502 | else { | 3502 | else { |
3503 | u32 mask; | 3503 | u32 mask; |
3504 | /* Not yet been reconfigured */ | 3504 | /* Not yet been reconfigured */ |
3505 | 3505 | ||
3506 | if (val & (1 << 23)) | 3506 | if (val & (1 << 23)) |
3507 | mask = FLASH_BACKUP_STRAP_MASK; | 3507 | mask = FLASH_BACKUP_STRAP_MASK; |
3508 | else | 3508 | else |
3509 | mask = FLASH_STRAP_MASK; | 3509 | mask = FLASH_STRAP_MASK; |
3510 | 3510 | ||
3511 | for (j = 0, flash = &flash_table[0]; j < entry_count; | 3511 | for (j = 0, flash = &flash_table[0]; j < entry_count; |
3512 | j++, flash++) { | 3512 | j++, flash++) { |
3513 | 3513 | ||
3514 | if ((val & mask) == (flash->strapping & mask)) { | 3514 | if ((val & mask) == (flash->strapping & mask)) { |
3515 | bp->flash_info = flash; | 3515 | bp->flash_info = flash; |
3516 | 3516 | ||
3517 | /* Request access to the flash interface. */ | 3517 | /* Request access to the flash interface. */ |
3518 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) | 3518 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) |
3519 | return rc; | 3519 | return rc; |
3520 | 3520 | ||
3521 | /* Enable access to flash interface */ | 3521 | /* Enable access to flash interface */ |
3522 | bnx2_enable_nvram_access(bp); | 3522 | bnx2_enable_nvram_access(bp); |
3523 | 3523 | ||
3524 | /* Reconfigure the flash interface */ | 3524 | /* Reconfigure the flash interface */ |
3525 | REG_WR(bp, BNX2_NVM_CFG1, flash->config1); | 3525 | REG_WR(bp, BNX2_NVM_CFG1, flash->config1); |
3526 | REG_WR(bp, BNX2_NVM_CFG2, flash->config2); | 3526 | REG_WR(bp, BNX2_NVM_CFG2, flash->config2); |
3527 | REG_WR(bp, BNX2_NVM_CFG3, flash->config3); | 3527 | REG_WR(bp, BNX2_NVM_CFG3, flash->config3); |
3528 | REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); | 3528 | REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); |
3529 | 3529 | ||
3530 | /* Disable access to flash interface */ | 3530 | /* Disable access to flash interface */ |
3531 | bnx2_disable_nvram_access(bp); | 3531 | bnx2_disable_nvram_access(bp); |
3532 | bnx2_release_nvram_lock(bp); | 3532 | bnx2_release_nvram_lock(bp); |
3533 | 3533 | ||
3534 | break; | 3534 | break; |
3535 | } | 3535 | } |
3536 | } | 3536 | } |
3537 | } /* if (val & 0x40000000) */ | 3537 | } /* if (val & 0x40000000) */ |
3538 | 3538 | ||
3539 | if (j == entry_count) { | 3539 | if (j == entry_count) { |
3540 | bp->flash_info = NULL; | 3540 | bp->flash_info = NULL; |
3541 | printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n"); | 3541 | printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n"); |
3542 | return -ENODEV; | 3542 | return -ENODEV; |
3543 | } | 3543 | } |
3544 | 3544 | ||
3545 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2); | 3545 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2); |
3546 | val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; | 3546 | val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; |
3547 | if (val) | 3547 | if (val) |
3548 | bp->flash_size = val; | 3548 | bp->flash_size = val; |
3549 | else | 3549 | else |
3550 | bp->flash_size = bp->flash_info->total_size; | 3550 | bp->flash_size = bp->flash_info->total_size; |
3551 | 3551 | ||
3552 | return rc; | 3552 | return rc; |
3553 | } | 3553 | } |
3554 | 3554 | ||
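
bnx2_init_nvram() identifies the part by comparing strap bits in NVM_CFG1 against the static flash_table: bit 30 (0x40000000) means the interface was already reconfigured, in which case the backup-strap field is matched; otherwise bit 23 chooses between the backup and primary strap masks. A small sketch of just that mask selection; nvram_strap_mask() is hypothetical, and the real FLASH_STRAP_MASK / FLASH_BACKUP_STRAP_MASK values live in bnx2.h, so they are taken as parameters:

    #include <stdint.h>

    /* Sketch of the strap-mask selection in bnx2_init_nvram(). Returns the mask
     * under which NVM_CFG1 is compared against the flash table; 'reconfigured'
     * reports whether the interface was already reprogrammed (bit 30). */
    uint32_t nvram_strap_mask(uint32_t nvm_cfg1, uint32_t strap_mask,
                              uint32_t backup_strap_mask, int *reconfigured)
    {
        *reconfigured = (nvm_cfg1 & 0x40000000) != 0;
        if (*reconfigured || (nvm_cfg1 & (1 << 23)))
            return backup_strap_mask;
        return strap_mask;
    }
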
3555 | static int | 3555 | static int |
3556 | bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, | 3556 | bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, |
3557 | int buf_size) | 3557 | int buf_size) |
3558 | { | 3558 | { |
3559 | int rc = 0; | 3559 | int rc = 0; |
3560 | u32 cmd_flags, offset32, len32, extra; | 3560 | u32 cmd_flags, offset32, len32, extra; |
3561 | 3561 | ||
3562 | if (buf_size == 0) | 3562 | if (buf_size == 0) |
3563 | return 0; | 3563 | return 0; |
3564 | 3564 | ||
3565 | /* Request access to the flash interface. */ | 3565 | /* Request access to the flash interface. */ |
3566 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) | 3566 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) |
3567 | return rc; | 3567 | return rc; |
3568 | 3568 | ||
3569 | /* Enable access to flash interface */ | 3569 | /* Enable access to flash interface */ |
3570 | bnx2_enable_nvram_access(bp); | 3570 | bnx2_enable_nvram_access(bp); |
3571 | 3571 | ||
3572 | len32 = buf_size; | 3572 | len32 = buf_size; |
3573 | offset32 = offset; | 3573 | offset32 = offset; |
3574 | extra = 0; | 3574 | extra = 0; |
3575 | 3575 | ||
3576 | cmd_flags = 0; | 3576 | cmd_flags = 0; |
3577 | 3577 | ||
3578 | if (offset32 & 3) { | 3578 | if (offset32 & 3) { |
3579 | u8 buf[4]; | 3579 | u8 buf[4]; |
3580 | u32 pre_len; | 3580 | u32 pre_len; |
3581 | 3581 | ||
3582 | offset32 &= ~3; | 3582 | offset32 &= ~3; |
3583 | pre_len = 4 - (offset & 3); | 3583 | pre_len = 4 - (offset & 3); |
3584 | 3584 | ||
3585 | if (pre_len >= len32) { | 3585 | if (pre_len >= len32) { |
3586 | pre_len = len32; | 3586 | pre_len = len32; |
3587 | cmd_flags = BNX2_NVM_COMMAND_FIRST | | 3587 | cmd_flags = BNX2_NVM_COMMAND_FIRST | |
3588 | BNX2_NVM_COMMAND_LAST; | 3588 | BNX2_NVM_COMMAND_LAST; |
3589 | } | 3589 | } |
3590 | else { | 3590 | else { |
3591 | cmd_flags = BNX2_NVM_COMMAND_FIRST; | 3591 | cmd_flags = BNX2_NVM_COMMAND_FIRST; |
3592 | } | 3592 | } |
3593 | 3593 | ||
3594 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); | 3594 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); |
3595 | 3595 | ||
3596 | if (rc) | 3596 | if (rc) |
3597 | return rc; | 3597 | return rc; |
3598 | 3598 | ||
3599 | memcpy(ret_buf, buf + (offset & 3), pre_len); | 3599 | memcpy(ret_buf, buf + (offset & 3), pre_len); |
3600 | 3600 | ||
3601 | offset32 += 4; | 3601 | offset32 += 4; |
3602 | ret_buf += pre_len; | 3602 | ret_buf += pre_len; |
3603 | len32 -= pre_len; | 3603 | len32 -= pre_len; |
3604 | } | 3604 | } |
3605 | if (len32 & 3) { | 3605 | if (len32 & 3) { |
3606 | extra = 4 - (len32 & 3); | 3606 | extra = 4 - (len32 & 3); |
3607 | len32 = (len32 + 4) & ~3; | 3607 | len32 = (len32 + 4) & ~3; |
3608 | } | 3608 | } |
3609 | 3609 | ||
3610 | if (len32 == 4) { | 3610 | if (len32 == 4) { |
3611 | u8 buf[4]; | 3611 | u8 buf[4]; |
3612 | 3612 | ||
3613 | if (cmd_flags) | 3613 | if (cmd_flags) |
3614 | cmd_flags = BNX2_NVM_COMMAND_LAST; | 3614 | cmd_flags = BNX2_NVM_COMMAND_LAST; |
3615 | else | 3615 | else |
3616 | cmd_flags = BNX2_NVM_COMMAND_FIRST | | 3616 | cmd_flags = BNX2_NVM_COMMAND_FIRST | |
3617 | BNX2_NVM_COMMAND_LAST; | 3617 | BNX2_NVM_COMMAND_LAST; |
3618 | 3618 | ||
3619 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); | 3619 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); |
3620 | 3620 | ||
3621 | memcpy(ret_buf, buf, 4 - extra); | 3621 | memcpy(ret_buf, buf, 4 - extra); |
3622 | } | 3622 | } |
3623 | else if (len32 > 0) { | 3623 | else if (len32 > 0) { |
3624 | u8 buf[4]; | 3624 | u8 buf[4]; |
3625 | 3625 | ||
3626 | /* Read the first word. */ | 3626 | /* Read the first word. */ |
3627 | if (cmd_flags) | 3627 | if (cmd_flags) |
3628 | cmd_flags = 0; | 3628 | cmd_flags = 0; |
3629 | else | 3629 | else |
3630 | cmd_flags = BNX2_NVM_COMMAND_FIRST; | 3630 | cmd_flags = BNX2_NVM_COMMAND_FIRST; |
3631 | 3631 | ||
3632 | rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); | 3632 | rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); |
3633 | 3633 | ||
3634 | /* Advance to the next dword. */ | 3634 | /* Advance to the next dword. */ |
3635 | offset32 += 4; | 3635 | offset32 += 4; |
3636 | ret_buf += 4; | 3636 | ret_buf += 4; |
3637 | len32 -= 4; | 3637 | len32 -= 4; |
3638 | 3638 | ||
3639 | while (len32 > 4 && rc == 0) { | 3639 | while (len32 > 4 && rc == 0) { |
3640 | rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); | 3640 | rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); |
3641 | 3641 | ||
3642 | /* Advance to the next dword. */ | 3642 | /* Advance to the next dword. */ |
3643 | offset32 += 4; | 3643 | offset32 += 4; |
3644 | ret_buf += 4; | 3644 | ret_buf += 4; |
3645 | len32 -= 4; | 3645 | len32 -= 4; |
3646 | } | 3646 | } |
3647 | 3647 | ||
3648 | if (rc) | 3648 | if (rc) |
3649 | return rc; | 3649 | return rc; |
3650 | 3650 | ||
3651 | cmd_flags = BNX2_NVM_COMMAND_LAST; | 3651 | cmd_flags = BNX2_NVM_COMMAND_LAST; |
3652 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); | 3652 | rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); |
3653 | 3653 | ||
3654 | memcpy(ret_buf, buf, 4 - extra); | 3654 | memcpy(ret_buf, buf, 4 - extra); |
3655 | } | 3655 | } |
3656 | 3656 | ||
3657 | /* Disable access to flash interface */ | 3657 | /* Disable access to flash interface */ |
3658 | bnx2_disable_nvram_access(bp); | 3658 | bnx2_disable_nvram_access(bp); |
3659 | 3659 | ||
3660 | bnx2_release_nvram_lock(bp); | 3660 | bnx2_release_nvram_lock(bp); |
3661 | 3661 | ||
3662 | return rc; | 3662 | return rc; |
3663 | } | 3663 | } |
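The read path above can only issue aligned 4-byte NVRAM accesses, so an unaligned request is served by reading whole dwords and copying out just the bytes the caller asked for (the pre_len head, the aligned body, then the extra tail). Below is a minimal user-space sketch of that alignment arithmetic only; read_dword() and the byte values it returns are made-up stand-ins, not driver code.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for bnx2_nvram_read_dword(): always reads one
 * aligned 4-byte word; here it just fabricates bytes for the demo. */
static void read_dword(uint32_t aligned_off, uint8_t out[4])
{
    for (int i = 0; i < 4; i++)
        out[i] = (uint8_t)(aligned_off + i);
}

/* Same strategy as the driver: round the offset down, serve the unaligned
 * head from one dword, copy whole dwords for the body, and serve any
 * unaligned tail from one final dword. */
static void nvram_read_sketch(uint32_t offset, uint8_t *dst, uint32_t len)
{
    uint32_t off = offset & ~3u;
    uint8_t tmp[4];

    if (offset & 3) {                      /* unaligned head */
        uint32_t pre = 4 - (offset & 3);
        if (pre > len)
            pre = len;
        read_dword(off, tmp);
        memcpy(dst, tmp + (offset & 3), pre);
        dst += pre;
        len -= pre;
        off += 4;
    }
    while (len >= 4) {                     /* aligned body */
        read_dword(off, dst);
        dst += 4;
        len -= 4;
        off += 4;
    }
    if (len) {                             /* unaligned tail */
        read_dword(off, tmp);
        memcpy(dst, tmp, len);
    }
}

int main(void)
{
    uint8_t buf[10];

    nvram_read_sketch(6, buf, sizeof(buf));   /* read 10 bytes at offset 6 */
    for (int i = 0; i < 10; i++)
        printf("%02x ", buf[i]);
    printf("\n");
    return 0;
}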
3664 | 3664 | ||
3665 | static int | 3665 | static int |
3666 | bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, | 3666 | bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, |
3667 | int buf_size) | 3667 | int buf_size) |
3668 | { | 3668 | { |
3669 | u32 written, offset32, len32; | 3669 | u32 written, offset32, len32; |
3670 | u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; | 3670 | u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; |
3671 | int rc = 0; | 3671 | int rc = 0; |
3672 | int align_start, align_end; | 3672 | int align_start, align_end; |
3673 | 3673 | ||
3674 | buf = data_buf; | 3674 | buf = data_buf; |
3675 | offset32 = offset; | 3675 | offset32 = offset; |
3676 | len32 = buf_size; | 3676 | len32 = buf_size; |
3677 | align_start = align_end = 0; | 3677 | align_start = align_end = 0; |
3678 | 3678 | ||
3679 | if ((align_start = (offset32 & 3))) { | 3679 | if ((align_start = (offset32 & 3))) { |
3680 | offset32 &= ~3; | 3680 | offset32 &= ~3; |
3681 | len32 += align_start; | 3681 | len32 += align_start; |
3682 | if (len32 < 4) | 3682 | if (len32 < 4) |
3683 | len32 = 4; | 3683 | len32 = 4; |
3684 | if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) | 3684 | if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) |
3685 | return rc; | 3685 | return rc; |
3686 | } | 3686 | } |
3687 | 3687 | ||
3688 | if (len32 & 3) { | 3688 | if (len32 & 3) { |
3689 | align_end = 4 - (len32 & 3); | 3689 | align_end = 4 - (len32 & 3); |
3690 | len32 += align_end; | 3690 | len32 += align_end; |
3691 | if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) | 3691 | if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) |
3692 | return rc; | 3692 | return rc; |
3693 | } | 3693 | } |
3694 | 3694 | ||
3695 | if (align_start || align_end) { | 3695 | if (align_start || align_end) { |
3696 | align_buf = kmalloc(len32, GFP_KERNEL); | 3696 | align_buf = kmalloc(len32, GFP_KERNEL); |
3697 | if (align_buf == NULL) | 3697 | if (align_buf == NULL) |
3698 | return -ENOMEM; | 3698 | return -ENOMEM; |
3699 | if (align_start) { | 3699 | if (align_start) { |
3700 | memcpy(align_buf, start, 4); | 3700 | memcpy(align_buf, start, 4); |
3701 | } | 3701 | } |
3702 | if (align_end) { | 3702 | if (align_end) { |
3703 | memcpy(align_buf + len32 - 4, end, 4); | 3703 | memcpy(align_buf + len32 - 4, end, 4); |
3704 | } | 3704 | } |
3705 | memcpy(align_buf + align_start, data_buf, buf_size); | 3705 | memcpy(align_buf + align_start, data_buf, buf_size); |
3706 | buf = align_buf; | 3706 | buf = align_buf; |
3707 | } | 3707 | } |
3708 | 3708 | ||
3709 | if (bp->flash_info->buffered == 0) { | 3709 | if (bp->flash_info->buffered == 0) { |
3710 | flash_buffer = kmalloc(264, GFP_KERNEL); | 3710 | flash_buffer = kmalloc(264, GFP_KERNEL); |
3711 | if (flash_buffer == NULL) { | 3711 | if (flash_buffer == NULL) { |
3712 | rc = -ENOMEM; | 3712 | rc = -ENOMEM; |
3713 | goto nvram_write_end; | 3713 | goto nvram_write_end; |
3714 | } | 3714 | } |
3715 | } | 3715 | } |
3716 | 3716 | ||
3717 | written = 0; | 3717 | written = 0; |
3718 | while ((written < len32) && (rc == 0)) { | 3718 | while ((written < len32) && (rc == 0)) { |
3719 | u32 page_start, page_end, data_start, data_end; | 3719 | u32 page_start, page_end, data_start, data_end; |
3720 | u32 addr, cmd_flags; | 3720 | u32 addr, cmd_flags; |
3721 | int i; | 3721 | int i; |
3722 | 3722 | ||
3723 | /* Find the page_start addr */ | 3723 | /* Find the page_start addr */ |
3724 | page_start = offset32 + written; | 3724 | page_start = offset32 + written; |
3725 | page_start -= (page_start % bp->flash_info->page_size); | 3725 | page_start -= (page_start % bp->flash_info->page_size); |
3726 | /* Find the page_end addr */ | 3726 | /* Find the page_end addr */ |
3727 | page_end = page_start + bp->flash_info->page_size; | 3727 | page_end = page_start + bp->flash_info->page_size; |
3728 | /* Find the data_start addr */ | 3728 | /* Find the data_start addr */ |
3729 | data_start = (written == 0) ? offset32 : page_start; | 3729 | data_start = (written == 0) ? offset32 : page_start; |
3730 | /* Find the data_end addr */ | 3730 | /* Find the data_end addr */ |
3731 | data_end = (page_end > offset32 + len32) ? | 3731 | data_end = (page_end > offset32 + len32) ? |
3732 | (offset32 + len32) : page_end; | 3732 | (offset32 + len32) : page_end; |
3733 | 3733 | ||
3734 | /* Request access to the flash interface. */ | 3734 | /* Request access to the flash interface. */ |
3735 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) | 3735 | if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) |
3736 | goto nvram_write_end; | 3736 | goto nvram_write_end; |
3737 | 3737 | ||
3738 | /* Enable access to flash interface */ | 3738 | /* Enable access to flash interface */ |
3739 | bnx2_enable_nvram_access(bp); | 3739 | bnx2_enable_nvram_access(bp); |
3740 | 3740 | ||
3741 | cmd_flags = BNX2_NVM_COMMAND_FIRST; | 3741 | cmd_flags = BNX2_NVM_COMMAND_FIRST; |
3742 | if (bp->flash_info->buffered == 0) { | 3742 | if (bp->flash_info->buffered == 0) { |
3743 | int j; | 3743 | int j; |
3744 | 3744 | ||
3745 | /* Read the whole page into the buffer | 3745 | /* Read the whole page into the buffer |
3746 | * (non-buffered flash only) */ | 3746 | * (non-buffered flash only) */ |
3747 | for (j = 0; j < bp->flash_info->page_size; j += 4) { | 3747 | for (j = 0; j < bp->flash_info->page_size; j += 4) { |
3748 | if (j == (bp->flash_info->page_size - 4)) { | 3748 | if (j == (bp->flash_info->page_size - 4)) { |
3749 | cmd_flags |= BNX2_NVM_COMMAND_LAST; | 3749 | cmd_flags |= BNX2_NVM_COMMAND_LAST; |
3750 | } | 3750 | } |
3751 | rc = bnx2_nvram_read_dword(bp, | 3751 | rc = bnx2_nvram_read_dword(bp, |
3752 | page_start + j, | 3752 | page_start + j, |
3753 | &flash_buffer[j], | 3753 | &flash_buffer[j], |
3754 | cmd_flags); | 3754 | cmd_flags); |
3755 | 3755 | ||
3756 | if (rc) | 3756 | if (rc) |
3757 | goto nvram_write_end; | 3757 | goto nvram_write_end; |
3758 | 3758 | ||
3759 | cmd_flags = 0; | 3759 | cmd_flags = 0; |
3760 | } | 3760 | } |
3761 | } | 3761 | } |
3762 | 3762 | ||
3763 | /* Enable writes to flash interface (unlock write-protect) */ | 3763 | /* Enable writes to flash interface (unlock write-protect) */ |
3764 | if ((rc = bnx2_enable_nvram_write(bp)) != 0) | 3764 | if ((rc = bnx2_enable_nvram_write(bp)) != 0) |
3765 | goto nvram_write_end; | 3765 | goto nvram_write_end; |
3766 | 3766 | ||
3767 | /* Loop to write back the buffer data from page_start to | 3767 | /* Loop to write back the buffer data from page_start to |
3768 | * data_start */ | 3768 | * data_start */ |
3769 | i = 0; | 3769 | i = 0; |
3770 | if (bp->flash_info->buffered == 0) { | 3770 | if (bp->flash_info->buffered == 0) { |
3771 | /* Erase the page */ | 3771 | /* Erase the page */ |
3772 | if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) | 3772 | if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) |
3773 | goto nvram_write_end; | 3773 | goto nvram_write_end; |
3774 | 3774 | ||
3775 | /* Re-enable writes for the actual write */ | 3775 | /* Re-enable writes for the actual write */ |
3776 | bnx2_enable_nvram_write(bp); | 3776 | bnx2_enable_nvram_write(bp); |
3777 | 3777 | ||
3778 | for (addr = page_start; addr < data_start; | 3778 | for (addr = page_start; addr < data_start; |
3779 | addr += 4, i += 4) { | 3779 | addr += 4, i += 4) { |
3780 | 3780 | ||
3781 | rc = bnx2_nvram_write_dword(bp, addr, | 3781 | rc = bnx2_nvram_write_dword(bp, addr, |
3782 | &flash_buffer[i], cmd_flags); | 3782 | &flash_buffer[i], cmd_flags); |
3783 | 3783 | ||
3784 | if (rc != 0) | 3784 | if (rc != 0) |
3785 | goto nvram_write_end; | 3785 | goto nvram_write_end; |
3786 | 3786 | ||
3787 | cmd_flags = 0; | 3787 | cmd_flags = 0; |
3788 | } | 3788 | } |
3789 | } | 3789 | } |
3790 | 3790 | ||
3791 | /* Loop to write the new data from data_start to data_end */ | 3791 | /* Loop to write the new data from data_start to data_end */ |
3792 | for (addr = data_start; addr < data_end; addr += 4, i += 4) { | 3792 | for (addr = data_start; addr < data_end; addr += 4, i += 4) { |
3793 | if ((addr == page_end - 4) || | 3793 | if ((addr == page_end - 4) || |
3794 | ((bp->flash_info->buffered) && | 3794 | ((bp->flash_info->buffered) && |
3795 | (addr == data_end - 4))) { | 3795 | (addr == data_end - 4))) { |
3796 | 3796 | ||
3797 | cmd_flags |= BNX2_NVM_COMMAND_LAST; | 3797 | cmd_flags |= BNX2_NVM_COMMAND_LAST; |
3798 | } | 3798 | } |
3799 | rc = bnx2_nvram_write_dword(bp, addr, buf, | 3799 | rc = bnx2_nvram_write_dword(bp, addr, buf, |
3800 | cmd_flags); | 3800 | cmd_flags); |
3801 | 3801 | ||
3802 | if (rc != 0) | 3802 | if (rc != 0) |
3803 | goto nvram_write_end; | 3803 | goto nvram_write_end; |
3804 | 3804 | ||
3805 | cmd_flags = 0; | 3805 | cmd_flags = 0; |
3806 | buf += 4; | 3806 | buf += 4; |
3807 | } | 3807 | } |
3808 | 3808 | ||
3809 | /* Loop to write back the buffer data from data_end | 3809 | /* Loop to write back the buffer data from data_end |
3810 | * to page_end */ | 3810 | * to page_end */ |
3811 | if (bp->flash_info->buffered == 0) { | 3811 | if (bp->flash_info->buffered == 0) { |
3812 | for (addr = data_end; addr < page_end; | 3812 | for (addr = data_end; addr < page_end; |
3813 | addr += 4, i += 4) { | 3813 | addr += 4, i += 4) { |
3814 | 3814 | ||
3815 | if (addr == page_end-4) { | 3815 | if (addr == page_end-4) { |
3816 | cmd_flags = BNX2_NVM_COMMAND_LAST; | 3816 | cmd_flags = BNX2_NVM_COMMAND_LAST; |
3817 | } | 3817 | } |
3818 | rc = bnx2_nvram_write_dword(bp, addr, | 3818 | rc = bnx2_nvram_write_dword(bp, addr, |
3819 | &flash_buffer[i], cmd_flags); | 3819 | &flash_buffer[i], cmd_flags); |
3820 | 3820 | ||
3821 | if (rc != 0) | 3821 | if (rc != 0) |
3822 | goto nvram_write_end; | 3822 | goto nvram_write_end; |
3823 | 3823 | ||
3824 | cmd_flags = 0; | 3824 | cmd_flags = 0; |
3825 | } | 3825 | } |
3826 | } | 3826 | } |
3827 | 3827 | ||
3828 | /* Disable writes to flash interface (lock write-protect) */ | 3828 | /* Disable writes to flash interface (lock write-protect) */ |
3829 | bnx2_disable_nvram_write(bp); | 3829 | bnx2_disable_nvram_write(bp); |
3830 | 3830 | ||
3831 | /* Disable access to flash interface */ | 3831 | /* Disable access to flash interface */ |
3832 | bnx2_disable_nvram_access(bp); | 3832 | bnx2_disable_nvram_access(bp); |
3833 | bnx2_release_nvram_lock(bp); | 3833 | bnx2_release_nvram_lock(bp); |
3834 | 3834 | ||
3835 | /* Increment written */ | 3835 | /* Increment written */ |
3836 | written += data_end - data_start; | 3836 | written += data_end - data_start; |
3837 | } | 3837 | } |
3838 | 3838 | ||
3839 | nvram_write_end: | 3839 | nvram_write_end: |
3840 | kfree(flash_buffer); | 3840 | kfree(flash_buffer); |
3841 | kfree(align_buf); | 3841 | kfree(align_buf); |
3842 | return rc; | 3842 | return rc; |
3843 | } | 3843 | } |
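For non-buffered flash, the write path above is a page-level read-modify-write: each pass reads the whole page into flash_buffer, erases it, rewrites the untouched leading words, then the caller's data, then the untouched trailing words. The stand-alone sketch below reproduces only the per-page bounds it computes; the page size, offset and length are illustrative values, not driver constants.

#include <stdint.h>
#include <stdio.h>

/* page_size, offset and len stand in for bp->flash_info->page_size,
 * offset32 and len32 in the loop above. */
static void page_bounds(uint32_t offset, uint32_t len, uint32_t page_size)
{
    uint32_t written = 0;

    while (written < len) {
        uint32_t page_start = offset + written;
        page_start -= page_start % page_size;

        uint32_t page_end   = page_start + page_size;
        uint32_t data_start = written ? page_start : offset;
        uint32_t data_end   = (page_end > offset + len) ?
                              (offset + len) : page_end;

        printf("page [%u, %u)  new data [%u, %u)\n",
               page_start, page_end, data_start, data_end);
        written += data_end - data_start;
    }
}

int main(void)
{
    page_bounds(300, 600, 256);   /* a write spanning three 256-byte pages */
    return 0;
}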
3844 | 3844 | ||
3845 | static void | 3845 | static void |
3846 | bnx2_init_remote_phy(struct bnx2 *bp) | 3846 | bnx2_init_remote_phy(struct bnx2 *bp) |
3847 | { | 3847 | { |
3848 | u32 val; | 3848 | u32 val; |
3849 | 3849 | ||
3850 | bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG; | 3850 | bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG; |
3851 | if (!(bp->phy_flags & PHY_SERDES_FLAG)) | 3851 | if (!(bp->phy_flags & PHY_SERDES_FLAG)) |
3852 | return; | 3852 | return; |
3853 | 3853 | ||
3854 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB); | 3854 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB); |
3855 | if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) | 3855 | if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) |
3856 | return; | 3856 | return; |
3857 | 3857 | ||
3858 | if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { | 3858 | if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { |
3859 | if (netif_running(bp->dev)) { | 3859 | if (netif_running(bp->dev)) { |
3860 | val = BNX2_DRV_ACK_CAP_SIGNATURE | | 3860 | val = BNX2_DRV_ACK_CAP_SIGNATURE | |
3861 | BNX2_FW_CAP_REMOTE_PHY_CAPABLE; | 3861 | BNX2_FW_CAP_REMOTE_PHY_CAPABLE; |
3862 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, | 3862 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, |
3863 | val); | 3863 | val); |
3864 | } | 3864 | } |
3865 | bp->phy_flags |= REMOTE_PHY_CAP_FLAG; | 3865 | bp->phy_flags |= REMOTE_PHY_CAP_FLAG; |
3866 | 3866 | ||
3867 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); | 3867 | val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); |
3868 | if (val & BNX2_LINK_STATUS_SERDES_LINK) | 3868 | if (val & BNX2_LINK_STATUS_SERDES_LINK) |
3869 | bp->phy_port = PORT_FIBRE; | 3869 | bp->phy_port = PORT_FIBRE; |
3870 | else | 3870 | else |
3871 | bp->phy_port = PORT_TP; | 3871 | bp->phy_port = PORT_TP; |
3872 | } | 3872 | } |
3873 | } | 3873 | } |
3874 | 3874 | ||
3875 | static int | 3875 | static int |
3876 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | 3876 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) |
3877 | { | 3877 | { |
3878 | u32 val; | 3878 | u32 val; |
3879 | int i, rc = 0; | 3879 | int i, rc = 0; |
3880 | 3880 | ||
3881 | /* Wait for the current PCI transaction to complete before | 3881 | /* Wait for the current PCI transaction to complete before |
3882 | * issuing a reset. */ | 3882 | * issuing a reset. */ |
3883 | REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, | 3883 | REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, |
3884 | BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | | 3884 | BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | |
3885 | BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | | 3885 | BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | |
3886 | BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | | 3886 | BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | |
3887 | BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); | 3887 | BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); |
3888 | val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); | 3888 | val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); |
3889 | udelay(5); | 3889 | udelay(5); |
3890 | 3890 | ||
3891 | /* Wait for the firmware to tell us it is ok to issue a reset. */ | 3891 | /* Wait for the firmware to tell us it is ok to issue a reset. */ |
3892 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1); | 3892 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1); |
3893 | 3893 | ||
3894 | /* Deposit a driver reset signature so the firmware knows that | 3894 | /* Deposit a driver reset signature so the firmware knows that |
3895 | * this is a soft reset. */ | 3895 | * this is a soft reset. */ |
3896 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE, | 3896 | REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE, |
3897 | BNX2_DRV_RESET_SIGNATURE_MAGIC); | 3897 | BNX2_DRV_RESET_SIGNATURE_MAGIC); |
3898 | 3898 | ||
3899 | /* Do a dummy read to force the chip to complete all current transactions | 3899 | /* Do a dummy read to force the chip to complete all current transactions |
3900 | * before we issue a reset. */ | 3900 | * before we issue a reset. */ |
3901 | val = REG_RD(bp, BNX2_MISC_ID); | 3901 | val = REG_RD(bp, BNX2_MISC_ID); |
3902 | 3902 | ||
3903 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 3903 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
3904 | REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); | 3904 | REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); |
3905 | REG_RD(bp, BNX2_MISC_COMMAND); | 3905 | REG_RD(bp, BNX2_MISC_COMMAND); |
3906 | udelay(5); | 3906 | udelay(5); |
3907 | 3907 | ||
3908 | val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | | 3908 | val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | |
3909 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; | 3909 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; |
3910 | 3910 | ||
3911 | pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); | 3911 | pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); |
3912 | 3912 | ||
3913 | } else { | 3913 | } else { |
3914 | val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | | 3914 | val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | |
3915 | BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | | 3915 | BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | |
3916 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; | 3916 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; |
3917 | 3917 | ||
3918 | /* Chip reset. */ | 3918 | /* Chip reset. */ |
3919 | REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); | 3919 | REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); |
3920 | 3920 | ||
3921 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || | 3921 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || |
3922 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) { | 3922 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) { |
3923 | current->state = TASK_UNINTERRUPTIBLE; | 3923 | current->state = TASK_UNINTERRUPTIBLE; |
3924 | schedule_timeout(HZ / 50); | 3924 | schedule_timeout(HZ / 50); |
3925 | } | 3925 | } |
3926 | 3926 | ||
3927 | /* Reset takes approximately 30 usec */ | 3927 | /* Reset takes approximately 30 usec */ |
3928 | for (i = 0; i < 10; i++) { | 3928 | for (i = 0; i < 10; i++) { |
3929 | val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); | 3929 | val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); |
3930 | if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | | 3930 | if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | |
3931 | BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) | 3931 | BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) |
3932 | break; | 3932 | break; |
3933 | udelay(10); | 3933 | udelay(10); |
3934 | } | 3934 | } |
3935 | 3935 | ||
3936 | if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | | 3936 | if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | |
3937 | BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { | 3937 | BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { |
3938 | printk(KERN_ERR PFX "Chip reset did not complete\n"); | 3938 | printk(KERN_ERR PFX "Chip reset did not complete\n"); |
3939 | return -EBUSY; | 3939 | return -EBUSY; |
3940 | } | 3940 | } |
3941 | } | 3941 | } |
3942 | 3942 | ||
3943 | /* Make sure byte swapping is properly configured. */ | 3943 | /* Make sure byte swapping is properly configured. */ |
3944 | val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); | 3944 | val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); |
3945 | if (val != 0x01020304) { | 3945 | if (val != 0x01020304) { |
3946 | printk(KERN_ERR PFX "Chip not in correct endian mode\n"); | 3946 | printk(KERN_ERR PFX "Chip not in correct endian mode\n"); |
3947 | return -ENODEV; | 3947 | return -ENODEV; |
3948 | } | 3948 | } |
3949 | 3949 | ||
3950 | /* Wait for the firmware to finish its initialization. */ | 3950 | /* Wait for the firmware to finish its initialization. */ |
3951 | rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0); | 3951 | rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0); |
3952 | if (rc) | 3952 | if (rc) |
3953 | return rc; | 3953 | return rc; |
3954 | 3954 | ||
3955 | spin_lock_bh(&bp->phy_lock); | 3955 | spin_lock_bh(&bp->phy_lock); |
3956 | bnx2_init_remote_phy(bp); | 3956 | bnx2_init_remote_phy(bp); |
3957 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 3957 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
3958 | bnx2_set_default_remote_link(bp); | 3958 | bnx2_set_default_remote_link(bp); |
3959 | spin_unlock_bh(&bp->phy_lock); | 3959 | spin_unlock_bh(&bp->phy_lock); |
3960 | 3960 | ||
3961 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { | 3961 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { |
3962 | /* Adjust the voltage regulator to two steps lower. The default | 3962 | /* Adjust the voltage regulator to two steps lower. The default |
3963 | * of this register is 0x0000000e. */ | 3963 | * of this register is 0x0000000e. */ |
3964 | REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); | 3964 | REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); |
3965 | 3965 | ||
3966 | /* Remove bad rbuf memory from the free pool. */ | 3966 | /* Remove bad rbuf memory from the free pool. */ |
3967 | rc = bnx2_alloc_bad_rbuf(bp); | 3967 | rc = bnx2_alloc_bad_rbuf(bp); |
3968 | } | 3968 | } |
3969 | 3969 | ||
3970 | return rc; | 3970 | return rc; |
3971 | } | 3971 | } |
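The non-5709 branch above waits for the reset by re-reading BNX2_PCICFG_MISC_CONFIG until the CORE_RST_REQ/CORE_RST_BSY bits clear, giving up after a fixed number of tries and returning -EBUSY. A generic sketch of that bounded-poll pattern is below; read_reg() and busy_wait_us() are made-up stand-ins for REG_RD() and udelay(), and the fake register simply reports busy for the first few reads.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RST_BITS 0x3u

/* Fake register: busy for the first three reads, then clear. */
static uint32_t read_reg(void)
{
    static int busy_reads = 3;
    return busy_reads-- > 0 ? RST_BITS : 0;
}

static void busy_wait_us(unsigned int us)   /* stands in for udelay() */
{
    (void)us;
}

/* Bounded poll: true once the bits clear, false on timeout (the driver
 * reports -EBUSY in that case). */
static bool wait_bits_clear(uint32_t bits, int tries, unsigned int delay_us)
{
    while (tries--) {
        if ((read_reg() & bits) == 0)
            return true;
        busy_wait_us(delay_us);
    }
    return false;
}

int main(void)
{
    printf("reset %s\n",
           wait_bits_clear(RST_BITS, 10, 10) ? "completed" : "timed out");
    return 0;
}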
3972 | 3972 | ||
3973 | static int | 3973 | static int |
3974 | bnx2_init_chip(struct bnx2 *bp) | 3974 | bnx2_init_chip(struct bnx2 *bp) |
3975 | { | 3975 | { |
3976 | u32 val; | 3976 | u32 val; |
3977 | int rc; | 3977 | int rc; |
3978 | 3978 | ||
3979 | /* Make sure the interrupt is not active. */ | 3979 | /* Make sure the interrupt is not active. */ |
3980 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); | 3980 | REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); |
3981 | 3981 | ||
3982 | val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | | 3982 | val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | |
3983 | BNX2_DMA_CONFIG_DATA_WORD_SWAP | | 3983 | BNX2_DMA_CONFIG_DATA_WORD_SWAP | |
3984 | #ifdef __BIG_ENDIAN | 3984 | #ifdef __BIG_ENDIAN |
3985 | BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | | 3985 | BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | |
3986 | #endif | 3986 | #endif |
3987 | BNX2_DMA_CONFIG_CNTL_WORD_SWAP | | 3987 | BNX2_DMA_CONFIG_CNTL_WORD_SWAP | |
3988 | DMA_READ_CHANS << 12 | | 3988 | DMA_READ_CHANS << 12 | |
3989 | DMA_WRITE_CHANS << 16; | 3989 | DMA_WRITE_CHANS << 16; |
3990 | 3990 | ||
3991 | val |= (0x2 << 20) | (1 << 11); | 3991 | val |= (0x2 << 20) | (1 << 11); |
3992 | 3992 | ||
3993 | if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133)) | 3993 | if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133)) |
3994 | val |= (1 << 23); | 3994 | val |= (1 << 23); |
3995 | 3995 | ||
3996 | if ((CHIP_NUM(bp) == CHIP_NUM_5706) && | 3996 | if ((CHIP_NUM(bp) == CHIP_NUM_5706) && |
3997 | (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG)) | 3997 | (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG)) |
3998 | val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; | 3998 | val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; |
3999 | 3999 | ||
4000 | REG_WR(bp, BNX2_DMA_CONFIG, val); | 4000 | REG_WR(bp, BNX2_DMA_CONFIG, val); |
4001 | 4001 | ||
4002 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { | 4002 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { |
4003 | val = REG_RD(bp, BNX2_TDMA_CONFIG); | 4003 | val = REG_RD(bp, BNX2_TDMA_CONFIG); |
4004 | val |= BNX2_TDMA_CONFIG_ONE_DMA; | 4004 | val |= BNX2_TDMA_CONFIG_ONE_DMA; |
4005 | REG_WR(bp, BNX2_TDMA_CONFIG, val); | 4005 | REG_WR(bp, BNX2_TDMA_CONFIG, val); |
4006 | } | 4006 | } |
4007 | 4007 | ||
4008 | if (bp->flags & PCIX_FLAG) { | 4008 | if (bp->flags & PCIX_FLAG) { |
4009 | u16 val16; | 4009 | u16 val16; |
4010 | 4010 | ||
4011 | pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, | 4011 | pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, |
4012 | &val16); | 4012 | &val16); |
4013 | pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, | 4013 | pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, |
4014 | val16 & ~PCI_X_CMD_ERO); | 4014 | val16 & ~PCI_X_CMD_ERO); |
4015 | } | 4015 | } |
4016 | 4016 | ||
4017 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, | 4017 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, |
4018 | BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | | 4018 | BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | |
4019 | BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | | 4019 | BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | |
4020 | BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); | 4020 | BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); |
4021 | 4021 | ||
4022 | /* Initialize context mapping and zero out the quick contexts. The | 4022 | /* Initialize context mapping and zero out the quick contexts. The |
4023 | * context block must have already been enabled. */ | 4023 | * context block must have already been enabled. */ |
4024 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 4024 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
4025 | rc = bnx2_init_5709_context(bp); | 4025 | rc = bnx2_init_5709_context(bp); |
4026 | if (rc) | 4026 | if (rc) |
4027 | return rc; | 4027 | return rc; |
4028 | } else | 4028 | } else |
4029 | bnx2_init_context(bp); | 4029 | bnx2_init_context(bp); |
4030 | 4030 | ||
4031 | if ((rc = bnx2_init_cpus(bp)) != 0) | 4031 | if ((rc = bnx2_init_cpus(bp)) != 0) |
4032 | return rc; | 4032 | return rc; |
4033 | 4033 | ||
4034 | bnx2_init_nvram(bp); | 4034 | bnx2_init_nvram(bp); |
4035 | 4035 | ||
4036 | bnx2_set_mac_addr(bp); | 4036 | bnx2_set_mac_addr(bp); |
4037 | 4037 | ||
4038 | val = REG_RD(bp, BNX2_MQ_CONFIG); | 4038 | val = REG_RD(bp, BNX2_MQ_CONFIG); |
4039 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; | 4039 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; |
4040 | val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; | 4040 | val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; |
4041 | if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) | 4041 | if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) |
4042 | val |= BNX2_MQ_CONFIG_HALT_DIS; | 4042 | val |= BNX2_MQ_CONFIG_HALT_DIS; |
4043 | 4043 | ||
4044 | REG_WR(bp, BNX2_MQ_CONFIG, val); | 4044 | REG_WR(bp, BNX2_MQ_CONFIG, val); |
4045 | 4045 | ||
4046 | val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); | 4046 | val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); |
4047 | REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); | 4047 | REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); |
4048 | REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); | 4048 | REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); |
4049 | 4049 | ||
4050 | val = (BCM_PAGE_BITS - 8) << 24; | 4050 | val = (BCM_PAGE_BITS - 8) << 24; |
4051 | REG_WR(bp, BNX2_RV2P_CONFIG, val); | 4051 | REG_WR(bp, BNX2_RV2P_CONFIG, val); |
4052 | 4052 | ||
4053 | /* Configure page size. */ | 4053 | /* Configure page size. */ |
4054 | val = REG_RD(bp, BNX2_TBDR_CONFIG); | 4054 | val = REG_RD(bp, BNX2_TBDR_CONFIG); |
4055 | val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; | 4055 | val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; |
4056 | val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; | 4056 | val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; |
4057 | REG_WR(bp, BNX2_TBDR_CONFIG, val); | 4057 | REG_WR(bp, BNX2_TBDR_CONFIG, val); |
4058 | 4058 | ||
4059 | val = bp->mac_addr[0] + | 4059 | val = bp->mac_addr[0] + |
4060 | (bp->mac_addr[1] << 8) + | 4060 | (bp->mac_addr[1] << 8) + |
4061 | (bp->mac_addr[2] << 16) + | 4061 | (bp->mac_addr[2] << 16) + |
4062 | bp->mac_addr[3] + | 4062 | bp->mac_addr[3] + |
4063 | (bp->mac_addr[4] << 8) + | 4063 | (bp->mac_addr[4] << 8) + |
4064 | (bp->mac_addr[5] << 16); | 4064 | (bp->mac_addr[5] << 16); |
4065 | REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); | 4065 | REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); |
4066 | 4066 | ||
4067 | /* Program the MTU. Also include 4 bytes for CRC32. */ | 4067 | /* Program the MTU. Also include 4 bytes for CRC32. */ |
4068 | val = bp->dev->mtu + ETH_HLEN + 4; | 4068 | val = bp->dev->mtu + ETH_HLEN + 4; |
4069 | if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) | 4069 | if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) |
4070 | val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; | 4070 | val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; |
4071 | REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); | 4071 | REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); |
4072 | 4072 | ||
4073 | bp->last_status_idx = 0; | 4073 | bp->last_status_idx = 0; |
4074 | bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; | 4074 | bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; |
4075 | 4075 | ||
4076 | /* Set up how to generate a link change interrupt. */ | 4076 | /* Set up how to generate a link change interrupt. */ |
4077 | REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); | 4077 | REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); |
4078 | 4078 | ||
4079 | REG_WR(bp, BNX2_HC_STATUS_ADDR_L, | 4079 | REG_WR(bp, BNX2_HC_STATUS_ADDR_L, |
4080 | (u64) bp->status_blk_mapping & 0xffffffff); | 4080 | (u64) bp->status_blk_mapping & 0xffffffff); |
4081 | REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); | 4081 | REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); |
4082 | 4082 | ||
4083 | REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, | 4083 | REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, |
4084 | (u64) bp->stats_blk_mapping & 0xffffffff); | 4084 | (u64) bp->stats_blk_mapping & 0xffffffff); |
4085 | REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, | 4085 | REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, |
4086 | (u64) bp->stats_blk_mapping >> 32); | 4086 | (u64) bp->stats_blk_mapping >> 32); |
4087 | 4087 | ||
4088 | REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, | 4088 | REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, |
4089 | (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); | 4089 | (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); |
4090 | 4090 | ||
4091 | REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, | 4091 | REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, |
4092 | (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); | 4092 | (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); |
4093 | 4093 | ||
4094 | REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, | 4094 | REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, |
4095 | (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); | 4095 | (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); |
4096 | 4096 | ||
4097 | REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); | 4097 | REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); |
4098 | 4098 | ||
4099 | REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); | 4099 | REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); |
4100 | 4100 | ||
4101 | REG_WR(bp, BNX2_HC_COM_TICKS, | 4101 | REG_WR(bp, BNX2_HC_COM_TICKS, |
4102 | (bp->com_ticks_int << 16) | bp->com_ticks); | 4102 | (bp->com_ticks_int << 16) | bp->com_ticks); |
4103 | 4103 | ||
4104 | REG_WR(bp, BNX2_HC_CMD_TICKS, | 4104 | REG_WR(bp, BNX2_HC_CMD_TICKS, |
4105 | (bp->cmd_ticks_int << 16) | bp->cmd_ticks); | 4105 | (bp->cmd_ticks_int << 16) | bp->cmd_ticks); |
4106 | 4106 | ||
4107 | if (CHIP_NUM(bp) == CHIP_NUM_5708) | 4107 | if (CHIP_NUM(bp) == CHIP_NUM_5708) |
4108 | REG_WR(bp, BNX2_HC_STATS_TICKS, 0); | 4108 | REG_WR(bp, BNX2_HC_STATS_TICKS, 0); |
4109 | else | 4109 | else |
4110 | REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00); | 4110 | REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00); |
4111 | REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ | 4111 | REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ |
4112 | 4112 | ||
4113 | if (CHIP_ID(bp) == CHIP_ID_5706_A1) | 4113 | if (CHIP_ID(bp) == CHIP_ID_5706_A1) |
4114 | val = BNX2_HC_CONFIG_COLLECT_STATS; | 4114 | val = BNX2_HC_CONFIG_COLLECT_STATS; |
4115 | else { | 4115 | else { |
4116 | val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | | 4116 | val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | |
4117 | BNX2_HC_CONFIG_COLLECT_STATS; | 4117 | BNX2_HC_CONFIG_COLLECT_STATS; |
4118 | } | 4118 | } |
4119 | 4119 | ||
4120 | if (bp->flags & ONE_SHOT_MSI_FLAG) | 4120 | if (bp->flags & ONE_SHOT_MSI_FLAG) |
4121 | val |= BNX2_HC_CONFIG_ONE_SHOT; | 4121 | val |= BNX2_HC_CONFIG_ONE_SHOT; |
4122 | 4122 | ||
4123 | REG_WR(bp, BNX2_HC_CONFIG, val); | 4123 | REG_WR(bp, BNX2_HC_CONFIG, val); |
4124 | 4124 | ||
4125 | /* Clear internal stats counters. */ | 4125 | /* Clear internal stats counters. */ |
4126 | REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); | 4126 | REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); |
4127 | 4127 | ||
4128 | REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); | 4128 | REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); |
4129 | 4129 | ||
4130 | if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & | 4130 | if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & |
4131 | BNX2_PORT_FEATURE_ASF_ENABLED) | 4131 | BNX2_PORT_FEATURE_ASF_ENABLED) |
4132 | bp->flags |= ASF_ENABLE_FLAG; | 4132 | bp->flags |= ASF_ENABLE_FLAG; |
4133 | 4133 | ||
4134 | /* Initialize the receive filter. */ | 4134 | /* Initialize the receive filter. */ |
4135 | bnx2_set_rx_mode(bp->dev); | 4135 | bnx2_set_rx_mode(bp->dev); |
4136 | 4136 | ||
4137 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 4137 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
4138 | val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); | 4138 | val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); |
4139 | val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; | 4139 | val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; |
4140 | REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); | 4140 | REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); |
4141 | } | 4141 | } |
4142 | rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, | 4142 | rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, |
4143 | 0); | 4143 | 0); |
4144 | 4144 | ||
4145 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); | 4145 | REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); |
4146 | REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); | 4146 | REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); |
4147 | 4147 | ||
4148 | udelay(20); | 4148 | udelay(20); |
4149 | 4149 | ||
4150 | bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); | 4150 | bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); |
4151 | 4151 | ||
4152 | return rc; | 4152 | return rc; |
4153 | } | 4153 | } |
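One small piece of the setup above is the EMAC backoff seed, a sum built from the six MAC address bytes taken as two little-endian 24-bit groups. A tiny sketch of just that arithmetic, using a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t mac[6] = { 0x00, 0x10, 0x18, 0x12, 0x34, 0x56 };   /* example only */
    uint32_t seed = mac[0] + (mac[1] << 8) + (mac[2] << 16) +
                    mac[3] + (mac[4] << 8) + (mac[5] << 16);

    printf("backoff seed = 0x%08x\n", seed);
    return 0;
}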
4154 | 4154 | ||
4155 | static void | 4155 | static void |
4156 | bnx2_init_tx_context(struct bnx2 *bp, u32 cid) | 4156 | bnx2_init_tx_context(struct bnx2 *bp, u32 cid) |
4157 | { | 4157 | { |
4158 | u32 val, offset0, offset1, offset2, offset3; | 4158 | u32 val, offset0, offset1, offset2, offset3; |
4159 | 4159 | ||
4160 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 4160 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
4161 | offset0 = BNX2_L2CTX_TYPE_XI; | 4161 | offset0 = BNX2_L2CTX_TYPE_XI; |
4162 | offset1 = BNX2_L2CTX_CMD_TYPE_XI; | 4162 | offset1 = BNX2_L2CTX_CMD_TYPE_XI; |
4163 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; | 4163 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; |
4164 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; | 4164 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; |
4165 | } else { | 4165 | } else { |
4166 | offset0 = BNX2_L2CTX_TYPE; | 4166 | offset0 = BNX2_L2CTX_TYPE; |
4167 | offset1 = BNX2_L2CTX_CMD_TYPE; | 4167 | offset1 = BNX2_L2CTX_CMD_TYPE; |
4168 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; | 4168 | offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; |
4169 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; | 4169 | offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; |
4170 | } | 4170 | } |
4171 | val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; | 4171 | val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; |
4172 | CTX_WR(bp, GET_CID_ADDR(cid), offset0, val); | 4172 | CTX_WR(bp, GET_CID_ADDR(cid), offset0, val); |
4173 | 4173 | ||
4174 | val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); | 4174 | val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); |
4175 | CTX_WR(bp, GET_CID_ADDR(cid), offset1, val); | 4175 | CTX_WR(bp, GET_CID_ADDR(cid), offset1, val); |
4176 | 4176 | ||
4177 | val = (u64) bp->tx_desc_mapping >> 32; | 4177 | val = (u64) bp->tx_desc_mapping >> 32; |
4178 | CTX_WR(bp, GET_CID_ADDR(cid), offset2, val); | 4178 | CTX_WR(bp, GET_CID_ADDR(cid), offset2, val); |
4179 | 4179 | ||
4180 | val = (u64) bp->tx_desc_mapping & 0xffffffff; | 4180 | val = (u64) bp->tx_desc_mapping & 0xffffffff; |
4181 | CTX_WR(bp, GET_CID_ADDR(cid), offset3, val); | 4181 | CTX_WR(bp, GET_CID_ADDR(cid), offset3, val); |
4182 | } | 4182 | } |
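The context writes above split each 64-bit descriptor-ring DMA address into a high and a low 32-bit word before storing them. A trivial sketch of that split, with a made-up mapping value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mapping = 0x0000000123456000ULL;   /* illustrative DMA address */
    uint32_t hi = (uint32_t)(mapping >> 32);
    uint32_t lo = (uint32_t)(mapping & 0xffffffff);

    printf("hi = 0x%08x  lo = 0x%08x\n", hi, lo);
    return 0;
}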
4183 | 4183 | ||
4184 | static void | 4184 | static void |
4185 | bnx2_init_tx_ring(struct bnx2 *bp) | 4185 | bnx2_init_tx_ring(struct bnx2 *bp) |
4186 | { | 4186 | { |
4187 | struct tx_bd *txbd; | 4187 | struct tx_bd *txbd; |
4188 | u32 cid; | 4188 | u32 cid; |
4189 | 4189 | ||
4190 | bp->tx_wake_thresh = bp->tx_ring_size / 2; | 4190 | bp->tx_wake_thresh = bp->tx_ring_size / 2; |
4191 | 4191 | ||
4192 | txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; | 4192 | txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; |
4193 | 4193 | ||
4194 | txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; | 4194 | txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; |
4195 | txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff; | 4195 | txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff; |
4196 | 4196 | ||
4197 | bp->tx_prod = 0; | 4197 | bp->tx_prod = 0; |
4198 | bp->tx_cons = 0; | 4198 | bp->tx_cons = 0; |
4199 | bp->hw_tx_cons = 0; | 4199 | bp->hw_tx_cons = 0; |
4200 | bp->tx_prod_bseq = 0; | 4200 | bp->tx_prod_bseq = 0; |
4201 | 4201 | ||
4202 | cid = TX_CID; | 4202 | cid = TX_CID; |
4203 | bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; | 4203 | bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; |
4204 | bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; | 4204 | bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; |
4205 | 4205 | ||
4206 | bnx2_init_tx_context(bp, cid); | 4206 | bnx2_init_tx_context(bp, cid); |
4207 | } | 4207 | } |
4208 | 4208 | ||
4209 | static void | 4209 | static void |
4210 | bnx2_init_rx_ring(struct bnx2 *bp) | 4210 | bnx2_init_rx_ring(struct bnx2 *bp) |
4211 | { | 4211 | { |
4212 | struct rx_bd *rxbd; | 4212 | struct rx_bd *rxbd; |
4213 | int i; | 4213 | int i; |
4214 | u16 prod, ring_prod; | 4214 | u16 prod, ring_prod; |
4215 | u32 val; | 4215 | u32 val; |
4216 | 4216 | ||
4217 | /* 8 for CRC and VLAN */ | 4217 | /* 8 for CRC and VLAN */ |
4218 | bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8; | 4218 | bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8; |
4219 | /* hw alignment */ | 4219 | /* hw alignment */ |
4220 | bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; | 4220 | bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; |
4221 | 4221 | ||
4222 | ring_prod = prod = bp->rx_prod = 0; | 4222 | ring_prod = prod = bp->rx_prod = 0; |
4223 | bp->rx_cons = 0; | 4223 | bp->rx_cons = 0; |
4224 | bp->hw_rx_cons = 0; | 4224 | bp->hw_rx_cons = 0; |
4225 | bp->rx_prod_bseq = 0; | 4225 | bp->rx_prod_bseq = 0; |
4226 | 4226 | ||
4227 | for (i = 0; i < bp->rx_max_ring; i++) { | 4227 | for (i = 0; i < bp->rx_max_ring; i++) { |
4228 | int j; | 4228 | int j; |
4229 | 4229 | ||
4230 | rxbd = &bp->rx_desc_ring[i][0]; | 4230 | rxbd = &bp->rx_desc_ring[i][0]; |
4231 | for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { | 4231 | for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { |
4232 | rxbd->rx_bd_len = bp->rx_buf_use_size; | 4232 | rxbd->rx_bd_len = bp->rx_buf_use_size; |
4233 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; | 4233 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; |
4234 | } | 4234 | } |
4235 | if (i == (bp->rx_max_ring - 1)) | 4235 | if (i == (bp->rx_max_ring - 1)) |
4236 | j = 0; | 4236 | j = 0; |
4237 | else | 4237 | else |
4238 | j = i + 1; | 4238 | j = i + 1; |
4239 | rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32; | 4239 | rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32; |
4240 | rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] & | 4240 | rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] & |
4241 | 0xffffffff; | 4241 | 0xffffffff; |
4242 | } | 4242 | } |
4243 | 4243 | ||
4244 | val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; | 4244 | val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; |
4245 | val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; | 4245 | val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; |
4246 | val |= 0x02 << 8; | 4246 | val |= 0x02 << 8; |
4247 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); | 4247 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); |
4248 | 4248 | ||
4249 | val = (u64) bp->rx_desc_mapping[0] >> 32; | 4249 | val = (u64) bp->rx_desc_mapping[0] >> 32; |
4250 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); | 4250 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); |
4251 | 4251 | ||
4252 | val = (u64) bp->rx_desc_mapping[0] & 0xffffffff; | 4252 | val = (u64) bp->rx_desc_mapping[0] & 0xffffffff; |
4253 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); | 4253 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); |
4254 | 4254 | ||
4255 | for (i = 0; i < bp->rx_ring_size; i++) { | 4255 | for (i = 0; i < bp->rx_ring_size; i++) { |
4256 | if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { | 4256 | if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { |
4257 | break; | 4257 | break; |
4258 | } | 4258 | } |
4259 | prod = NEXT_RX_BD(prod); | 4259 | prod = NEXT_RX_BD(prod); |
4260 | ring_prod = RX_RING_IDX(prod); | 4260 | ring_prod = RX_RING_IDX(prod); |
4261 | } | 4261 | } |
4262 | bp->rx_prod = prod; | 4262 | bp->rx_prod = prod; |
4263 | 4263 | ||
4264 | REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod); | 4264 | REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod); |
4265 | 4265 | ||
4266 | REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq); | 4266 | REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq); |
4267 | } | 4267 | } |
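The RX ring above can span several descriptor pages; the last descriptor of each page is written with the DMA address of the next page, and the final page wraps back to page 0, so the hardware follows one circular chain. A small sketch of that chaining step (the page count is illustrative):

#include <stdio.h>

int main(void)
{
    int rx_max_ring = 4;   /* illustrative; mirrors bp->rx_max_ring */

    for (int i = 0; i < rx_max_ring; i++) {
        int next = (i == rx_max_ring - 1) ? 0 : i + 1;
        printf("page %d: last BD chains to page %d\n", i, next);
    }
    return 0;
}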
4268 | 4268 | ||
4269 | static void | 4269 | static void |
4270 | bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) | 4270 | bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) |
4271 | { | 4271 | { |
4272 | u32 num_rings, max; | 4272 | u32 num_rings, max; |
4273 | 4273 | ||
4274 | bp->rx_ring_size = size; | 4274 | bp->rx_ring_size = size; |
4275 | num_rings = 1; | 4275 | num_rings = 1; |
4276 | while (size > MAX_RX_DESC_CNT) { | 4276 | while (size > MAX_RX_DESC_CNT) { |
4277 | size -= MAX_RX_DESC_CNT; | 4277 | size -= MAX_RX_DESC_CNT; |
4278 | num_rings++; | 4278 | num_rings++; |
4279 | } | 4279 | } |
4280 | /* round to next power of 2 */ | 4280 | /* round to next power of 2 */ |
4281 | max = MAX_RX_RINGS; | 4281 | max = MAX_RX_RINGS; |
4282 | while ((max & num_rings) == 0) | 4282 | while ((max & num_rings) == 0) |
4283 | max >>= 1; | 4283 | max >>= 1; |
4284 | 4284 | ||
4285 | if (num_rings != max) | 4285 | if (num_rings != max) |
4286 | max <<= 1; | 4286 | max <<= 1; |
4287 | 4287 | ||
4288 | bp->rx_max_ring = max; | 4288 | bp->rx_max_ring = max; |
4289 | bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; | 4289 | bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; |
4290 | } | 4290 | } |
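The sizing helper above first counts how many descriptor pages the requested size needs, then rounds that count up to a power of two, capped by the maximum ring count. A stand-alone sketch of the same rounding; DESC_PER_RING and MAX_RINGS are illustrative stand-ins for the driver's MAX_RX_DESC_CNT and MAX_RX_RINGS.

#include <stdio.h>

#define DESC_PER_RING 255   /* assumed value, for illustration */
#define MAX_RINGS 4         /* assumed value, for illustration */

static unsigned int rings_for(unsigned int size)
{
    unsigned int num_rings = 1, max = MAX_RINGS;

    while (size > DESC_PER_RING) {       /* pages needed for 'size' BDs */
        size -= DESC_PER_RING;
        num_rings++;
    }
    while ((max & num_rings) == 0)       /* find the highest set bit */
        max >>= 1;
    if (num_rings != max)                /* round up to a power of two */
        max <<= 1;
    return max;
}

int main(void)
{
    printf("size 100 -> %u page(s)\n", rings_for(100));   /* 1 */
    printf("size 600 -> %u page(s)\n", rings_for(600));   /* 4 */
    return 0;
}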
4291 | 4291 | ||
4292 | static void | 4292 | static void |
4293 | bnx2_free_tx_skbs(struct bnx2 *bp) | 4293 | bnx2_free_tx_skbs(struct bnx2 *bp) |
4294 | { | 4294 | { |
4295 | int i; | 4295 | int i; |
4296 | 4296 | ||
4297 | if (bp->tx_buf_ring == NULL) | 4297 | if (bp->tx_buf_ring == NULL) |
4298 | return; | 4298 | return; |
4299 | 4299 | ||
4300 | for (i = 0; i < TX_DESC_CNT; ) { | 4300 | for (i = 0; i < TX_DESC_CNT; ) { |
4301 | struct sw_bd *tx_buf = &bp->tx_buf_ring[i]; | 4301 | struct sw_bd *tx_buf = &bp->tx_buf_ring[i]; |
4302 | struct sk_buff *skb = tx_buf->skb; | 4302 | struct sk_buff *skb = tx_buf->skb; |
4303 | int j, last; | 4303 | int j, last; |
4304 | 4304 | ||
4305 | if (skb == NULL) { | 4305 | if (skb == NULL) { |
4306 | i++; | 4306 | i++; |
4307 | continue; | 4307 | continue; |
4308 | } | 4308 | } |
4309 | 4309 | ||
4310 | pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), | 4310 | pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), |
4311 | skb_headlen(skb), PCI_DMA_TODEVICE); | 4311 | skb_headlen(skb), PCI_DMA_TODEVICE); |
4312 | 4312 | ||
4313 | tx_buf->skb = NULL; | 4313 | tx_buf->skb = NULL; |
4314 | 4314 | ||
4315 | last = skb_shinfo(skb)->nr_frags; | 4315 | last = skb_shinfo(skb)->nr_frags; |
4316 | for (j = 0; j < last; j++) { | 4316 | for (j = 0; j < last; j++) { |
4317 | tx_buf = &bp->tx_buf_ring[i + j + 1]; | 4317 | tx_buf = &bp->tx_buf_ring[i + j + 1]; |
4318 | pci_unmap_page(bp->pdev, | 4318 | pci_unmap_page(bp->pdev, |
4319 | pci_unmap_addr(tx_buf, mapping), | 4319 | pci_unmap_addr(tx_buf, mapping), |
4320 | skb_shinfo(skb)->frags[j].size, | 4320 | skb_shinfo(skb)->frags[j].size, |
4321 | PCI_DMA_TODEVICE); | 4321 | PCI_DMA_TODEVICE); |
4322 | } | 4322 | } |
4323 | dev_kfree_skb(skb); | 4323 | dev_kfree_skb(skb); |
4324 | i += j + 1; | 4324 | i += j + 1; |
4325 | } | 4325 | } |
4326 | 4326 | ||
4327 | } | 4327 | } |
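The TX cleanup above walks the buffer ring one packet at a time: each packet owns a head slot plus one slot per fragment, so after unmapping it the index advances by nr_frags + 1. A toy sketch of that slot-skipping walk, with made-up fragment counts:

#include <stdio.h>

int main(void)
{
    int nr_frags[] = { 0, 2, 1 };   /* three queued packets, made up */
    int npkts = 3, i = 0;

    for (int pkt = 0; pkt < npkts; pkt++) {
        printf("packet %d occupies slots %d..%d\n",
               pkt, i, i + nr_frags[pkt]);
        i += nr_frags[pkt] + 1;      /* skip head slot + frag slots */
    }
    return 0;
}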
4328 | 4328 | ||
4329 | static void | 4329 | static void |
4330 | bnx2_free_rx_skbs(struct bnx2 *bp) | 4330 | bnx2_free_rx_skbs(struct bnx2 *bp) |
4331 | { | 4331 | { |
4332 | int i; | 4332 | int i; |
4333 | 4333 | ||
4334 | if (bp->rx_buf_ring == NULL) | 4334 | if (bp->rx_buf_ring == NULL) |
4335 | return; | 4335 | return; |
4336 | 4336 | ||
4337 | for (i = 0; i < bp->rx_max_ring_idx; i++) { | 4337 | for (i = 0; i < bp->rx_max_ring_idx; i++) { |
4338 | struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; | 4338 | struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; |
4339 | struct sk_buff *skb = rx_buf->skb; | 4339 | struct sk_buff *skb = rx_buf->skb; |
4340 | 4340 | ||
4341 | if (skb == NULL) | 4341 | if (skb == NULL) |
4342 | continue; | 4342 | continue; |
4343 | 4343 | ||
4344 | pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), | 4344 | pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), |
4345 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); | 4345 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); |
4346 | 4346 | ||
4347 | rx_buf->skb = NULL; | 4347 | rx_buf->skb = NULL; |
4348 | 4348 | ||
4349 | dev_kfree_skb(skb); | 4349 | dev_kfree_skb(skb); |
4350 | } | 4350 | } |
4351 | } | 4351 | } |
4352 | 4352 | ||
4353 | static void | 4353 | static void |
4354 | bnx2_free_skbs(struct bnx2 *bp) | 4354 | bnx2_free_skbs(struct bnx2 *bp) |
4355 | { | 4355 | { |
4356 | bnx2_free_tx_skbs(bp); | 4356 | bnx2_free_tx_skbs(bp); |
4357 | bnx2_free_rx_skbs(bp); | 4357 | bnx2_free_rx_skbs(bp); |
4358 | } | 4358 | } |
4359 | 4359 | ||
4360 | static int | 4360 | static int |
4361 | bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) | 4361 | bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) |
4362 | { | 4362 | { |
4363 | int rc; | 4363 | int rc; |
4364 | 4364 | ||
4365 | rc = bnx2_reset_chip(bp, reset_code); | 4365 | rc = bnx2_reset_chip(bp, reset_code); |
4366 | bnx2_free_skbs(bp); | 4366 | bnx2_free_skbs(bp); |
4367 | if (rc) | 4367 | if (rc) |
4368 | return rc; | 4368 | return rc; |
4369 | 4369 | ||
4370 | if ((rc = bnx2_init_chip(bp)) != 0) | 4370 | if ((rc = bnx2_init_chip(bp)) != 0) |
4371 | return rc; | 4371 | return rc; |
4372 | 4372 | ||
4373 | bnx2_init_tx_ring(bp); | 4373 | bnx2_init_tx_ring(bp); |
4374 | bnx2_init_rx_ring(bp); | 4374 | bnx2_init_rx_ring(bp); |
4375 | return 0; | 4375 | return 0; |
4376 | } | 4376 | } |
4377 | 4377 | ||
4378 | static int | 4378 | static int |
4379 | bnx2_init_nic(struct bnx2 *bp) | 4379 | bnx2_init_nic(struct bnx2 *bp) |
4380 | { | 4380 | { |
4381 | int rc; | 4381 | int rc; |
4382 | 4382 | ||
4383 | if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) | 4383 | if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) |
4384 | return rc; | 4384 | return rc; |
4385 | 4385 | ||
4386 | spin_lock_bh(&bp->phy_lock); | 4386 | spin_lock_bh(&bp->phy_lock); |
4387 | bnx2_init_phy(bp); | 4387 | bnx2_init_phy(bp); |
4388 | bnx2_set_link(bp); | 4388 | bnx2_set_link(bp); |
4389 | spin_unlock_bh(&bp->phy_lock); | 4389 | spin_unlock_bh(&bp->phy_lock); |
4390 | return 0; | 4390 | return 0; |
4391 | } | 4391 | } |
4392 | 4392 | ||
4393 | static int | 4393 | static int |
4394 | bnx2_test_registers(struct bnx2 *bp) | 4394 | bnx2_test_registers(struct bnx2 *bp) |
4395 | { | 4395 | { |
4396 | int ret; | 4396 | int ret; |
4397 | int i, is_5709; | 4397 | int i, is_5709; |
4398 | static const struct { | 4398 | static const struct { |
4399 | u16 offset; | 4399 | u16 offset; |
4400 | u16 flags; | 4400 | u16 flags; |
4401 | #define BNX2_FL_NOT_5709 1 | 4401 | #define BNX2_FL_NOT_5709 1 |
4402 | u32 rw_mask; | 4402 | u32 rw_mask; |
4403 | u32 ro_mask; | 4403 | u32 ro_mask; |
4404 | } reg_tbl[] = { | 4404 | } reg_tbl[] = { |
4405 | { 0x006c, 0, 0x00000000, 0x0000003f }, | 4405 | { 0x006c, 0, 0x00000000, 0x0000003f }, |
4406 | { 0x0090, 0, 0xffffffff, 0x00000000 }, | 4406 | { 0x0090, 0, 0xffffffff, 0x00000000 }, |
4407 | { 0x0094, 0, 0x00000000, 0x00000000 }, | 4407 | { 0x0094, 0, 0x00000000, 0x00000000 }, |
4408 | 4408 | ||
4409 | { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 }, | 4409 | { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 }, |
4410 | { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4410 | { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4411 | { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4411 | { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4412 | { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff }, | 4412 | { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff }, |
4413 | { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 }, | 4413 | { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 }, |
4414 | { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, | 4414 | { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, |
4415 | { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff }, | 4415 | { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff }, |
4416 | { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4416 | { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4417 | { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4417 | { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4418 | 4418 | ||
4419 | { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4419 | { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4420 | { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, | 4420 | { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, |
4421 | { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, | 4421 | { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, |
4422 | { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, | 4422 | { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, |
4423 | { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, | 4423 | { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, |
4424 | { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, | 4424 | { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, |
4425 | 4425 | ||
4426 | { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, | 4426 | { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, |
4427 | { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 }, | 4427 | { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 }, |
4428 | { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, | 4428 | { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, |
4429 | 4429 | ||
4430 | { 0x1000, 0, 0x00000000, 0x00000001 }, | 4430 | { 0x1000, 0, 0x00000000, 0x00000001 }, |
4431 | { 0x1004, 0, 0x00000000, 0x000f0001 }, | 4431 | { 0x1004, 0, 0x00000000, 0x000f0001 }, |
4432 | 4432 | ||
4433 | { 0x1408, 0, 0x01c00800, 0x00000000 }, | 4433 | { 0x1408, 0, 0x01c00800, 0x00000000 }, |
4434 | { 0x149c, 0, 0x8000ffff, 0x00000000 }, | 4434 | { 0x149c, 0, 0x8000ffff, 0x00000000 }, |
4435 | { 0x14a8, 0, 0x00000000, 0x000001ff }, | 4435 | { 0x14a8, 0, 0x00000000, 0x000001ff }, |
4436 | { 0x14ac, 0, 0x0fffffff, 0x10000000 }, | 4436 | { 0x14ac, 0, 0x0fffffff, 0x10000000 }, |
4437 | { 0x14b0, 0, 0x00000002, 0x00000001 }, | 4437 | { 0x14b0, 0, 0x00000002, 0x00000001 }, |
4438 | { 0x14b8, 0, 0x00000000, 0x00000000 }, | 4438 | { 0x14b8, 0, 0x00000000, 0x00000000 }, |
4439 | { 0x14c0, 0, 0x00000000, 0x00000009 }, | 4439 | { 0x14c0, 0, 0x00000000, 0x00000009 }, |
4440 | { 0x14c4, 0, 0x00003fff, 0x00000000 }, | 4440 | { 0x14c4, 0, 0x00003fff, 0x00000000 }, |
4441 | { 0x14cc, 0, 0x00000000, 0x00000001 }, | 4441 | { 0x14cc, 0, 0x00000000, 0x00000001 }, |
4442 | { 0x14d0, 0, 0xffffffff, 0x00000000 }, | 4442 | { 0x14d0, 0, 0xffffffff, 0x00000000 }, |
4443 | 4443 | ||
4444 | { 0x1800, 0, 0x00000000, 0x00000001 }, | 4444 | { 0x1800, 0, 0x00000000, 0x00000001 }, |
4445 | { 0x1804, 0, 0x00000000, 0x00000003 }, | 4445 | { 0x1804, 0, 0x00000000, 0x00000003 }, |
4446 | 4446 | ||
4447 | { 0x2800, 0, 0x00000000, 0x00000001 }, | 4447 | { 0x2800, 0, 0x00000000, 0x00000001 }, |
4448 | { 0x2804, 0, 0x00000000, 0x00003f01 }, | 4448 | { 0x2804, 0, 0x00000000, 0x00003f01 }, |
4449 | { 0x2808, 0, 0x0f3f3f03, 0x00000000 }, | 4449 | { 0x2808, 0, 0x0f3f3f03, 0x00000000 }, |
4450 | { 0x2810, 0, 0xffff0000, 0x00000000 }, | 4450 | { 0x2810, 0, 0xffff0000, 0x00000000 }, |
4451 | { 0x2814, 0, 0xffff0000, 0x00000000 }, | 4451 | { 0x2814, 0, 0xffff0000, 0x00000000 }, |
4452 | { 0x2818, 0, 0xffff0000, 0x00000000 }, | 4452 | { 0x2818, 0, 0xffff0000, 0x00000000 }, |
4453 | { 0x281c, 0, 0xffff0000, 0x00000000 }, | 4453 | { 0x281c, 0, 0xffff0000, 0x00000000 }, |
4454 | { 0x2834, 0, 0xffffffff, 0x00000000 }, | 4454 | { 0x2834, 0, 0xffffffff, 0x00000000 }, |
4455 | { 0x2840, 0, 0x00000000, 0xffffffff }, | 4455 | { 0x2840, 0, 0x00000000, 0xffffffff }, |
4456 | { 0x2844, 0, 0x00000000, 0xffffffff }, | 4456 | { 0x2844, 0, 0x00000000, 0xffffffff }, |
4457 | { 0x2848, 0, 0xffffffff, 0x00000000 }, | 4457 | { 0x2848, 0, 0xffffffff, 0x00000000 }, |
4458 | { 0x284c, 0, 0xf800f800, 0x07ff07ff }, | 4458 | { 0x284c, 0, 0xf800f800, 0x07ff07ff }, |
4459 | 4459 | ||
4460 | { 0x2c00, 0, 0x00000000, 0x00000011 }, | 4460 | { 0x2c00, 0, 0x00000000, 0x00000011 }, |
4461 | { 0x2c04, 0, 0x00000000, 0x00030007 }, | 4461 | { 0x2c04, 0, 0x00000000, 0x00030007 }, |
4462 | 4462 | ||
4463 | { 0x3c00, 0, 0x00000000, 0x00000001 }, | 4463 | { 0x3c00, 0, 0x00000000, 0x00000001 }, |
4464 | { 0x3c04, 0, 0x00000000, 0x00070000 }, | 4464 | { 0x3c04, 0, 0x00000000, 0x00070000 }, |
4465 | { 0x3c08, 0, 0x00007f71, 0x07f00000 }, | 4465 | { 0x3c08, 0, 0x00007f71, 0x07f00000 }, |
4466 | { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 }, | 4466 | { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 }, |
4467 | { 0x3c10, 0, 0xffffffff, 0x00000000 }, | 4467 | { 0x3c10, 0, 0xffffffff, 0x00000000 }, |
4468 | { 0x3c14, 0, 0x00000000, 0xffffffff }, | 4468 | { 0x3c14, 0, 0x00000000, 0xffffffff }, |
4469 | { 0x3c18, 0, 0x00000000, 0xffffffff }, | 4469 | { 0x3c18, 0, 0x00000000, 0xffffffff }, |
4470 | { 0x3c1c, 0, 0xfffff000, 0x00000000 }, | 4470 | { 0x3c1c, 0, 0xfffff000, 0x00000000 }, |
4471 | { 0x3c20, 0, 0xffffff00, 0x00000000 }, | 4471 | { 0x3c20, 0, 0xffffff00, 0x00000000 }, |
4472 | 4472 | ||
4473 | { 0x5004, 0, 0x00000000, 0x0000007f }, | 4473 | { 0x5004, 0, 0x00000000, 0x0000007f }, |
4474 | { 0x5008, 0, 0x0f0007ff, 0x00000000 }, | 4474 | { 0x5008, 0, 0x0f0007ff, 0x00000000 }, |
4475 | 4475 | ||
4476 | { 0x5c00, 0, 0x00000000, 0x00000001 }, | 4476 | { 0x5c00, 0, 0x00000000, 0x00000001 }, |
4477 | { 0x5c04, 0, 0x00000000, 0x0003000f }, | 4477 | { 0x5c04, 0, 0x00000000, 0x0003000f }, |
4478 | { 0x5c08, 0, 0x00000003, 0x00000000 }, | 4478 | { 0x5c08, 0, 0x00000003, 0x00000000 }, |
4479 | { 0x5c0c, 0, 0x0000fff8, 0x00000000 }, | 4479 | { 0x5c0c, 0, 0x0000fff8, 0x00000000 }, |
4480 | { 0x5c10, 0, 0x00000000, 0xffffffff }, | 4480 | { 0x5c10, 0, 0x00000000, 0xffffffff }, |
4481 | { 0x5c80, 0, 0x00000000, 0x0f7113f1 }, | 4481 | { 0x5c80, 0, 0x00000000, 0x0f7113f1 }, |
4482 | { 0x5c84, 0, 0x00000000, 0x0000f333 }, | 4482 | { 0x5c84, 0, 0x00000000, 0x0000f333 }, |
4483 | { 0x5c88, 0, 0x00000000, 0x00077373 }, | 4483 | { 0x5c88, 0, 0x00000000, 0x00077373 }, |
4484 | { 0x5c8c, 0, 0x00000000, 0x0007f737 }, | 4484 | { 0x5c8c, 0, 0x00000000, 0x0007f737 }, |
4485 | 4485 | ||
4486 | { 0x6808, 0, 0x0000ff7f, 0x00000000 }, | 4486 | { 0x6808, 0, 0x0000ff7f, 0x00000000 }, |
4487 | { 0x680c, 0, 0xffffffff, 0x00000000 }, | 4487 | { 0x680c, 0, 0xffffffff, 0x00000000 }, |
4488 | { 0x6810, 0, 0xffffffff, 0x00000000 }, | 4488 | { 0x6810, 0, 0xffffffff, 0x00000000 }, |
4489 | { 0x6814, 0, 0xffffffff, 0x00000000 }, | 4489 | { 0x6814, 0, 0xffffffff, 0x00000000 }, |
4490 | { 0x6818, 0, 0xffffffff, 0x00000000 }, | 4490 | { 0x6818, 0, 0xffffffff, 0x00000000 }, |
4491 | { 0x681c, 0, 0xffffffff, 0x00000000 }, | 4491 | { 0x681c, 0, 0xffffffff, 0x00000000 }, |
4492 | { 0x6820, 0, 0x00ff00ff, 0x00000000 }, | 4492 | { 0x6820, 0, 0x00ff00ff, 0x00000000 }, |
4493 | { 0x6824, 0, 0x00ff00ff, 0x00000000 }, | 4493 | { 0x6824, 0, 0x00ff00ff, 0x00000000 }, |
4494 | { 0x6828, 0, 0x00ff00ff, 0x00000000 }, | 4494 | { 0x6828, 0, 0x00ff00ff, 0x00000000 }, |
4495 | { 0x682c, 0, 0x03ff03ff, 0x00000000 }, | 4495 | { 0x682c, 0, 0x03ff03ff, 0x00000000 }, |
4496 | { 0x6830, 0, 0x03ff03ff, 0x00000000 }, | 4496 | { 0x6830, 0, 0x03ff03ff, 0x00000000 }, |
4497 | { 0x6834, 0, 0x03ff03ff, 0x00000000 }, | 4497 | { 0x6834, 0, 0x03ff03ff, 0x00000000 }, |
4498 | { 0x6838, 0, 0x03ff03ff, 0x00000000 }, | 4498 | { 0x6838, 0, 0x03ff03ff, 0x00000000 }, |
4499 | { 0x683c, 0, 0x0000ffff, 0x00000000 }, | 4499 | { 0x683c, 0, 0x0000ffff, 0x00000000 }, |
4500 | { 0x6840, 0, 0x00000ff0, 0x00000000 }, | 4500 | { 0x6840, 0, 0x00000ff0, 0x00000000 }, |
4501 | { 0x6844, 0, 0x00ffff00, 0x00000000 }, | 4501 | { 0x6844, 0, 0x00ffff00, 0x00000000 }, |
4502 | { 0x684c, 0, 0xffffffff, 0x00000000 }, | 4502 | { 0x684c, 0, 0xffffffff, 0x00000000 }, |
4503 | { 0x6850, 0, 0x7f7f7f7f, 0x00000000 }, | 4503 | { 0x6850, 0, 0x7f7f7f7f, 0x00000000 }, |
4504 | { 0x6854, 0, 0x7f7f7f7f, 0x00000000 }, | 4504 | { 0x6854, 0, 0x7f7f7f7f, 0x00000000 }, |
4505 | { 0x6858, 0, 0x7f7f7f7f, 0x00000000 }, | 4505 | { 0x6858, 0, 0x7f7f7f7f, 0x00000000 }, |
4506 | { 0x685c, 0, 0x7f7f7f7f, 0x00000000 }, | 4506 | { 0x685c, 0, 0x7f7f7f7f, 0x00000000 }, |
4507 | { 0x6908, 0, 0x00000000, 0x0001ff0f }, | 4507 | { 0x6908, 0, 0x00000000, 0x0001ff0f }, |
4508 | { 0x690c, 0, 0x00000000, 0x0ffe00f0 }, | 4508 | { 0x690c, 0, 0x00000000, 0x0ffe00f0 }, |
4509 | 4509 | ||
4510 | { 0xffff, 0, 0x00000000, 0x00000000 }, | 4510 | { 0xffff, 0, 0x00000000, 0x00000000 }, |
4511 | }; | 4511 | }; |
4512 | 4512 | ||
4513 | ret = 0; | 4513 | ret = 0; |
4514 | is_5709 = 0; | 4514 | is_5709 = 0; |
4515 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 4515 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
4516 | is_5709 = 1; | 4516 | is_5709 = 1; |
4517 | 4517 | ||
4518 | for (i = 0; reg_tbl[i].offset != 0xffff; i++) { | 4518 | for (i = 0; reg_tbl[i].offset != 0xffff; i++) { |
4519 | u32 offset, rw_mask, ro_mask, save_val, val; | 4519 | u32 offset, rw_mask, ro_mask, save_val, val; |
4520 | u16 flags = reg_tbl[i].flags; | 4520 | u16 flags = reg_tbl[i].flags; |
4521 | 4521 | ||
4522 | if (is_5709 && (flags & BNX2_FL_NOT_5709)) | 4522 | if (is_5709 && (flags & BNX2_FL_NOT_5709)) |
4523 | continue; | 4523 | continue; |
4524 | 4524 | ||
4525 | offset = (u32) reg_tbl[i].offset; | 4525 | offset = (u32) reg_tbl[i].offset; |
4526 | rw_mask = reg_tbl[i].rw_mask; | 4526 | rw_mask = reg_tbl[i].rw_mask; |
4527 | ro_mask = reg_tbl[i].ro_mask; | 4527 | ro_mask = reg_tbl[i].ro_mask; |
4528 | 4528 | ||
4529 | save_val = readl(bp->regview + offset); | 4529 | save_val = readl(bp->regview + offset); |
4530 | 4530 | ||
4531 | writel(0, bp->regview + offset); | 4531 | writel(0, bp->regview + offset); |
4532 | 4532 | ||
4533 | val = readl(bp->regview + offset); | 4533 | val = readl(bp->regview + offset); |
4534 | if ((val & rw_mask) != 0) { | 4534 | if ((val & rw_mask) != 0) { |
4535 | goto reg_test_err; | 4535 | goto reg_test_err; |
4536 | } | 4536 | } |
4537 | 4537 | ||
4538 | if ((val & ro_mask) != (save_val & ro_mask)) { | 4538 | if ((val & ro_mask) != (save_val & ro_mask)) { |
4539 | goto reg_test_err; | 4539 | goto reg_test_err; |
4540 | } | 4540 | } |
4541 | 4541 | ||
4542 | writel(0xffffffff, bp->regview + offset); | 4542 | writel(0xffffffff, bp->regview + offset); |
4543 | 4543 | ||
4544 | val = readl(bp->regview + offset); | 4544 | val = readl(bp->regview + offset); |
4545 | if ((val & rw_mask) != rw_mask) { | 4545 | if ((val & rw_mask) != rw_mask) { |
4546 | goto reg_test_err; | 4546 | goto reg_test_err; |
4547 | } | 4547 | } |
4548 | 4548 | ||
4549 | if ((val & ro_mask) != (save_val & ro_mask)) { | 4549 | if ((val & ro_mask) != (save_val & ro_mask)) { |
4550 | goto reg_test_err; | 4550 | goto reg_test_err; |
4551 | } | 4551 | } |
4552 | 4552 | ||
4553 | writel(save_val, bp->regview + offset); | 4553 | writel(save_val, bp->regview + offset); |
4554 | continue; | 4554 | continue; |
4555 | 4555 | ||
4556 | reg_test_err: | 4556 | reg_test_err: |
4557 | writel(save_val, bp->regview + offset); | 4557 | writel(save_val, bp->regview + offset); |
4558 | ret = -ENODEV; | 4558 | ret = -ENODEV; |
4559 | break; | 4559 | break; |
4560 | } | 4560 | } |
4561 | return ret; | 4561 | return ret; |
4562 | } | 4562 | } |
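The register self-test above walks a table of (offset, rw_mask, ro_mask) entries: after writing 0 every read/write bit must read back as 0, after writing 0xffffffff every read/write bit must read back as 1, and in both cases the read-only bits must still hold their saved value; the original contents are restored either way. A minimal stand-alone sketch of that per-register check, with the MMIO accessors passed in as placeholders rather than the driver's readl()/writel() on bp->regview:

#include <stdint.h>

/* Sketch only: verify one register against its read/write and read-only
 * masks, mirroring the loop in the register test above. rd()/wr() stand
 * for whatever MMIO accessors the caller supplies. */
static int check_reg(uint32_t (*rd)(uint32_t off),
                     void (*wr)(uint32_t off, uint32_t val),
                     uint32_t off, uint32_t rw_mask, uint32_t ro_mask)
{
    uint32_t save = rd(off);
    uint32_t val;
    int bad = 0;

    wr(off, 0);
    val = rd(off);
    if ((val & rw_mask) != 0 ||                   /* writable bits must clear */
        (val & ro_mask) != (save & ro_mask))      /* read-only bits must hold */
        bad = 1;

    wr(off, 0xffffffffu);
    val = rd(off);
    if ((val & rw_mask) != rw_mask ||             /* writable bits must set   */
        (val & ro_mask) != (save & ro_mask))
        bad = 1;

    wr(off, save);                                /* always restore           */
    return bad ? -1 : 0;
}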
4563 | 4563 | ||
4564 | static int | 4564 | static int |
4565 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) | 4565 | bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) |
4566 | { | 4566 | { |
4567 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, | 4567 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, |
4568 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; | 4568 | 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; |
4569 | int i; | 4569 | int i; |
4570 | 4570 | ||
4571 | for (i = 0; i < sizeof(test_pattern) / 4; i++) { | 4571 | for (i = 0; i < sizeof(test_pattern) / 4; i++) { |
4572 | u32 offset; | 4572 | u32 offset; |
4573 | 4573 | ||
4574 | for (offset = 0; offset < size; offset += 4) { | 4574 | for (offset = 0; offset < size; offset += 4) { |
4575 | 4575 | ||
4576 | REG_WR_IND(bp, start + offset, test_pattern[i]); | 4576 | REG_WR_IND(bp, start + offset, test_pattern[i]); |
4577 | 4577 | ||
4578 | if (REG_RD_IND(bp, start + offset) != | 4578 | if (REG_RD_IND(bp, start + offset) != |
4579 | test_pattern[i]) { | 4579 | test_pattern[i]) { |
4580 | return -ENODEV; | 4580 | return -ENODEV; |
4581 | } | 4581 | } |
4582 | } | 4582 | } |
4583 | } | 4583 | } |
4584 | return 0; | 4584 | return 0; |
4585 | } | 4585 | } |
4586 | 4586 | ||
4587 | static int | 4587 | static int |
4588 | bnx2_test_memory(struct bnx2 *bp) | 4588 | bnx2_test_memory(struct bnx2 *bp) |
4589 | { | 4589 | { |
4590 | int ret = 0; | 4590 | int ret = 0; |
4591 | int i; | 4591 | int i; |
4592 | static struct mem_entry { | 4592 | static struct mem_entry { |
4593 | u32 offset; | 4593 | u32 offset; |
4594 | u32 len; | 4594 | u32 len; |
4595 | } mem_tbl_5706[] = { | 4595 | } mem_tbl_5706[] = { |
4596 | { 0x60000, 0x4000 }, | 4596 | { 0x60000, 0x4000 }, |
4597 | { 0xa0000, 0x3000 }, | 4597 | { 0xa0000, 0x3000 }, |
4598 | { 0xe0000, 0x4000 }, | 4598 | { 0xe0000, 0x4000 }, |
4599 | { 0x120000, 0x4000 }, | 4599 | { 0x120000, 0x4000 }, |
4600 | { 0x1a0000, 0x4000 }, | 4600 | { 0x1a0000, 0x4000 }, |
4601 | { 0x160000, 0x4000 }, | 4601 | { 0x160000, 0x4000 }, |
4602 | { 0xffffffff, 0 }, | 4602 | { 0xffffffff, 0 }, |
4603 | }, | 4603 | }, |
4604 | mem_tbl_5709[] = { | 4604 | mem_tbl_5709[] = { |
4605 | { 0x60000, 0x4000 }, | 4605 | { 0x60000, 0x4000 }, |
4606 | { 0xa0000, 0x3000 }, | 4606 | { 0xa0000, 0x3000 }, |
4607 | { 0xe0000, 0x4000 }, | 4607 | { 0xe0000, 0x4000 }, |
4608 | { 0x120000, 0x4000 }, | 4608 | { 0x120000, 0x4000 }, |
4609 | { 0x1a0000, 0x4000 }, | 4609 | { 0x1a0000, 0x4000 }, |
4610 | { 0xffffffff, 0 }, | 4610 | { 0xffffffff, 0 }, |
4611 | }; | 4611 | }; |
4612 | struct mem_entry *mem_tbl; | 4612 | struct mem_entry *mem_tbl; |
4613 | 4613 | ||
4614 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 4614 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
4615 | mem_tbl = mem_tbl_5709; | 4615 | mem_tbl = mem_tbl_5709; |
4616 | else | 4616 | else |
4617 | mem_tbl = mem_tbl_5706; | 4617 | mem_tbl = mem_tbl_5706; |
4618 | 4618 | ||
4619 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { | 4619 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { |
4620 | if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, | 4620 | if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, |
4621 | mem_tbl[i].len)) != 0) { | 4621 | mem_tbl[i].len)) != 0) { |
4622 | return ret; | 4622 | return ret; |
4623 | } | 4623 | } |
4624 | } | 4624 | } |
4625 | 4625 | ||
4626 | return ret; | 4626 | return ret; |
4627 | } | 4627 | } |
4628 | 4628 | ||
4629 | #define BNX2_MAC_LOOPBACK 0 | 4629 | #define BNX2_MAC_LOOPBACK 0 |
4630 | #define BNX2_PHY_LOOPBACK 1 | 4630 | #define BNX2_PHY_LOOPBACK 1 |
4631 | 4631 | ||
4632 | static int | 4632 | static int |
4633 | bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) | 4633 | bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) |
4634 | { | 4634 | { |
4635 | unsigned int pkt_size, num_pkts, i; | 4635 | unsigned int pkt_size, num_pkts, i; |
4636 | struct sk_buff *skb, *rx_skb; | 4636 | struct sk_buff *skb, *rx_skb; |
4637 | unsigned char *packet; | 4637 | unsigned char *packet; |
4638 | u16 rx_start_idx, rx_idx; | 4638 | u16 rx_start_idx, rx_idx; |
4639 | dma_addr_t map; | 4639 | dma_addr_t map; |
4640 | struct tx_bd *txbd; | 4640 | struct tx_bd *txbd; |
4641 | struct sw_bd *rx_buf; | 4641 | struct sw_bd *rx_buf; |
4642 | struct l2_fhdr *rx_hdr; | 4642 | struct l2_fhdr *rx_hdr; |
4643 | int ret = -ENODEV; | 4643 | int ret = -ENODEV; |
4644 | 4644 | ||
4645 | if (loopback_mode == BNX2_MAC_LOOPBACK) { | 4645 | if (loopback_mode == BNX2_MAC_LOOPBACK) { |
4646 | bp->loopback = MAC_LOOPBACK; | 4646 | bp->loopback = MAC_LOOPBACK; |
4647 | bnx2_set_mac_loopback(bp); | 4647 | bnx2_set_mac_loopback(bp); |
4648 | } | 4648 | } |
4649 | else if (loopback_mode == BNX2_PHY_LOOPBACK) { | 4649 | else if (loopback_mode == BNX2_PHY_LOOPBACK) { |
4650 | bp->loopback = PHY_LOOPBACK; | 4650 | bp->loopback = PHY_LOOPBACK; |
4651 | bnx2_set_phy_loopback(bp); | 4651 | bnx2_set_phy_loopback(bp); |
4652 | } | 4652 | } |
4653 | else | 4653 | else |
4654 | return -EINVAL; | 4654 | return -EINVAL; |
4655 | 4655 | ||
4656 | pkt_size = 1514; | 4656 | pkt_size = 1514; |
4657 | skb = netdev_alloc_skb(bp->dev, pkt_size); | 4657 | skb = netdev_alloc_skb(bp->dev, pkt_size); |
4658 | if (!skb) | 4658 | if (!skb) |
4659 | return -ENOMEM; | 4659 | return -ENOMEM; |
4660 | packet = skb_put(skb, pkt_size); | 4660 | packet = skb_put(skb, pkt_size); |
4661 | memcpy(packet, bp->dev->dev_addr, 6); | 4661 | memcpy(packet, bp->dev->dev_addr, 6); |
4662 | memset(packet + 6, 0x0, 8); | 4662 | memset(packet + 6, 0x0, 8); |
4663 | for (i = 14; i < pkt_size; i++) | 4663 | for (i = 14; i < pkt_size; i++) |
4664 | packet[i] = (unsigned char) (i & 0xff); | 4664 | packet[i] = (unsigned char) (i & 0xff); |
4665 | 4665 | ||
4666 | map = pci_map_single(bp->pdev, skb->data, pkt_size, | 4666 | map = pci_map_single(bp->pdev, skb->data, pkt_size, |
4667 | PCI_DMA_TODEVICE); | 4667 | PCI_DMA_TODEVICE); |
4668 | 4668 | ||
4669 | REG_WR(bp, BNX2_HC_COMMAND, | 4669 | REG_WR(bp, BNX2_HC_COMMAND, |
4670 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); | 4670 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); |
4671 | 4671 | ||
4672 | REG_RD(bp, BNX2_HC_COMMAND); | 4672 | REG_RD(bp, BNX2_HC_COMMAND); |
4673 | 4673 | ||
4674 | udelay(5); | 4674 | udelay(5); |
4675 | rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0; | 4675 | rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0; |
4676 | 4676 | ||
4677 | num_pkts = 0; | 4677 | num_pkts = 0; |
4678 | 4678 | ||
4679 | txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)]; | 4679 | txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)]; |
4680 | 4680 | ||
4681 | txbd->tx_bd_haddr_hi = (u64) map >> 32; | 4681 | txbd->tx_bd_haddr_hi = (u64) map >> 32; |
4682 | txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; | 4682 | txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; |
4683 | txbd->tx_bd_mss_nbytes = pkt_size; | 4683 | txbd->tx_bd_mss_nbytes = pkt_size; |
4684 | txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; | 4684 | txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; |
4685 | 4685 | ||
4686 | num_pkts++; | 4686 | num_pkts++; |
4687 | bp->tx_prod = NEXT_TX_BD(bp->tx_prod); | 4687 | bp->tx_prod = NEXT_TX_BD(bp->tx_prod); |
4688 | bp->tx_prod_bseq += pkt_size; | 4688 | bp->tx_prod_bseq += pkt_size; |
4689 | 4689 | ||
4690 | REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod); | 4690 | REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod); |
4691 | REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); | 4691 | REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); |
4692 | 4692 | ||
4693 | udelay(100); | 4693 | udelay(100); |
4694 | 4694 | ||
4695 | REG_WR(bp, BNX2_HC_COMMAND, | 4695 | REG_WR(bp, BNX2_HC_COMMAND, |
4696 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); | 4696 | bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); |
4697 | 4697 | ||
4698 | REG_RD(bp, BNX2_HC_COMMAND); | 4698 | REG_RD(bp, BNX2_HC_COMMAND); |
4699 | 4699 | ||
4700 | udelay(5); | 4700 | udelay(5); |
4701 | 4701 | ||
4702 | pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); | 4702 | pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); |
4703 | dev_kfree_skb(skb); | 4703 | dev_kfree_skb(skb); |
4704 | 4704 | ||
4705 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) { | 4705 | if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) { |
4706 | goto loopback_test_done; | 4706 | goto loopback_test_done; |
4707 | } | 4707 | } |
4708 | 4708 | ||
4709 | rx_idx = bp->status_blk->status_rx_quick_consumer_index0; | 4709 | rx_idx = bp->status_blk->status_rx_quick_consumer_index0; |
4710 | if (rx_idx != rx_start_idx + num_pkts) { | 4710 | if (rx_idx != rx_start_idx + num_pkts) { |
4711 | goto loopback_test_done; | 4711 | goto loopback_test_done; |
4712 | } | 4712 | } |
4713 | 4713 | ||
4714 | rx_buf = &bp->rx_buf_ring[rx_start_idx]; | 4714 | rx_buf = &bp->rx_buf_ring[rx_start_idx]; |
4715 | rx_skb = rx_buf->skb; | 4715 | rx_skb = rx_buf->skb; |
4716 | 4716 | ||
4717 | rx_hdr = (struct l2_fhdr *) rx_skb->data; | 4717 | rx_hdr = (struct l2_fhdr *) rx_skb->data; |
4718 | skb_reserve(rx_skb, bp->rx_offset); | 4718 | skb_reserve(rx_skb, bp->rx_offset); |
4719 | 4719 | ||
4720 | pci_dma_sync_single_for_cpu(bp->pdev, | 4720 | pci_dma_sync_single_for_cpu(bp->pdev, |
4721 | pci_unmap_addr(rx_buf, mapping), | 4721 | pci_unmap_addr(rx_buf, mapping), |
4722 | bp->rx_buf_size, PCI_DMA_FROMDEVICE); | 4722 | bp->rx_buf_size, PCI_DMA_FROMDEVICE); |
4723 | 4723 | ||
4724 | if (rx_hdr->l2_fhdr_status & | 4724 | if (rx_hdr->l2_fhdr_status & |
4725 | (L2_FHDR_ERRORS_BAD_CRC | | 4725 | (L2_FHDR_ERRORS_BAD_CRC | |
4726 | L2_FHDR_ERRORS_PHY_DECODE | | 4726 | L2_FHDR_ERRORS_PHY_DECODE | |
4727 | L2_FHDR_ERRORS_ALIGNMENT | | 4727 | L2_FHDR_ERRORS_ALIGNMENT | |
4728 | L2_FHDR_ERRORS_TOO_SHORT | | 4728 | L2_FHDR_ERRORS_TOO_SHORT | |
4729 | L2_FHDR_ERRORS_GIANT_FRAME)) { | 4729 | L2_FHDR_ERRORS_GIANT_FRAME)) { |
4730 | 4730 | ||
4731 | goto loopback_test_done; | 4731 | goto loopback_test_done; |
4732 | } | 4732 | } |
4733 | 4733 | ||
4734 | if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { | 4734 | if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { |
4735 | goto loopback_test_done; | 4735 | goto loopback_test_done; |
4736 | } | 4736 | } |
4737 | 4737 | ||
4738 | for (i = 14; i < pkt_size; i++) { | 4738 | for (i = 14; i < pkt_size; i++) { |
4739 | if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { | 4739 | if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { |
4740 | goto loopback_test_done; | 4740 | goto loopback_test_done; |
4741 | } | 4741 | } |
4742 | } | 4742 | } |
4743 | 4743 | ||
4744 | ret = 0; | 4744 | ret = 0; |
4745 | 4745 | ||
4746 | loopback_test_done: | 4746 | loopback_test_done: |
4747 | bp->loopback = 0; | 4747 | bp->loopback = 0; |
4748 | return ret; | 4748 | return ret; |
4749 | } | 4749 | } |
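bnx2_run_loopback() hand-builds a 1514-byte frame addressed to the NIC's own MAC (six bytes of destination, eight zero bytes covering the source and type/length fields, then a payload where byte i carries i & 0xff), pushes it through one transmit descriptor, and expects exactly one receive completion whose reported length is the packet size plus the 4-byte FCS and whose payload is unchanged. A small sketch of the frame construction and verification, outside any driver context (plain buffers here stand in for the skb handling above):

#include <stddef.h>
#include <string.h>

/* Build the self-addressed test frame used by the loopback test. */
static void build_loopback_frame(unsigned char *pkt, size_t pkt_size,
                                 const unsigned char own_mac[6])
{
    memcpy(pkt, own_mac, 6);              /* destination = our own address  */
    memset(pkt + 6, 0, 8);                /* source + type/len left at zero */
    for (size_t i = 14; i < pkt_size; i++)
        pkt[i] = (unsigned char)(i & 0xff);
}

/* Verify the payload survived the MAC or PHY loopback unmodified. */
static int verify_loopback_frame(const unsigned char *pkt, size_t pkt_size)
{
    for (size_t i = 14; i < pkt_size; i++)
        if (pkt[i] != (unsigned char)(i & 0xff))
            return -1;
    return 0;
}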
4750 | 4750 | ||
4751 | #define BNX2_MAC_LOOPBACK_FAILED 1 | 4751 | #define BNX2_MAC_LOOPBACK_FAILED 1 |
4752 | #define BNX2_PHY_LOOPBACK_FAILED 2 | 4752 | #define BNX2_PHY_LOOPBACK_FAILED 2 |
4753 | #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \ | 4753 | #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \ |
4754 | BNX2_PHY_LOOPBACK_FAILED) | 4754 | BNX2_PHY_LOOPBACK_FAILED) |
4755 | 4755 | ||
4756 | static int | 4756 | static int |
4757 | bnx2_test_loopback(struct bnx2 *bp) | 4757 | bnx2_test_loopback(struct bnx2 *bp) |
4758 | { | 4758 | { |
4759 | int rc = 0; | 4759 | int rc = 0; |
4760 | 4760 | ||
4761 | if (!netif_running(bp->dev)) | 4761 | if (!netif_running(bp->dev)) |
4762 | return BNX2_LOOPBACK_FAILED; | 4762 | return BNX2_LOOPBACK_FAILED; |
4763 | 4763 | ||
4764 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); | 4764 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); |
4765 | spin_lock_bh(&bp->phy_lock); | 4765 | spin_lock_bh(&bp->phy_lock); |
4766 | bnx2_init_phy(bp); | 4766 | bnx2_init_phy(bp); |
4767 | spin_unlock_bh(&bp->phy_lock); | 4767 | spin_unlock_bh(&bp->phy_lock); |
4768 | if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) | 4768 | if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) |
4769 | rc |= BNX2_MAC_LOOPBACK_FAILED; | 4769 | rc |= BNX2_MAC_LOOPBACK_FAILED; |
4770 | if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) | 4770 | if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) |
4771 | rc |= BNX2_PHY_LOOPBACK_FAILED; | 4771 | rc |= BNX2_PHY_LOOPBACK_FAILED; |
4772 | return rc; | 4772 | return rc; |
4773 | } | 4773 | } |
4774 | 4774 | ||
4775 | #define NVRAM_SIZE 0x200 | 4775 | #define NVRAM_SIZE 0x200 |
4776 | #define CRC32_RESIDUAL 0xdebb20e3 | 4776 | #define CRC32_RESIDUAL 0xdebb20e3 |
4777 | 4777 | ||
4778 | static int | 4778 | static int |
4779 | bnx2_test_nvram(struct bnx2 *bp) | 4779 | bnx2_test_nvram(struct bnx2 *bp) |
4780 | { | 4780 | { |
4781 | u32 buf[NVRAM_SIZE / 4]; | 4781 | u32 buf[NVRAM_SIZE / 4]; |
4782 | u8 *data = (u8 *) buf; | 4782 | u8 *data = (u8 *) buf; |
4783 | int rc = 0; | 4783 | int rc = 0; |
4784 | u32 magic, csum; | 4784 | u32 magic, csum; |
4785 | 4785 | ||
4786 | if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) | 4786 | if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) |
4787 | goto test_nvram_done; | 4787 | goto test_nvram_done; |
4788 | 4788 | ||
4789 | magic = be32_to_cpu(buf[0]); | 4789 | magic = be32_to_cpu(buf[0]); |
4790 | if (magic != 0x669955aa) { | 4790 | if (magic != 0x669955aa) { |
4791 | rc = -ENODEV; | 4791 | rc = -ENODEV; |
4792 | goto test_nvram_done; | 4792 | goto test_nvram_done; |
4793 | } | 4793 | } |
4794 | 4794 | ||
4795 | if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) | 4795 | if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) |
4796 | goto test_nvram_done; | 4796 | goto test_nvram_done; |
4797 | 4797 | ||
4798 | csum = ether_crc_le(0x100, data); | 4798 | csum = ether_crc_le(0x100, data); |
4799 | if (csum != CRC32_RESIDUAL) { | 4799 | if (csum != CRC32_RESIDUAL) { |
4800 | rc = -ENODEV; | 4800 | rc = -ENODEV; |
4801 | goto test_nvram_done; | 4801 | goto test_nvram_done; |
4802 | } | 4802 | } |
4803 | 4803 | ||
4804 | csum = ether_crc_le(0x100, data + 0x100); | 4804 | csum = ether_crc_le(0x100, data + 0x100); |
4805 | if (csum != CRC32_RESIDUAL) { | 4805 | if (csum != CRC32_RESIDUAL) { |
4806 | rc = -ENODEV; | 4806 | rc = -ENODEV; |
4807 | } | 4807 | } |
4808 | 4808 | ||
4809 | test_nvram_done: | 4809 | test_nvram_done: |
4810 | return rc; | 4810 | return rc; |
4811 | } | 4811 | } |
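The NVRAM test leans on a CRC-32 property: each 0x100-byte block in the manufacturing region carries its own CRC at the end, and running the CRC register over data plus stored CRC of an intact block always lands on the fixed residual 0xdebb20e3, so no per-block expected value needs to be stored separately. A user-space sketch of the same check, assuming ether_crc_le() behaves as the reflected CRC-32 register seeded with ~0 and returned without final inversion:

#include <stddef.h>
#include <stdint.h>

#define CRC32_RESIDUAL 0xdebb20e3u

/* Reflected CRC-32 register value, seed 0xffffffff, no final XOR
 * (intended to mirror ether_crc_le(); treat that mapping as an assumption). */
static uint32_t crc32_le_raw(const uint8_t *data, size_t len)
{
    uint32_t crc = 0xffffffffu;

    while (len--) {
        crc ^= *data++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
    }
    return crc;
}

/* A block whose last four bytes are its own little-endian CRC-32 drives the
 * register to the residual constant when the block is intact. */
static int block_is_intact(const uint8_t *block, size_t len)
{
    return crc32_le_raw(block, len) == CRC32_RESIDUAL;
}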
4812 | 4812 | ||
4813 | static int | 4813 | static int |
4814 | bnx2_test_link(struct bnx2 *bp) | 4814 | bnx2_test_link(struct bnx2 *bp) |
4815 | { | 4815 | { |
4816 | u32 bmsr; | 4816 | u32 bmsr; |
4817 | 4817 | ||
4818 | spin_lock_bh(&bp->phy_lock); | 4818 | spin_lock_bh(&bp->phy_lock); |
4819 | bnx2_enable_bmsr1(bp); | 4819 | bnx2_enable_bmsr1(bp); |
4820 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); | 4820 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); |
4821 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); | 4821 | bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); |
4822 | bnx2_disable_bmsr1(bp); | 4822 | bnx2_disable_bmsr1(bp); |
4823 | spin_unlock_bh(&bp->phy_lock); | 4823 | spin_unlock_bh(&bp->phy_lock); |
4824 | 4824 | ||
4825 | if (bmsr & BMSR_LSTATUS) { | 4825 | if (bmsr & BMSR_LSTATUS) { |
4826 | return 0; | 4826 | return 0; |
4827 | } | 4827 | } |
4828 | return -ENODEV; | 4828 | return -ENODEV; |
4829 | } | 4829 | } |
4830 | 4830 | ||
4831 | static int | 4831 | static int |
4832 | bnx2_test_intr(struct bnx2 *bp) | 4832 | bnx2_test_intr(struct bnx2 *bp) |
4833 | { | 4833 | { |
4834 | int i; | 4834 | int i; |
4835 | u16 status_idx; | 4835 | u16 status_idx; |
4836 | 4836 | ||
4837 | if (!netif_running(bp->dev)) | 4837 | if (!netif_running(bp->dev)) |
4838 | return -ENODEV; | 4838 | return -ENODEV; |
4839 | 4839 | ||
4840 | status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; | 4840 | status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; |
4841 | 4841 | ||
4842 | /* This register is not touched during run-time. */ | 4842 | /* This register is not touched during run-time. */ |
4843 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); | 4843 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); |
4844 | REG_RD(bp, BNX2_HC_COMMAND); | 4844 | REG_RD(bp, BNX2_HC_COMMAND); |
4845 | 4845 | ||
4846 | for (i = 0; i < 10; i++) { | 4846 | for (i = 0; i < 10; i++) { |
4847 | if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != | 4847 | if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != |
4848 | status_idx) { | 4848 | status_idx) { |
4849 | 4849 | ||
4850 | break; | 4850 | break; |
4851 | } | 4851 | } |
4852 | 4852 | ||
4853 | msleep_interruptible(10); | 4853 | msleep_interruptible(10); |
4854 | } | 4854 | } |
4855 | if (i < 10) | 4855 | if (i < 10) |
4856 | return 0; | 4856 | return 0; |
4857 | 4857 | ||
4858 | return -ENODEV; | 4858 | return -ENODEV; |
4859 | } | 4859 | } |
4860 | 4860 | ||
4861 | static void | 4861 | static void |
4862 | bnx2_5706_serdes_timer(struct bnx2 *bp) | 4862 | bnx2_5706_serdes_timer(struct bnx2 *bp) |
4863 | { | 4863 | { |
4864 | spin_lock(&bp->phy_lock); | 4864 | spin_lock(&bp->phy_lock); |
4865 | if (bp->serdes_an_pending) | 4865 | if (bp->serdes_an_pending) |
4866 | bp->serdes_an_pending--; | 4866 | bp->serdes_an_pending--; |
4867 | else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { | 4867 | else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { |
4868 | u32 bmcr; | 4868 | u32 bmcr; |
4869 | 4869 | ||
4870 | bp->current_interval = bp->timer_interval; | 4870 | bp->current_interval = bp->timer_interval; |
4871 | 4871 | ||
4872 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 4872 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
4873 | 4873 | ||
4874 | if (bmcr & BMCR_ANENABLE) { | 4874 | if (bmcr & BMCR_ANENABLE) { |
4875 | u32 phy1, phy2; | 4875 | u32 phy1, phy2; |
4876 | 4876 | ||
4877 | bnx2_write_phy(bp, 0x1c, 0x7c00); | 4877 | bnx2_write_phy(bp, 0x1c, 0x7c00); |
4878 | bnx2_read_phy(bp, 0x1c, &phy1); | 4878 | bnx2_read_phy(bp, 0x1c, &phy1); |
4879 | 4879 | ||
4880 | bnx2_write_phy(bp, 0x17, 0x0f01); | 4880 | bnx2_write_phy(bp, 0x17, 0x0f01); |
4881 | bnx2_read_phy(bp, 0x15, &phy2); | 4881 | bnx2_read_phy(bp, 0x15, &phy2); |
4882 | bnx2_write_phy(bp, 0x17, 0x0f01); | 4882 | bnx2_write_phy(bp, 0x17, 0x0f01); |
4883 | bnx2_read_phy(bp, 0x15, &phy2); | 4883 | bnx2_read_phy(bp, 0x15, &phy2); |
4884 | 4884 | ||
4885 | if ((phy1 & 0x10) && /* SIGNAL DETECT */ | 4885 | if ((phy1 & 0x10) && /* SIGNAL DETECT */ |
4886 | !(phy2 & 0x20)) { /* no CONFIG */ | 4886 | !(phy2 & 0x20)) { /* no CONFIG */ |
4887 | 4887 | ||
4888 | bmcr &= ~BMCR_ANENABLE; | 4888 | bmcr &= ~BMCR_ANENABLE; |
4889 | bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; | 4889 | bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; |
4890 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); | 4890 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); |
4891 | bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG; | 4891 | bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG; |
4892 | } | 4892 | } |
4893 | } | 4893 | } |
4894 | } | 4894 | } |
4895 | else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && | 4895 | else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && |
4896 | (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) { | 4896 | (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) { |
4897 | u32 phy2; | 4897 | u32 phy2; |
4898 | 4898 | ||
4899 | bnx2_write_phy(bp, 0x17, 0x0f01); | 4899 | bnx2_write_phy(bp, 0x17, 0x0f01); |
4900 | bnx2_read_phy(bp, 0x15, &phy2); | 4900 | bnx2_read_phy(bp, 0x15, &phy2); |
4901 | if (phy2 & 0x20) { | 4901 | if (phy2 & 0x20) { |
4902 | u32 bmcr; | 4902 | u32 bmcr; |
4903 | 4903 | ||
4904 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 4904 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
4905 | bmcr |= BMCR_ANENABLE; | 4905 | bmcr |= BMCR_ANENABLE; |
4906 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); | 4906 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr); |
4907 | 4907 | ||
4908 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; | 4908 | bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG; |
4909 | } | 4909 | } |
4910 | } else | 4910 | } else |
4911 | bp->current_interval = bp->timer_interval; | 4911 | bp->current_interval = bp->timer_interval; |
4912 | 4912 | ||
4913 | spin_unlock(&bp->phy_lock); | 4913 | spin_unlock(&bp->phy_lock); |
4914 | } | 4914 | } |
4915 | 4915 | ||
4916 | static void | 4916 | static void |
4917 | bnx2_5708_serdes_timer(struct bnx2 *bp) | 4917 | bnx2_5708_serdes_timer(struct bnx2 *bp) |
4918 | { | 4918 | { |
4919 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 4919 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
4920 | return; | 4920 | return; |
4921 | 4921 | ||
4922 | if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) { | 4922 | if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) { |
4923 | bp->serdes_an_pending = 0; | 4923 | bp->serdes_an_pending = 0; |
4924 | return; | 4924 | return; |
4925 | } | 4925 | } |
4926 | 4926 | ||
4927 | spin_lock(&bp->phy_lock); | 4927 | spin_lock(&bp->phy_lock); |
4928 | if (bp->serdes_an_pending) | 4928 | if (bp->serdes_an_pending) |
4929 | bp->serdes_an_pending--; | 4929 | bp->serdes_an_pending--; |
4930 | else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { | 4930 | else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { |
4931 | u32 bmcr; | 4931 | u32 bmcr; |
4932 | 4932 | ||
4933 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 4933 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
4934 | if (bmcr & BMCR_ANENABLE) { | 4934 | if (bmcr & BMCR_ANENABLE) { |
4935 | bnx2_enable_forced_2g5(bp); | 4935 | bnx2_enable_forced_2g5(bp); |
4936 | bp->current_interval = SERDES_FORCED_TIMEOUT; | 4936 | bp->current_interval = SERDES_FORCED_TIMEOUT; |
4937 | } else { | 4937 | } else { |
4938 | bnx2_disable_forced_2g5(bp); | 4938 | bnx2_disable_forced_2g5(bp); |
4939 | bp->serdes_an_pending = 2; | 4939 | bp->serdes_an_pending = 2; |
4940 | bp->current_interval = bp->timer_interval; | 4940 | bp->current_interval = bp->timer_interval; |
4941 | } | 4941 | } |
4942 | 4942 | ||
4943 | } else | 4943 | } else |
4944 | bp->current_interval = bp->timer_interval; | 4944 | bp->current_interval = bp->timer_interval; |
4945 | 4945 | ||
4946 | spin_unlock(&bp->phy_lock); | 4946 | spin_unlock(&bp->phy_lock); |
4947 | } | 4947 | } |
4948 | 4948 | ||
4949 | static void | 4949 | static void |
4950 | bnx2_timer(unsigned long data) | 4950 | bnx2_timer(unsigned long data) |
4951 | { | 4951 | { |
4952 | struct bnx2 *bp = (struct bnx2 *) data; | 4952 | struct bnx2 *bp = (struct bnx2 *) data; |
4953 | 4953 | ||
4954 | if (!netif_running(bp->dev)) | 4954 | if (!netif_running(bp->dev)) |
4955 | return; | 4955 | return; |
4956 | 4956 | ||
4957 | if (atomic_read(&bp->intr_sem) != 0) | 4957 | if (atomic_read(&bp->intr_sem) != 0) |
4958 | goto bnx2_restart_timer; | 4958 | goto bnx2_restart_timer; |
4959 | 4959 | ||
4960 | bnx2_send_heart_beat(bp); | 4960 | bnx2_send_heart_beat(bp); |
4961 | 4961 | ||
4962 | bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT); | 4962 | bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT); |
4963 | 4963 | ||
4964 | /* workaround occasional corrupted counters */ | 4964 | /* workaround occasional corrupted counters */ |
4965 | if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks) | 4965 | if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks) |
4966 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | | 4966 | REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | |
4967 | BNX2_HC_COMMAND_STATS_NOW); | 4967 | BNX2_HC_COMMAND_STATS_NOW); |
4968 | 4968 | ||
4969 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 4969 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
4970 | if (CHIP_NUM(bp) == CHIP_NUM_5706) | 4970 | if (CHIP_NUM(bp) == CHIP_NUM_5706) |
4971 | bnx2_5706_serdes_timer(bp); | 4971 | bnx2_5706_serdes_timer(bp); |
4972 | else | 4972 | else |
4973 | bnx2_5708_serdes_timer(bp); | 4973 | bnx2_5708_serdes_timer(bp); |
4974 | } | 4974 | } |
4975 | 4975 | ||
4976 | bnx2_restart_timer: | 4976 | bnx2_restart_timer: |
4977 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 4977 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
4978 | } | 4978 | } |
4979 | 4979 | ||
4980 | static int | 4980 | static int |
4981 | bnx2_request_irq(struct bnx2 *bp) | 4981 | bnx2_request_irq(struct bnx2 *bp) |
4982 | { | 4982 | { |
4983 | struct net_device *dev = bp->dev; | 4983 | struct net_device *dev = bp->dev; |
4984 | int rc = 0; | 4984 | int rc = 0; |
4985 | 4985 | ||
4986 | if (bp->flags & USING_MSI_FLAG) { | 4986 | if (bp->flags & USING_MSI_FLAG) { |
4987 | irq_handler_t fn = bnx2_msi; | 4987 | irq_handler_t fn = bnx2_msi; |
4988 | 4988 | ||
4989 | if (bp->flags & ONE_SHOT_MSI_FLAG) | 4989 | if (bp->flags & ONE_SHOT_MSI_FLAG) |
4990 | fn = bnx2_msi_1shot; | 4990 | fn = bnx2_msi_1shot; |
4991 | 4991 | ||
4992 | rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev); | 4992 | rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev); |
4993 | } else | 4993 | } else |
4994 | rc = request_irq(bp->pdev->irq, bnx2_interrupt, | 4994 | rc = request_irq(bp->pdev->irq, bnx2_interrupt, |
4995 | IRQF_SHARED, dev->name, dev); | 4995 | IRQF_SHARED, dev->name, dev); |
4996 | return rc; | 4996 | return rc; |
4997 | } | 4997 | } |
4998 | 4998 | ||
4999 | static void | 4999 | static void |
5000 | bnx2_free_irq(struct bnx2 *bp) | 5000 | bnx2_free_irq(struct bnx2 *bp) |
5001 | { | 5001 | { |
5002 | struct net_device *dev = bp->dev; | 5002 | struct net_device *dev = bp->dev; |
5003 | 5003 | ||
5004 | if (bp->flags & USING_MSI_FLAG) { | 5004 | if (bp->flags & USING_MSI_FLAG) { |
5005 | free_irq(bp->pdev->irq, dev); | 5005 | free_irq(bp->pdev->irq, dev); |
5006 | pci_disable_msi(bp->pdev); | 5006 | pci_disable_msi(bp->pdev); |
5007 | bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG); | 5007 | bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG); |
5008 | } else | 5008 | } else |
5009 | free_irq(bp->pdev->irq, dev); | 5009 | free_irq(bp->pdev->irq, dev); |
5010 | } | 5010 | } |
5011 | 5011 | ||
5012 | /* Called with rtnl_lock */ | 5012 | /* Called with rtnl_lock */ |
5013 | static int | 5013 | static int |
5014 | bnx2_open(struct net_device *dev) | 5014 | bnx2_open(struct net_device *dev) |
5015 | { | 5015 | { |
5016 | struct bnx2 *bp = netdev_priv(dev); | 5016 | struct bnx2 *bp = netdev_priv(dev); |
5017 | int rc; | 5017 | int rc; |
5018 | 5018 | ||
5019 | netif_carrier_off(dev); | 5019 | netif_carrier_off(dev); |
5020 | 5020 | ||
5021 | bnx2_set_power_state(bp, PCI_D0); | 5021 | bnx2_set_power_state(bp, PCI_D0); |
5022 | bnx2_disable_int(bp); | 5022 | bnx2_disable_int(bp); |
5023 | 5023 | ||
5024 | rc = bnx2_alloc_mem(bp); | 5024 | rc = bnx2_alloc_mem(bp); |
5025 | if (rc) | 5025 | if (rc) |
5026 | return rc; | 5026 | return rc; |
5027 | 5027 | ||
5028 | if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { | 5028 | if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { |
5029 | if (pci_enable_msi(bp->pdev) == 0) { | 5029 | if (pci_enable_msi(bp->pdev) == 0) { |
5030 | bp->flags |= USING_MSI_FLAG; | 5030 | bp->flags |= USING_MSI_FLAG; |
5031 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 5031 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
5032 | bp->flags |= ONE_SHOT_MSI_FLAG; | 5032 | bp->flags |= ONE_SHOT_MSI_FLAG; |
5033 | } | 5033 | } |
5034 | } | 5034 | } |
5035 | rc = bnx2_request_irq(bp); | 5035 | rc = bnx2_request_irq(bp); |
5036 | 5036 | ||
5037 | if (rc) { | 5037 | if (rc) { |
5038 | bnx2_free_mem(bp); | 5038 | bnx2_free_mem(bp); |
5039 | return rc; | 5039 | return rc; |
5040 | } | 5040 | } |
5041 | 5041 | ||
5042 | rc = bnx2_init_nic(bp); | 5042 | rc = bnx2_init_nic(bp); |
5043 | 5043 | ||
5044 | if (rc) { | 5044 | if (rc) { |
5045 | bnx2_free_irq(bp); | 5045 | bnx2_free_irq(bp); |
5046 | bnx2_free_skbs(bp); | 5046 | bnx2_free_skbs(bp); |
5047 | bnx2_free_mem(bp); | 5047 | bnx2_free_mem(bp); |
5048 | return rc; | 5048 | return rc; |
5049 | } | 5049 | } |
5050 | 5050 | ||
5051 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 5051 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
5052 | 5052 | ||
5053 | atomic_set(&bp->intr_sem, 0); | 5053 | atomic_set(&bp->intr_sem, 0); |
5054 | 5054 | ||
5055 | bnx2_enable_int(bp); | 5055 | bnx2_enable_int(bp); |
5056 | 5056 | ||
5057 | if (bp->flags & USING_MSI_FLAG) { | 5057 | if (bp->flags & USING_MSI_FLAG) { |
5058 | /* Test MSI to make sure it is working | 5058 | /* Test MSI to make sure it is working |
5059 | * If MSI test fails, go back to INTx mode | 5059 | * If MSI test fails, go back to INTx mode |
5060 | */ | 5060 | */ |
5061 | if (bnx2_test_intr(bp) != 0) { | 5061 | if (bnx2_test_intr(bp) != 0) { |
5062 | printk(KERN_WARNING PFX "%s: No interrupt was generated" | 5062 | printk(KERN_WARNING PFX "%s: No interrupt was generated" |
5063 | " using MSI, switching to INTx mode. Please" | 5063 | " using MSI, switching to INTx mode. Please" |
5064 | " report this failure to the PCI maintainer" | 5064 | " report this failure to the PCI maintainer" |
5065 | " and include system chipset information.\n", | 5065 | " and include system chipset information.\n", |
5066 | bp->dev->name); | 5066 | bp->dev->name); |
5067 | 5067 | ||
5068 | bnx2_disable_int(bp); | 5068 | bnx2_disable_int(bp); |
5069 | bnx2_free_irq(bp); | 5069 | bnx2_free_irq(bp); |
5070 | 5070 | ||
5071 | rc = bnx2_init_nic(bp); | 5071 | rc = bnx2_init_nic(bp); |
5072 | 5072 | ||
5073 | if (!rc) | 5073 | if (!rc) |
5074 | rc = bnx2_request_irq(bp); | 5074 | rc = bnx2_request_irq(bp); |
5075 | 5075 | ||
5076 | if (rc) { | 5076 | if (rc) { |
5077 | bnx2_free_skbs(bp); | 5077 | bnx2_free_skbs(bp); |
5078 | bnx2_free_mem(bp); | 5078 | bnx2_free_mem(bp); |
5079 | del_timer_sync(&bp->timer); | 5079 | del_timer_sync(&bp->timer); |
5080 | return rc; | 5080 | return rc; |
5081 | } | 5081 | } |
5082 | bnx2_enable_int(bp); | 5082 | bnx2_enable_int(bp); |
5083 | } | 5083 | } |
5084 | } | 5084 | } |
5085 | if (bp->flags & USING_MSI_FLAG) { | 5085 | if (bp->flags & USING_MSI_FLAG) { |
5086 | printk(KERN_INFO PFX "%s: using MSI\n", dev->name); | 5086 | printk(KERN_INFO PFX "%s: using MSI\n", dev->name); |
5087 | } | 5087 | } |
5088 | 5088 | ||
5089 | netif_start_queue(dev); | 5089 | netif_start_queue(dev); |
5090 | 5090 | ||
5091 | return 0; | 5091 | return 0; |
5092 | } | 5092 | } |
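bnx2_open() prefers MSI when the device advertises it (one-shot MSI on the 5709), but trusts it only after bnx2_test_intr() confirms that a forced coalesce event actually raised an interrupt; if nothing arrives it frees the vector, re-initializes the NIC and falls back to shared INTx. A condensed sketch of that decision flow; the struct and helper names below are placeholders, not the driver's real calls:

/* Placeholder helpers standing in for pci_enable_msi()/request_irq()/
 * bnx2_test_intr() and friends in the open path above. */
struct irq_ctx { int using_msi; };

extern int  enable_msi(struct irq_ctx *c);     /* 0 on success           */
extern int  request_nic_irq(struct irq_ctx *c);
extern void free_nic_irq(struct irq_ctx *c);
extern int  test_irq(struct irq_ctx *c);       /* 0 if an IRQ was seen   */
extern int  reinit_nic(struct irq_ctx *c);

static int bring_up_irq(struct irq_ctx *c)
{
    int rc;

    if (enable_msi(c) == 0)
        c->using_msi = 1;

    rc = request_nic_irq(c);
    if (rc)
        return rc;

    if (c->using_msi && test_irq(c) != 0) {
        /* MSI never fired: drop back to legacy INTx and retry. */
        free_nic_irq(c);
        c->using_msi = 0;
        rc = reinit_nic(c);
        if (rc == 0)
            rc = request_nic_irq(c);
    }
    return rc;
}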
5093 | 5093 | ||
5094 | static void | 5094 | static void |
5095 | bnx2_reset_task(struct work_struct *work) | 5095 | bnx2_reset_task(struct work_struct *work) |
5096 | { | 5096 | { |
5097 | struct bnx2 *bp = container_of(work, struct bnx2, reset_task); | 5097 | struct bnx2 *bp = container_of(work, struct bnx2, reset_task); |
5098 | 5098 | ||
5099 | if (!netif_running(bp->dev)) | 5099 | if (!netif_running(bp->dev)) |
5100 | return; | 5100 | return; |
5101 | 5101 | ||
5102 | bp->in_reset_task = 1; | 5102 | bp->in_reset_task = 1; |
5103 | bnx2_netif_stop(bp); | 5103 | bnx2_netif_stop(bp); |
5104 | 5104 | ||
5105 | bnx2_init_nic(bp); | 5105 | bnx2_init_nic(bp); |
5106 | 5106 | ||
5107 | atomic_set(&bp->intr_sem, 1); | 5107 | atomic_set(&bp->intr_sem, 1); |
5108 | bnx2_netif_start(bp); | 5108 | bnx2_netif_start(bp); |
5109 | bp->in_reset_task = 0; | 5109 | bp->in_reset_task = 0; |
5110 | } | 5110 | } |
5111 | 5111 | ||
5112 | static void | 5112 | static void |
5113 | bnx2_tx_timeout(struct net_device *dev) | 5113 | bnx2_tx_timeout(struct net_device *dev) |
5114 | { | 5114 | { |
5115 | struct bnx2 *bp = netdev_priv(dev); | 5115 | struct bnx2 *bp = netdev_priv(dev); |
5116 | 5116 | ||
5117 | /* This allows the netif to be shutdown gracefully before resetting */ | 5117 | /* This allows the netif to be shutdown gracefully before resetting */ |
5118 | schedule_work(&bp->reset_task); | 5118 | schedule_work(&bp->reset_task); |
5119 | } | 5119 | } |
5120 | 5120 | ||
5121 | #ifdef BCM_VLAN | 5121 | #ifdef BCM_VLAN |
5122 | /* Called with rtnl_lock */ | 5122 | /* Called with rtnl_lock */ |
5123 | static void | 5123 | static void |
5124 | bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | 5124 | bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) |
5125 | { | 5125 | { |
5126 | struct bnx2 *bp = netdev_priv(dev); | 5126 | struct bnx2 *bp = netdev_priv(dev); |
5127 | 5127 | ||
5128 | bnx2_netif_stop(bp); | 5128 | bnx2_netif_stop(bp); |
5129 | 5129 | ||
5130 | bp->vlgrp = vlgrp; | 5130 | bp->vlgrp = vlgrp; |
5131 | bnx2_set_rx_mode(dev); | 5131 | bnx2_set_rx_mode(dev); |
5132 | 5132 | ||
5133 | bnx2_netif_start(bp); | 5133 | bnx2_netif_start(bp); |
5134 | } | 5134 | } |
5135 | #endif | 5135 | #endif |
5136 | 5136 | ||
5137 | /* Called with netif_tx_lock. | 5137 | /* Called with netif_tx_lock. |
5138 | * bnx2_tx_int() runs without netif_tx_lock unless it needs to call | 5138 | * bnx2_tx_int() runs without netif_tx_lock unless it needs to call |
5139 | * netif_wake_queue(). | 5139 | * netif_wake_queue(). |
5140 | */ | 5140 | */ |
5141 | static int | 5141 | static int |
5142 | bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) | 5142 | bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) |
5143 | { | 5143 | { |
5144 | struct bnx2 *bp = netdev_priv(dev); | 5144 | struct bnx2 *bp = netdev_priv(dev); |
5145 | dma_addr_t mapping; | 5145 | dma_addr_t mapping; |
5146 | struct tx_bd *txbd; | 5146 | struct tx_bd *txbd; |
5147 | struct sw_bd *tx_buf; | 5147 | struct sw_bd *tx_buf; |
5148 | u32 len, vlan_tag_flags, last_frag, mss; | 5148 | u32 len, vlan_tag_flags, last_frag, mss; |
5149 | u16 prod, ring_prod; | 5149 | u16 prod, ring_prod; |
5150 | int i; | 5150 | int i; |
5151 | 5151 | ||
5152 | if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) { | 5152 | if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) { |
5153 | netif_stop_queue(dev); | 5153 | netif_stop_queue(dev); |
5154 | printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", | 5154 | printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n", |
5155 | dev->name); | 5155 | dev->name); |
5156 | 5156 | ||
5157 | return NETDEV_TX_BUSY; | 5157 | return NETDEV_TX_BUSY; |
5158 | } | 5158 | } |
5159 | len = skb_headlen(skb); | 5159 | len = skb_headlen(skb); |
5160 | prod = bp->tx_prod; | 5160 | prod = bp->tx_prod; |
5161 | ring_prod = TX_RING_IDX(prod); | 5161 | ring_prod = TX_RING_IDX(prod); |
5162 | 5162 | ||
5163 | vlan_tag_flags = 0; | 5163 | vlan_tag_flags = 0; |
5164 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 5164 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
5165 | vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; | 5165 | vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; |
5166 | } | 5166 | } |
5167 | 5167 | ||
5168 | if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) { | 5168 | if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) { |
5169 | vlan_tag_flags |= | 5169 | vlan_tag_flags |= |
5170 | (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); | 5170 | (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); |
5171 | } | 5171 | } |
5172 | if ((mss = skb_shinfo(skb)->gso_size)) { | 5172 | if ((mss = skb_shinfo(skb)->gso_size)) { |
5173 | u32 tcp_opt_len, ip_tcp_len; | 5173 | u32 tcp_opt_len, ip_tcp_len; |
5174 | struct iphdr *iph; | 5174 | struct iphdr *iph; |
5175 | 5175 | ||
5176 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; | 5176 | vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; |
5177 | 5177 | ||
5178 | tcp_opt_len = tcp_optlen(skb); | 5178 | tcp_opt_len = tcp_optlen(skb); |
5179 | 5179 | ||
5180 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 5180 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { |
5181 | u32 tcp_off = skb_transport_offset(skb) - | 5181 | u32 tcp_off = skb_transport_offset(skb) - |
5182 | sizeof(struct ipv6hdr) - ETH_HLEN; | 5182 | sizeof(struct ipv6hdr) - ETH_HLEN; |
5183 | 5183 | ||
5184 | vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) | | 5184 | vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) | |
5185 | TX_BD_FLAGS_SW_FLAGS; | 5185 | TX_BD_FLAGS_SW_FLAGS; |
5186 | if (likely(tcp_off == 0)) | 5186 | if (likely(tcp_off == 0)) |
5187 | vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK; | 5187 | vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK; |
5188 | else { | 5188 | else { |
5189 | tcp_off >>= 3; | 5189 | tcp_off >>= 3; |
5190 | vlan_tag_flags |= ((tcp_off & 0x3) << | 5190 | vlan_tag_flags |= ((tcp_off & 0x3) << |
5191 | TX_BD_FLAGS_TCP6_OFF0_SHL) | | 5191 | TX_BD_FLAGS_TCP6_OFF0_SHL) | |
5192 | ((tcp_off & 0x10) << | 5192 | ((tcp_off & 0x10) << |
5193 | TX_BD_FLAGS_TCP6_OFF4_SHL); | 5193 | TX_BD_FLAGS_TCP6_OFF4_SHL); |
5194 | mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL; | 5194 | mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL; |
5195 | } | 5195 | } |
5196 | } else { | 5196 | } else { |
5197 | if (skb_header_cloned(skb) && | 5197 | if (skb_header_cloned(skb) && |
5198 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 5198 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
5199 | dev_kfree_skb(skb); | 5199 | dev_kfree_skb(skb); |
5200 | return NETDEV_TX_OK; | 5200 | return NETDEV_TX_OK; |
5201 | } | 5201 | } |
5202 | 5202 | ||
5203 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | 5203 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
5204 | 5204 | ||
5205 | iph = ip_hdr(skb); | 5205 | iph = ip_hdr(skb); |
5206 | iph->check = 0; | 5206 | iph->check = 0; |
5207 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | 5207 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
5208 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 5208 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
5209 | iph->daddr, 0, | 5209 | iph->daddr, 0, |
5210 | IPPROTO_TCP, | 5210 | IPPROTO_TCP, |
5211 | 0); | 5211 | 0); |
5212 | if (tcp_opt_len || (iph->ihl > 5)) { | 5212 | if (tcp_opt_len || (iph->ihl > 5)) { |
5213 | vlan_tag_flags |= ((iph->ihl - 5) + | 5213 | vlan_tag_flags |= ((iph->ihl - 5) + |
5214 | (tcp_opt_len >> 2)) << 8; | 5214 | (tcp_opt_len >> 2)) << 8; |
5215 | } | 5215 | } |
5216 | } | 5216 | } |
5217 | } else | 5217 | } else |
5218 | mss = 0; | 5218 | mss = 0; |
5219 | 5219 | ||
5220 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 5220 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
5221 | 5221 | ||
5222 | tx_buf = &bp->tx_buf_ring[ring_prod]; | 5222 | tx_buf = &bp->tx_buf_ring[ring_prod]; |
5223 | tx_buf->skb = skb; | 5223 | tx_buf->skb = skb; |
5224 | pci_unmap_addr_set(tx_buf, mapping, mapping); | 5224 | pci_unmap_addr_set(tx_buf, mapping, mapping); |
5225 | 5225 | ||
5226 | txbd = &bp->tx_desc_ring[ring_prod]; | 5226 | txbd = &bp->tx_desc_ring[ring_prod]; |
5227 | 5227 | ||
5228 | txbd->tx_bd_haddr_hi = (u64) mapping >> 32; | 5228 | txbd->tx_bd_haddr_hi = (u64) mapping >> 32; |
5229 | txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; | 5229 | txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; |
5230 | txbd->tx_bd_mss_nbytes = len | (mss << 16); | 5230 | txbd->tx_bd_mss_nbytes = len | (mss << 16); |
5231 | txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START; | 5231 | txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START; |
5232 | 5232 | ||
5233 | last_frag = skb_shinfo(skb)->nr_frags; | 5233 | last_frag = skb_shinfo(skb)->nr_frags; |
5234 | 5234 | ||
5235 | for (i = 0; i < last_frag; i++) { | 5235 | for (i = 0; i < last_frag; i++) { |
5236 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 5236 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
5237 | 5237 | ||
5238 | prod = NEXT_TX_BD(prod); | 5238 | prod = NEXT_TX_BD(prod); |
5239 | ring_prod = TX_RING_IDX(prod); | 5239 | ring_prod = TX_RING_IDX(prod); |
5240 | txbd = &bp->tx_desc_ring[ring_prod]; | 5240 | txbd = &bp->tx_desc_ring[ring_prod]; |
5241 | 5241 | ||
5242 | len = frag->size; | 5242 | len = frag->size; |
5243 | mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, | 5243 | mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, |
5244 | len, PCI_DMA_TODEVICE); | 5244 | len, PCI_DMA_TODEVICE); |
5245 | pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod], | 5245 | pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod], |
5246 | mapping, mapping); | 5246 | mapping, mapping); |
5247 | 5247 | ||
5248 | txbd->tx_bd_haddr_hi = (u64) mapping >> 32; | 5248 | txbd->tx_bd_haddr_hi = (u64) mapping >> 32; |
5249 | txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; | 5249 | txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; |
5250 | txbd->tx_bd_mss_nbytes = len | (mss << 16); | 5250 | txbd->tx_bd_mss_nbytes = len | (mss << 16); |
5251 | txbd->tx_bd_vlan_tag_flags = vlan_tag_flags; | 5251 | txbd->tx_bd_vlan_tag_flags = vlan_tag_flags; |
5252 | 5252 | ||
5253 | } | 5253 | } |
5254 | txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; | 5254 | txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; |
5255 | 5255 | ||
5256 | prod = NEXT_TX_BD(prod); | 5256 | prod = NEXT_TX_BD(prod); |
5257 | bp->tx_prod_bseq += skb->len; | 5257 | bp->tx_prod_bseq += skb->len; |
5258 | 5258 | ||
5259 | REG_WR16(bp, bp->tx_bidx_addr, prod); | 5259 | REG_WR16(bp, bp->tx_bidx_addr, prod); |
5260 | REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); | 5260 | REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq); |
5261 | 5261 | ||
5262 | mmiowb(); | 5262 | mmiowb(); |
5263 | 5263 | ||
5264 | bp->tx_prod = prod; | 5264 | bp->tx_prod = prod; |
5265 | dev->trans_start = jiffies; | 5265 | dev->trans_start = jiffies; |
5266 | 5266 | ||
5267 | if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { | 5267 | if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { |
5268 | netif_stop_queue(dev); | 5268 | netif_stop_queue(dev); |
5269 | if (bnx2_tx_avail(bp) > bp->tx_wake_thresh) | 5269 | if (bnx2_tx_avail(bp) > bp->tx_wake_thresh) |
5270 | netif_wake_queue(dev); | 5270 | netif_wake_queue(dev); |
5271 | } | 5271 | } |
5272 | 5272 | ||
5273 | return NETDEV_TX_OK; | 5273 | return NETDEV_TX_OK; |
5274 | } | 5274 | } |
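The tail of bnx2_start_xmit() stops the queue as soon as fewer than MAX_SKB_FRAGS + 1 descriptors remain (a worst-case packet might no longer fit), then immediately re-checks availability and wakes the queue itself if completions freed descriptors in the meantime; stopping before re-checking is what keeps this safe against bnx2_tx_int(), which, per the comment above the function, runs without netif_tx_lock unless it needs netif_wake_queue(). A condensed sketch of the stop-then-recheck pattern, with stand-in names (the 18-fragment figure is an assumption about MAX_SKB_FRAGS, not taken from this file):

/* avail(), stop_queue() and wake_queue() stand in for bnx2_tx_avail(),
 * netif_stop_queue() and netif_wake_queue(). */
extern unsigned int avail(void);
extern void stop_queue(void);
extern void wake_queue(void);

#define WORST_CASE_DESCS  (18 + 1)     /* assumed MAX_SKB_FRAGS + 1 head BD */

static void tx_maybe_stop(unsigned int wake_thresh)
{
    if (avail() < WORST_CASE_DESCS) {
        stop_queue();                  /* publish the stopped state first   */
        if (avail() > wake_thresh)     /* then recheck: the completion path */
            wake_queue();              /* may have freed BDs concurrently   */
    }
}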
5275 | 5275 | ||
5276 | /* Called with rtnl_lock */ | 5276 | /* Called with rtnl_lock */ |
5277 | static int | 5277 | static int |
5278 | bnx2_close(struct net_device *dev) | 5278 | bnx2_close(struct net_device *dev) |
5279 | { | 5279 | { |
5280 | struct bnx2 *bp = netdev_priv(dev); | 5280 | struct bnx2 *bp = netdev_priv(dev); |
5281 | u32 reset_code; | 5281 | u32 reset_code; |
5282 | 5282 | ||
5283 | /* Calling flush_scheduled_work() may deadlock because | 5283 | /* Calling flush_scheduled_work() may deadlock because |
5284 | * linkwatch_event() may be on the workqueue and it will try to get | 5284 | * linkwatch_event() may be on the workqueue and it will try to get |
5285 | * the rtnl_lock which we are holding. | 5285 | * the rtnl_lock which we are holding. |
5286 | */ | 5286 | */ |
5287 | while (bp->in_reset_task) | 5287 | while (bp->in_reset_task) |
5288 | msleep(1); | 5288 | msleep(1); |
5289 | 5289 | ||
5290 | bnx2_netif_stop(bp); | 5290 | bnx2_netif_stop(bp); |
5291 | del_timer_sync(&bp->timer); | 5291 | del_timer_sync(&bp->timer); |
5292 | if (bp->flags & NO_WOL_FLAG) | 5292 | if (bp->flags & NO_WOL_FLAG) |
5293 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; | 5293 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; |
5294 | else if (bp->wol) | 5294 | else if (bp->wol) |
5295 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; | 5295 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; |
5296 | else | 5296 | else |
5297 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; | 5297 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; |
5298 | bnx2_reset_chip(bp, reset_code); | 5298 | bnx2_reset_chip(bp, reset_code); |
5299 | bnx2_free_irq(bp); | 5299 | bnx2_free_irq(bp); |
5300 | bnx2_free_skbs(bp); | 5300 | bnx2_free_skbs(bp); |
5301 | bnx2_free_mem(bp); | 5301 | bnx2_free_mem(bp); |
5302 | bp->link_up = 0; | 5302 | bp->link_up = 0; |
5303 | netif_carrier_off(bp->dev); | 5303 | netif_carrier_off(bp->dev); |
5304 | bnx2_set_power_state(bp, PCI_D3hot); | 5304 | bnx2_set_power_state(bp, PCI_D3hot); |
5305 | return 0; | 5305 | return 0; |
5306 | } | 5306 | } |
5307 | 5307 | ||
5308 | #define GET_NET_STATS64(ctr) \ | 5308 | #define GET_NET_STATS64(ctr) \ |
5309 | (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ | 5309 | (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ |
5310 | (unsigned long) (ctr##_lo) | 5310 | (unsigned long) (ctr##_lo) |
5311 | 5311 | ||
5312 | #define GET_NET_STATS32(ctr) \ | 5312 | #define GET_NET_STATS32(ctr) \ |
5313 | (ctr##_lo) | 5313 | (ctr##_lo) |
5314 | 5314 | ||
5315 | #if (BITS_PER_LONG == 64) | 5315 | #if (BITS_PER_LONG == 64) |
5316 | #define GET_NET_STATS GET_NET_STATS64 | 5316 | #define GET_NET_STATS GET_NET_STATS64 |
5317 | #else | 5317 | #else |
5318 | #define GET_NET_STATS GET_NET_STATS32 | 5318 | #define GET_NET_STATS GET_NET_STATS32 |
5319 | #endif | 5319 | #endif |
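Each hardware counter is exported as a 32-bit high/low pair; the GET_NET_STATS macros splice the pair into an unsigned long on 64-bit builds and simply drop the high word on 32-bit builds, where unsigned long cannot hold it anyway. The same selection written out as a function, for reference (a sketch, not the macro's exact token pasting):

#include <limits.h>
#include <stdint.h>

/* Combine a split 64-bit hardware counter the way GET_NET_STATS does:
 * both halves on a 64-bit unsigned long, only the low half on 32-bit. */
static unsigned long get_net_stat(uint32_t hi, uint32_t lo)
{
#if (ULONG_MAX > 0xffffffffUL)
    return ((unsigned long)hi << 32) | lo;   /* '|' is equivalent to the macro's '+' here */
#else
    (void)hi;
    return lo;
#endif
}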
5320 | 5320 | ||
5321 | static struct net_device_stats * | 5321 | static struct net_device_stats * |
5322 | bnx2_get_stats(struct net_device *dev) | 5322 | bnx2_get_stats(struct net_device *dev) |
5323 | { | 5323 | { |
5324 | struct bnx2 *bp = netdev_priv(dev); | 5324 | struct bnx2 *bp = netdev_priv(dev); |
5325 | struct statistics_block *stats_blk = bp->stats_blk; | 5325 | struct statistics_block *stats_blk = bp->stats_blk; |
5326 | struct net_device_stats *net_stats = &bp->net_stats; | 5326 | struct net_device_stats *net_stats = &bp->net_stats; |
5327 | 5327 | ||
5328 | if (bp->stats_blk == NULL) { | 5328 | if (bp->stats_blk == NULL) { |
5329 | return net_stats; | 5329 | return net_stats; |
5330 | } | 5330 | } |
5331 | net_stats->rx_packets = | 5331 | net_stats->rx_packets = |
5332 | GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) + | 5332 | GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) + |
5333 | GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) + | 5333 | GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) + |
5334 | GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts); | 5334 | GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts); |
5335 | 5335 | ||
5336 | net_stats->tx_packets = | 5336 | net_stats->tx_packets = |
5337 | GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) + | 5337 | GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) + |
5338 | GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) + | 5338 | GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) + |
5339 | GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts); | 5339 | GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts); |
5340 | 5340 | ||
5341 | net_stats->rx_bytes = | 5341 | net_stats->rx_bytes = |
5342 | GET_NET_STATS(stats_blk->stat_IfHCInOctets); | 5342 | GET_NET_STATS(stats_blk->stat_IfHCInOctets); |
5343 | 5343 | ||
5344 | net_stats->tx_bytes = | 5344 | net_stats->tx_bytes = |
5345 | GET_NET_STATS(stats_blk->stat_IfHCOutOctets); | 5345 | GET_NET_STATS(stats_blk->stat_IfHCOutOctets); |
5346 | 5346 | ||
5347 | net_stats->multicast = | 5347 | net_stats->multicast = |
5348 | GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts); | 5348 | GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts); |
5349 | 5349 | ||
5350 | net_stats->collisions = | 5350 | net_stats->collisions = |
5351 | (unsigned long) stats_blk->stat_EtherStatsCollisions; | 5351 | (unsigned long) stats_blk->stat_EtherStatsCollisions; |
5352 | 5352 | ||
5353 | net_stats->rx_length_errors = | 5353 | net_stats->rx_length_errors = |
5354 | (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts + | 5354 | (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts + |
5355 | stats_blk->stat_EtherStatsOverrsizePkts); | 5355 | stats_blk->stat_EtherStatsOverrsizePkts); |
5356 | 5356 | ||
5357 | net_stats->rx_over_errors = | 5357 | net_stats->rx_over_errors = |
5358 | (unsigned long) stats_blk->stat_IfInMBUFDiscards; | 5358 | (unsigned long) stats_blk->stat_IfInMBUFDiscards; |
5359 | 5359 | ||
5360 | net_stats->rx_frame_errors = | 5360 | net_stats->rx_frame_errors = |
5361 | (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors; | 5361 | (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors; |
5362 | 5362 | ||
5363 | net_stats->rx_crc_errors = | 5363 | net_stats->rx_crc_errors = |
5364 | (unsigned long) stats_blk->stat_Dot3StatsFCSErrors; | 5364 | (unsigned long) stats_blk->stat_Dot3StatsFCSErrors; |
5365 | 5365 | ||
5366 | net_stats->rx_errors = net_stats->rx_length_errors + | 5366 | net_stats->rx_errors = net_stats->rx_length_errors + |
5367 | net_stats->rx_over_errors + net_stats->rx_frame_errors + | 5367 | net_stats->rx_over_errors + net_stats->rx_frame_errors + |
5368 | net_stats->rx_crc_errors; | 5368 | net_stats->rx_crc_errors; |
5369 | 5369 | ||
5370 | net_stats->tx_aborted_errors = | 5370 | net_stats->tx_aborted_errors = |
5371 | (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions + | 5371 | (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions + |
5372 | stats_blk->stat_Dot3StatsLateCollisions); | 5372 | stats_blk->stat_Dot3StatsLateCollisions); |
5373 | 5373 | ||
5374 | if ((CHIP_NUM(bp) == CHIP_NUM_5706) || | 5374 | if ((CHIP_NUM(bp) == CHIP_NUM_5706) || |
5375 | (CHIP_ID(bp) == CHIP_ID_5708_A0)) | 5375 | (CHIP_ID(bp) == CHIP_ID_5708_A0)) |
5376 | net_stats->tx_carrier_errors = 0; | 5376 | net_stats->tx_carrier_errors = 0; |
5377 | else { | 5377 | else { |
5378 | net_stats->tx_carrier_errors = | 5378 | net_stats->tx_carrier_errors = |
5379 | (unsigned long) | 5379 | (unsigned long) |
5380 | stats_blk->stat_Dot3StatsCarrierSenseErrors; | 5380 | stats_blk->stat_Dot3StatsCarrierSenseErrors; |
5381 | } | 5381 | } |
5382 | 5382 | ||
5383 | net_stats->tx_errors = | 5383 | net_stats->tx_errors = |
5384 | (unsigned long) | 5384 | (unsigned long) |
5385 | stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors | 5385 | stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors |
5386 | + | 5386 | + |
5387 | net_stats->tx_aborted_errors + | 5387 | net_stats->tx_aborted_errors + |
5388 | net_stats->tx_carrier_errors; | 5388 | net_stats->tx_carrier_errors; |
5389 | 5389 | ||
5390 | net_stats->rx_missed_errors = | 5390 | net_stats->rx_missed_errors = |
5391 | (unsigned long) (stats_blk->stat_IfInMBUFDiscards + | 5391 | (unsigned long) (stats_blk->stat_IfInMBUFDiscards + |
5392 | stats_blk->stat_FwRxDrop); | 5392 | stats_blk->stat_FwRxDrop); |
5393 | 5393 | ||
5394 | return net_stats; | 5394 | return net_stats; |
5395 | } | 5395 | } |
5396 | 5396 | ||
5397 | /* All ethtool functions called with rtnl_lock */ | 5397 | /* All ethtool functions called with rtnl_lock */ |
5398 | 5398 | ||
5399 | static int | 5399 | static int |
5400 | bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 5400 | bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
5401 | { | 5401 | { |
5402 | struct bnx2 *bp = netdev_priv(dev); | 5402 | struct bnx2 *bp = netdev_priv(dev); |
5403 | int support_serdes = 0, support_copper = 0; | 5403 | int support_serdes = 0, support_copper = 0; |
5404 | 5404 | ||
5405 | cmd->supported = SUPPORTED_Autoneg; | 5405 | cmd->supported = SUPPORTED_Autoneg; |
5406 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { | 5406 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { |
5407 | support_serdes = 1; | 5407 | support_serdes = 1; |
5408 | support_copper = 1; | 5408 | support_copper = 1; |
5409 | } else if (bp->phy_port == PORT_FIBRE) | 5409 | } else if (bp->phy_port == PORT_FIBRE) |
5410 | support_serdes = 1; | 5410 | support_serdes = 1; |
5411 | else | 5411 | else |
5412 | support_copper = 1; | 5412 | support_copper = 1; |
5413 | 5413 | ||
5414 | if (support_serdes) { | 5414 | if (support_serdes) { |
5415 | cmd->supported |= SUPPORTED_1000baseT_Full | | 5415 | cmd->supported |= SUPPORTED_1000baseT_Full | |
5416 | SUPPORTED_FIBRE; | 5416 | SUPPORTED_FIBRE; |
5417 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) | 5417 | if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) |
5418 | cmd->supported |= SUPPORTED_2500baseX_Full; | 5418 | cmd->supported |= SUPPORTED_2500baseX_Full; |
5419 | 5419 | ||
5420 | } | 5420 | } |
5421 | if (support_copper) { | 5421 | if (support_copper) { |
5422 | cmd->supported |= SUPPORTED_10baseT_Half | | 5422 | cmd->supported |= SUPPORTED_10baseT_Half | |
5423 | SUPPORTED_10baseT_Full | | 5423 | SUPPORTED_10baseT_Full | |
5424 | SUPPORTED_100baseT_Half | | 5424 | SUPPORTED_100baseT_Half | |
5425 | SUPPORTED_100baseT_Full | | 5425 | SUPPORTED_100baseT_Full | |
5426 | SUPPORTED_1000baseT_Full | | 5426 | SUPPORTED_1000baseT_Full | |
5427 | SUPPORTED_TP; | 5427 | SUPPORTED_TP; |
5428 | 5428 | ||
5429 | } | 5429 | } |
5430 | 5430 | ||
5431 | spin_lock_bh(&bp->phy_lock); | 5431 | spin_lock_bh(&bp->phy_lock); |
5432 | cmd->port = bp->phy_port; | 5432 | cmd->port = bp->phy_port; |
5433 | cmd->advertising = bp->advertising; | 5433 | cmd->advertising = bp->advertising; |
5434 | 5434 | ||
5435 | if (bp->autoneg & AUTONEG_SPEED) { | 5435 | if (bp->autoneg & AUTONEG_SPEED) { |
5436 | cmd->autoneg = AUTONEG_ENABLE; | 5436 | cmd->autoneg = AUTONEG_ENABLE; |
5437 | } | 5437 | } |
5438 | else { | 5438 | else { |
5439 | cmd->autoneg = AUTONEG_DISABLE; | 5439 | cmd->autoneg = AUTONEG_DISABLE; |
5440 | } | 5440 | } |
5441 | 5441 | ||
5442 | if (netif_carrier_ok(dev)) { | 5442 | if (netif_carrier_ok(dev)) { |
5443 | cmd->speed = bp->line_speed; | 5443 | cmd->speed = bp->line_speed; |
5444 | cmd->duplex = bp->duplex; | 5444 | cmd->duplex = bp->duplex; |
5445 | } | 5445 | } |
5446 | else { | 5446 | else { |
5447 | cmd->speed = -1; | 5447 | cmd->speed = -1; |
5448 | cmd->duplex = -1; | 5448 | cmd->duplex = -1; |
5449 | } | 5449 | } |
5450 | spin_unlock_bh(&bp->phy_lock); | 5450 | spin_unlock_bh(&bp->phy_lock); |
5451 | 5451 | ||
5452 | cmd->transceiver = XCVR_INTERNAL; | 5452 | cmd->transceiver = XCVR_INTERNAL; |
5453 | cmd->phy_address = bp->phy_addr; | 5453 | cmd->phy_address = bp->phy_addr; |
5454 | 5454 | ||
5455 | return 0; | 5455 | return 0; |
5456 | } | 5456 | } |
5457 | 5457 | ||
5458 | static int | 5458 | static int |
5459 | bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 5459 | bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
5460 | { | 5460 | { |
5461 | struct bnx2 *bp = netdev_priv(dev); | 5461 | struct bnx2 *bp = netdev_priv(dev); |
5462 | u8 autoneg = bp->autoneg; | 5462 | u8 autoneg = bp->autoneg; |
5463 | u8 req_duplex = bp->req_duplex; | 5463 | u8 req_duplex = bp->req_duplex; |
5464 | u16 req_line_speed = bp->req_line_speed; | 5464 | u16 req_line_speed = bp->req_line_speed; |
5465 | u32 advertising = bp->advertising; | 5465 | u32 advertising = bp->advertising; |
5466 | int err = -EINVAL; | 5466 | int err = -EINVAL; |
5467 | 5467 | ||
5468 | spin_lock_bh(&bp->phy_lock); | 5468 | spin_lock_bh(&bp->phy_lock); |
5469 | 5469 | ||
5470 | if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) | 5470 | if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) |
5471 | goto err_out_unlock; | 5471 | goto err_out_unlock; |
5472 | 5472 | ||
5473 | if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG)) | 5473 | if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG)) |
5474 | goto err_out_unlock; | 5474 | goto err_out_unlock; |
5475 | 5475 | ||
5476 | if (cmd->autoneg == AUTONEG_ENABLE) { | 5476 | if (cmd->autoneg == AUTONEG_ENABLE) { |
5477 | autoneg |= AUTONEG_SPEED; | 5477 | autoneg |= AUTONEG_SPEED; |
5478 | 5478 | ||
5479 | cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; | 5479 | cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; |
5480 | 5480 | ||
5481 | /* allow advertising 1 speed */ | 5481 | /* allow advertising 1 speed */ |
5482 | if ((cmd->advertising == ADVERTISED_10baseT_Half) || | 5482 | if ((cmd->advertising == ADVERTISED_10baseT_Half) || |
5483 | (cmd->advertising == ADVERTISED_10baseT_Full) || | 5483 | (cmd->advertising == ADVERTISED_10baseT_Full) || |
5484 | (cmd->advertising == ADVERTISED_100baseT_Half) || | 5484 | (cmd->advertising == ADVERTISED_100baseT_Half) || |
5485 | (cmd->advertising == ADVERTISED_100baseT_Full)) { | 5485 | (cmd->advertising == ADVERTISED_100baseT_Full)) { |
5486 | 5486 | ||
5487 | if (cmd->port == PORT_FIBRE) | 5487 | if (cmd->port == PORT_FIBRE) |
5488 | goto err_out_unlock; | 5488 | goto err_out_unlock; |
5489 | 5489 | ||
5490 | advertising = cmd->advertising; | 5490 | advertising = cmd->advertising; |
5491 | 5491 | ||
5492 | } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { | 5492 | } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { |
5493 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) || | 5493 | if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) || |
5494 | (cmd->port == PORT_TP)) | 5494 | (cmd->port == PORT_TP)) |
5495 | goto err_out_unlock; | 5495 | goto err_out_unlock; |
5496 | } else if (cmd->advertising == ADVERTISED_1000baseT_Full) | 5496 | } else if (cmd->advertising == ADVERTISED_1000baseT_Full) |
5497 | advertising = cmd->advertising; | 5497 | advertising = cmd->advertising; |
5498 | else if (cmd->advertising == ADVERTISED_1000baseT_Half) | 5498 | else if (cmd->advertising == ADVERTISED_1000baseT_Half) |
5499 | goto err_out_unlock; | 5499 | goto err_out_unlock; |
5500 | else { | 5500 | else { |
5501 | if (cmd->port == PORT_FIBRE) | 5501 | if (cmd->port == PORT_FIBRE) |
5502 | advertising = ETHTOOL_ALL_FIBRE_SPEED; | 5502 | advertising = ETHTOOL_ALL_FIBRE_SPEED; |
5503 | else | 5503 | else |
5504 | advertising = ETHTOOL_ALL_COPPER_SPEED; | 5504 | advertising = ETHTOOL_ALL_COPPER_SPEED; |
5505 | } | 5505 | } |
5506 | advertising |= ADVERTISED_Autoneg; | 5506 | advertising |= ADVERTISED_Autoneg; |
5507 | } | 5507 | } |
5508 | else { | 5508 | else { |
5509 | if (cmd->port == PORT_FIBRE) { | 5509 | if (cmd->port == PORT_FIBRE) { |
5510 | if ((cmd->speed != SPEED_1000 && | 5510 | if ((cmd->speed != SPEED_1000 && |
5511 | cmd->speed != SPEED_2500) || | 5511 | cmd->speed != SPEED_2500) || |
5512 | (cmd->duplex != DUPLEX_FULL)) | 5512 | (cmd->duplex != DUPLEX_FULL)) |
5513 | goto err_out_unlock; | 5513 | goto err_out_unlock; |
5514 | 5514 | ||
5515 | if (cmd->speed == SPEED_2500 && | 5515 | if (cmd->speed == SPEED_2500 && |
5516 | !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) | 5516 | !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) |
5517 | goto err_out_unlock; | 5517 | goto err_out_unlock; |
5518 | } | 5518 | } |
5519 | else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500) | 5519 | else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500) |
5520 | goto err_out_unlock; | 5520 | goto err_out_unlock; |
5521 | 5521 | ||
5522 | autoneg &= ~AUTONEG_SPEED; | 5522 | autoneg &= ~AUTONEG_SPEED; |
5523 | req_line_speed = cmd->speed; | 5523 | req_line_speed = cmd->speed; |
5524 | req_duplex = cmd->duplex; | 5524 | req_duplex = cmd->duplex; |
5525 | advertising = 0; | 5525 | advertising = 0; |
5526 | } | 5526 | } |
5527 | 5527 | ||
5528 | bp->autoneg = autoneg; | 5528 | bp->autoneg = autoneg; |
5529 | bp->advertising = advertising; | 5529 | bp->advertising = advertising; |
5530 | bp->req_line_speed = req_line_speed; | 5530 | bp->req_line_speed = req_line_speed; |
5531 | bp->req_duplex = req_duplex; | 5531 | bp->req_duplex = req_duplex; |
5532 | 5532 | ||
5533 | err = bnx2_setup_phy(bp, cmd->port); | 5533 | err = bnx2_setup_phy(bp, cmd->port); |
5534 | 5534 | ||
5535 | err_out_unlock: | 5535 | err_out_unlock: |
5536 | spin_unlock_bh(&bp->phy_lock); | 5536 | spin_unlock_bh(&bp->phy_lock); |
5537 | 5537 | ||
5538 | return err; | 5538 | return err; |
5539 | } | 5539 | } |
5540 | 5540 | ||
5541 | static void | 5541 | static void |
5542 | bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 5542 | bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
5543 | { | 5543 | { |
5544 | struct bnx2 *bp = netdev_priv(dev); | 5544 | struct bnx2 *bp = netdev_priv(dev); |
5545 | 5545 | ||
5546 | strcpy(info->driver, DRV_MODULE_NAME); | 5546 | strcpy(info->driver, DRV_MODULE_NAME); |
5547 | strcpy(info->version, DRV_MODULE_VERSION); | 5547 | strcpy(info->version, DRV_MODULE_VERSION); |
5548 | strcpy(info->bus_info, pci_name(bp->pdev)); | 5548 | strcpy(info->bus_info, pci_name(bp->pdev)); |
5549 | strcpy(info->fw_version, bp->fw_version); | 5549 | strcpy(info->fw_version, bp->fw_version); |
5550 | } | 5550 | } |
5551 | 5551 | ||
5552 | #define BNX2_REGDUMP_LEN (32 * 1024) | 5552 | #define BNX2_REGDUMP_LEN (32 * 1024) |
5553 | 5553 | ||
5554 | static int | 5554 | static int |
5555 | bnx2_get_regs_len(struct net_device *dev) | 5555 | bnx2_get_regs_len(struct net_device *dev) |
5556 | { | 5556 | { |
5557 | return BNX2_REGDUMP_LEN; | 5557 | return BNX2_REGDUMP_LEN; |
5558 | } | 5558 | } |
5559 | 5559 | ||
5560 | static void | 5560 | static void |
5561 | bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) | 5561 | bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) |
5562 | { | 5562 | { |
5563 | u32 *p = _p, i, offset; | 5563 | u32 *p = _p, i, offset; |
5564 | u8 *orig_p = _p; | 5564 | u8 *orig_p = _p; |
5565 | struct bnx2 *bp = netdev_priv(dev); | 5565 | struct bnx2 *bp = netdev_priv(dev); |
5566 | u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c, | 5566 | u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c, |
5567 | 0x0800, 0x0880, 0x0c00, 0x0c10, | 5567 | 0x0800, 0x0880, 0x0c00, 0x0c10, |
5568 | 0x0c30, 0x0d08, 0x1000, 0x101c, | 5568 | 0x0c30, 0x0d08, 0x1000, 0x101c, |
5569 | 0x1040, 0x1048, 0x1080, 0x10a4, | 5569 | 0x1040, 0x1048, 0x1080, 0x10a4, |
5570 | 0x1400, 0x1490, 0x1498, 0x14f0, | 5570 | 0x1400, 0x1490, 0x1498, 0x14f0, |
5571 | 0x1500, 0x155c, 0x1580, 0x15dc, | 5571 | 0x1500, 0x155c, 0x1580, 0x15dc, |
5572 | 0x1600, 0x1658, 0x1680, 0x16d8, | 5572 | 0x1600, 0x1658, 0x1680, 0x16d8, |
5573 | 0x1800, 0x1820, 0x1840, 0x1854, | 5573 | 0x1800, 0x1820, 0x1840, 0x1854, |
5574 | 0x1880, 0x1894, 0x1900, 0x1984, | 5574 | 0x1880, 0x1894, 0x1900, 0x1984, |
5575 | 0x1c00, 0x1c0c, 0x1c40, 0x1c54, | 5575 | 0x1c00, 0x1c0c, 0x1c40, 0x1c54, |
5576 | 0x1c80, 0x1c94, 0x1d00, 0x1d84, | 5576 | 0x1c80, 0x1c94, 0x1d00, 0x1d84, |
5577 | 0x2000, 0x2030, 0x23c0, 0x2400, | 5577 | 0x2000, 0x2030, 0x23c0, 0x2400, |
5578 | 0x2800, 0x2820, 0x2830, 0x2850, | 5578 | 0x2800, 0x2820, 0x2830, 0x2850, |
5579 | 0x2b40, 0x2c10, 0x2fc0, 0x3058, | 5579 | 0x2b40, 0x2c10, 0x2fc0, 0x3058, |
5580 | 0x3c00, 0x3c94, 0x4000, 0x4010, | 5580 | 0x3c00, 0x3c94, 0x4000, 0x4010, |
5581 | 0x4080, 0x4090, 0x43c0, 0x4458, | 5581 | 0x4080, 0x4090, 0x43c0, 0x4458, |
5582 | 0x4c00, 0x4c18, 0x4c40, 0x4c54, | 5582 | 0x4c00, 0x4c18, 0x4c40, 0x4c54, |
5583 | 0x4fc0, 0x5010, 0x53c0, 0x5444, | 5583 | 0x4fc0, 0x5010, 0x53c0, 0x5444, |
5584 | 0x5c00, 0x5c18, 0x5c80, 0x5c90, | 5584 | 0x5c00, 0x5c18, 0x5c80, 0x5c90, |
5585 | 0x5fc0, 0x6000, 0x6400, 0x6428, | 5585 | 0x5fc0, 0x6000, 0x6400, 0x6428, |
5586 | 0x6800, 0x6848, 0x684c, 0x6860, | 5586 | 0x6800, 0x6848, 0x684c, 0x6860, |
5587 | 0x6888, 0x6910, 0x8000 }; | 5587 | 0x6888, 0x6910, 0x8000 }; |
5588 | 5588 | ||
5589 | regs->version = 0; | 5589 | regs->version = 0; |
5590 | 5590 | ||
5591 | memset(p, 0, BNX2_REGDUMP_LEN); | 5591 | memset(p, 0, BNX2_REGDUMP_LEN); |
5592 | 5592 | ||
5593 | if (!netif_running(bp->dev)) | 5593 | if (!netif_running(bp->dev)) |
5594 | return; | 5594 | return; |
5595 | 5595 | ||
5596 | i = 0; | 5596 | i = 0; |
5597 | offset = reg_boundaries[0]; | 5597 | offset = reg_boundaries[0]; |
5598 | p += offset; | 5598 | p += offset; |
5599 | while (offset < BNX2_REGDUMP_LEN) { | 5599 | while (offset < BNX2_REGDUMP_LEN) { |
5600 | *p++ = REG_RD(bp, offset); | 5600 | *p++ = REG_RD(bp, offset); |
5601 | offset += 4; | 5601 | offset += 4; |
5602 | if (offset == reg_boundaries[i + 1]) { | 5602 | if (offset == reg_boundaries[i + 1]) { |
5603 | offset = reg_boundaries[i + 2]; | 5603 | offset = reg_boundaries[i + 2]; |
5604 | p = (u32 *) (orig_p + offset); | 5604 | p = (u32 *) (orig_p + offset); |
5605 | i += 2; | 5605 | i += 2; |
5606 | } | 5606 | } |
5607 | } | 5607 | } |
5608 | } | 5608 | } |
5609 | 5609 | ||
5610 | static void | 5610 | static void |
5611 | bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 5611 | bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
5612 | { | 5612 | { |
5613 | struct bnx2 *bp = netdev_priv(dev); | 5613 | struct bnx2 *bp = netdev_priv(dev); |
5614 | 5614 | ||
5615 | if (bp->flags & NO_WOL_FLAG) { | 5615 | if (bp->flags & NO_WOL_FLAG) { |
5616 | wol->supported = 0; | 5616 | wol->supported = 0; |
5617 | wol->wolopts = 0; | 5617 | wol->wolopts = 0; |
5618 | } | 5618 | } |
5619 | else { | 5619 | else { |
5620 | wol->supported = WAKE_MAGIC; | 5620 | wol->supported = WAKE_MAGIC; |
5621 | if (bp->wol) | 5621 | if (bp->wol) |
5622 | wol->wolopts = WAKE_MAGIC; | 5622 | wol->wolopts = WAKE_MAGIC; |
5623 | else | 5623 | else |
5624 | wol->wolopts = 0; | 5624 | wol->wolopts = 0; |
5625 | } | 5625 | } |
5626 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 5626 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
5627 | } | 5627 | } |
5628 | 5628 | ||
5629 | static int | 5629 | static int |
5630 | bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 5630 | bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
5631 | { | 5631 | { |
5632 | struct bnx2 *bp = netdev_priv(dev); | 5632 | struct bnx2 *bp = netdev_priv(dev); |
5633 | 5633 | ||
5634 | if (wol->wolopts & ~WAKE_MAGIC) | 5634 | if (wol->wolopts & ~WAKE_MAGIC) |
5635 | return -EINVAL; | 5635 | return -EINVAL; |
5636 | 5636 | ||
5637 | if (wol->wolopts & WAKE_MAGIC) { | 5637 | if (wol->wolopts & WAKE_MAGIC) { |
5638 | if (bp->flags & NO_WOL_FLAG) | 5638 | if (bp->flags & NO_WOL_FLAG) |
5639 | return -EINVAL; | 5639 | return -EINVAL; |
5640 | 5640 | ||
5641 | bp->wol = 1; | 5641 | bp->wol = 1; |
5642 | } | 5642 | } |
5643 | else { | 5643 | else { |
5644 | bp->wol = 0; | 5644 | bp->wol = 0; |
5645 | } | 5645 | } |
5646 | return 0; | 5646 | return 0; |
5647 | } | 5647 | } |
5648 | 5648 | ||
5649 | static int | 5649 | static int |
5650 | bnx2_nway_reset(struct net_device *dev) | 5650 | bnx2_nway_reset(struct net_device *dev) |
5651 | { | 5651 | { |
5652 | struct bnx2 *bp = netdev_priv(dev); | 5652 | struct bnx2 *bp = netdev_priv(dev); |
5653 | u32 bmcr; | 5653 | u32 bmcr; |
5654 | 5654 | ||
5655 | if (!(bp->autoneg & AUTONEG_SPEED)) { | 5655 | if (!(bp->autoneg & AUTONEG_SPEED)) { |
5656 | return -EINVAL; | 5656 | return -EINVAL; |
5657 | } | 5657 | } |
5658 | 5658 | ||
5659 | spin_lock_bh(&bp->phy_lock); | 5659 | spin_lock_bh(&bp->phy_lock); |
5660 | 5660 | ||
5661 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { | 5661 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { |
5662 | int rc; | 5662 | int rc; |
5663 | 5663 | ||
5664 | rc = bnx2_setup_remote_phy(bp, bp->phy_port); | 5664 | rc = bnx2_setup_remote_phy(bp, bp->phy_port); |
5665 | spin_unlock_bh(&bp->phy_lock); | 5665 | spin_unlock_bh(&bp->phy_lock); |
5666 | return rc; | 5666 | return rc; |
5667 | } | 5667 | } |
5668 | 5668 | ||
5669 | /* Force a link down visible on the other side */ | 5669 | /* Force a link down visible on the other side */ |
5670 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 5670 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
5671 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); | 5671 | bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); |
5672 | spin_unlock_bh(&bp->phy_lock); | 5672 | spin_unlock_bh(&bp->phy_lock); |
5673 | 5673 | ||
5674 | msleep(20); | 5674 | msleep(20); |
5675 | 5675 | ||
5676 | spin_lock_bh(&bp->phy_lock); | 5676 | spin_lock_bh(&bp->phy_lock); |
5677 | 5677 | ||
5678 | bp->current_interval = SERDES_AN_TIMEOUT; | 5678 | bp->current_interval = SERDES_AN_TIMEOUT; |
5679 | bp->serdes_an_pending = 1; | 5679 | bp->serdes_an_pending = 1; |
5680 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 5680 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
5681 | } | 5681 | } |
5682 | 5682 | ||
5683 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); | 5683 | bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); |
5684 | bmcr &= ~BMCR_LOOPBACK; | 5684 | bmcr &= ~BMCR_LOOPBACK; |
5685 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); | 5685 | bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); |
5686 | 5686 | ||
5687 | spin_unlock_bh(&bp->phy_lock); | 5687 | spin_unlock_bh(&bp->phy_lock); |
5688 | 5688 | ||
5689 | return 0; | 5689 | return 0; |
5690 | } | 5690 | } |
5691 | 5691 | ||
5692 | static int | 5692 | static int |
5693 | bnx2_get_eeprom_len(struct net_device *dev) | 5693 | bnx2_get_eeprom_len(struct net_device *dev) |
5694 | { | 5694 | { |
5695 | struct bnx2 *bp = netdev_priv(dev); | 5695 | struct bnx2 *bp = netdev_priv(dev); |
5696 | 5696 | ||
5697 | if (bp->flash_info == NULL) | 5697 | if (bp->flash_info == NULL) |
5698 | return 0; | 5698 | return 0; |
5699 | 5699 | ||
5700 | return (int) bp->flash_size; | 5700 | return (int) bp->flash_size; |
5701 | } | 5701 | } |
5702 | 5702 | ||
5703 | static int | 5703 | static int |
5704 | bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | 5704 | bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, |
5705 | u8 *eebuf) | 5705 | u8 *eebuf) |
5706 | { | 5706 | { |
5707 | struct bnx2 *bp = netdev_priv(dev); | 5707 | struct bnx2 *bp = netdev_priv(dev); |
5708 | int rc; | 5708 | int rc; |
5709 | 5709 | ||
5710 | /* parameters already validated in ethtool_get_eeprom */ | 5710 | /* parameters already validated in ethtool_get_eeprom */ |
5711 | 5711 | ||
5712 | rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); | 5712 | rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); |
5713 | 5713 | ||
5714 | return rc; | 5714 | return rc; |
5715 | } | 5715 | } |
5716 | 5716 | ||
5717 | static int | 5717 | static int |
5718 | bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | 5718 | bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, |
5719 | u8 *eebuf) | 5719 | u8 *eebuf) |
5720 | { | 5720 | { |
5721 | struct bnx2 *bp = netdev_priv(dev); | 5721 | struct bnx2 *bp = netdev_priv(dev); |
5722 | int rc; | 5722 | int rc; |
5723 | 5723 | ||
5724 | /* parameters already validated in ethtool_set_eeprom */ | 5724 | /* parameters already validated in ethtool_set_eeprom */ |
5725 | 5725 | ||
5726 | rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); | 5726 | rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); |
5727 | 5727 | ||
5728 | return rc; | 5728 | return rc; |
5729 | } | 5729 | } |
5730 | 5730 | ||
5731 | static int | 5731 | static int |
5732 | bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) | 5732 | bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) |
5733 | { | 5733 | { |
5734 | struct bnx2 *bp = netdev_priv(dev); | 5734 | struct bnx2 *bp = netdev_priv(dev); |
5735 | 5735 | ||
5736 | memset(coal, 0, sizeof(struct ethtool_coalesce)); | 5736 | memset(coal, 0, sizeof(struct ethtool_coalesce)); |
5737 | 5737 | ||
5738 | coal->rx_coalesce_usecs = bp->rx_ticks; | 5738 | coal->rx_coalesce_usecs = bp->rx_ticks; |
5739 | coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; | 5739 | coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; |
5740 | coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; | 5740 | coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; |
5741 | coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; | 5741 | coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; |
5742 | 5742 | ||
5743 | coal->tx_coalesce_usecs = bp->tx_ticks; | 5743 | coal->tx_coalesce_usecs = bp->tx_ticks; |
5744 | coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; | 5744 | coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; |
5745 | coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; | 5745 | coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; |
5746 | coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; | 5746 | coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; |
5747 | 5747 | ||
5748 | coal->stats_block_coalesce_usecs = bp->stats_ticks; | 5748 | coal->stats_block_coalesce_usecs = bp->stats_ticks; |
5749 | 5749 | ||
5750 | return 0; | 5750 | return 0; |
5751 | } | 5751 | } |
5752 | 5752 | ||
5753 | static int | 5753 | static int |
5754 | bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) | 5754 | bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) |
5755 | { | 5755 | { |
5756 | struct bnx2 *bp = netdev_priv(dev); | 5756 | struct bnx2 *bp = netdev_priv(dev); |
5757 | 5757 | ||
5758 | bp->rx_ticks = (u16) coal->rx_coalesce_usecs; | 5758 | bp->rx_ticks = (u16) coal->rx_coalesce_usecs; |
5759 | if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; | 5759 | if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; |
5760 | 5760 | ||
5761 | bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; | 5761 | bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; |
5762 | if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; | 5762 | if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; |
5763 | 5763 | ||
5764 | bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; | 5764 | bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; |
5765 | if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; | 5765 | if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; |
5766 | 5766 | ||
5767 | bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; | 5767 | bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; |
5768 | if (bp->rx_quick_cons_trip_int > 0xff) | 5768 | if (bp->rx_quick_cons_trip_int > 0xff) |
5769 | bp->rx_quick_cons_trip_int = 0xff; | 5769 | bp->rx_quick_cons_trip_int = 0xff; |
5770 | 5770 | ||
5771 | bp->tx_ticks = (u16) coal->tx_coalesce_usecs; | 5771 | bp->tx_ticks = (u16) coal->tx_coalesce_usecs; |
5772 | if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; | 5772 | if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; |
5773 | 5773 | ||
5774 | bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; | 5774 | bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; |
5775 | if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; | 5775 | if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; |
5776 | 5776 | ||
5777 | bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; | 5777 | bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; |
5778 | if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; | 5778 | if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; |
5779 | 5779 | ||
5780 | bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; | 5780 | bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; |
5781 | if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = | 5781 | if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = |
5782 | 0xff; | 5782 | 0xff; |
5783 | 5783 | ||
5784 | bp->stats_ticks = coal->stats_block_coalesce_usecs; | 5784 | bp->stats_ticks = coal->stats_block_coalesce_usecs; |
5785 | if (CHIP_NUM(bp) == CHIP_NUM_5708) { | 5785 | if (CHIP_NUM(bp) == CHIP_NUM_5708) { |
5786 | if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) | 5786 | if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) |
5787 | bp->stats_ticks = USEC_PER_SEC; | 5787 | bp->stats_ticks = USEC_PER_SEC; |
5788 | } | 5788 | } |
5789 | if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00; | 5789 | if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00; |
5790 | bp->stats_ticks &= 0xffff00; | 5790 | bp->stats_ticks &= 0xffff00; |
5791 | 5791 | ||
5792 | if (netif_running(bp->dev)) { | 5792 | if (netif_running(bp->dev)) { |
5793 | bnx2_netif_stop(bp); | 5793 | bnx2_netif_stop(bp); |
5794 | bnx2_init_nic(bp); | 5794 | bnx2_init_nic(bp); |
5795 | bnx2_netif_start(bp); | 5795 | bnx2_netif_start(bp); |
5796 | } | 5796 | } |
5797 | 5797 | ||
5798 | return 0; | 5798 | return 0; |
5799 | } | 5799 | } |
5800 | 5800 | ||
5801 | static void | 5801 | static void |
5802 | bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 5802 | bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
5803 | { | 5803 | { |
5804 | struct bnx2 *bp = netdev_priv(dev); | 5804 | struct bnx2 *bp = netdev_priv(dev); |
5805 | 5805 | ||
5806 | ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; | 5806 | ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; |
5807 | ering->rx_mini_max_pending = 0; | 5807 | ering->rx_mini_max_pending = 0; |
5808 | ering->rx_jumbo_max_pending = 0; | 5808 | ering->rx_jumbo_max_pending = 0; |
5809 | 5809 | ||
5810 | ering->rx_pending = bp->rx_ring_size; | 5810 | ering->rx_pending = bp->rx_ring_size; |
5811 | ering->rx_mini_pending = 0; | 5811 | ering->rx_mini_pending = 0; |
5812 | ering->rx_jumbo_pending = 0; | 5812 | ering->rx_jumbo_pending = 0; |
5813 | 5813 | ||
5814 | ering->tx_max_pending = MAX_TX_DESC_CNT; | 5814 | ering->tx_max_pending = MAX_TX_DESC_CNT; |
5815 | ering->tx_pending = bp->tx_ring_size; | 5815 | ering->tx_pending = bp->tx_ring_size; |
5816 | } | 5816 | } |
5817 | 5817 | ||
5818 | static int | 5818 | static int |
5819 | bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 5819 | bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
5820 | { | 5820 | { |
5821 | struct bnx2 *bp = netdev_priv(dev); | 5821 | struct bnx2 *bp = netdev_priv(dev); |
5822 | 5822 | ||
5823 | if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || | 5823 | if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || |
5824 | (ering->tx_pending > MAX_TX_DESC_CNT) || | 5824 | (ering->tx_pending > MAX_TX_DESC_CNT) || |
5825 | (ering->tx_pending <= MAX_SKB_FRAGS)) { | 5825 | (ering->tx_pending <= MAX_SKB_FRAGS)) { |
5826 | 5826 | ||
5827 | return -EINVAL; | 5827 | return -EINVAL; |
5828 | } | 5828 | } |
5829 | if (netif_running(bp->dev)) { | 5829 | if (netif_running(bp->dev)) { |
5830 | bnx2_netif_stop(bp); | 5830 | bnx2_netif_stop(bp); |
5831 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | 5831 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); |
5832 | bnx2_free_skbs(bp); | 5832 | bnx2_free_skbs(bp); |
5833 | bnx2_free_mem(bp); | 5833 | bnx2_free_mem(bp); |
5834 | } | 5834 | } |
5835 | 5835 | ||
5836 | bnx2_set_rx_ring_size(bp, ering->rx_pending); | 5836 | bnx2_set_rx_ring_size(bp, ering->rx_pending); |
5837 | bp->tx_ring_size = ering->tx_pending; | 5837 | bp->tx_ring_size = ering->tx_pending; |
5838 | 5838 | ||
5839 | if (netif_running(bp->dev)) { | 5839 | if (netif_running(bp->dev)) { |
5840 | int rc; | 5840 | int rc; |
5841 | 5841 | ||
5842 | rc = bnx2_alloc_mem(bp); | 5842 | rc = bnx2_alloc_mem(bp); |
5843 | if (rc) | 5843 | if (rc) |
5844 | return rc; | 5844 | return rc; |
5845 | bnx2_init_nic(bp); | 5845 | bnx2_init_nic(bp); |
5846 | bnx2_netif_start(bp); | 5846 | bnx2_netif_start(bp); |
5847 | } | 5847 | } |
5848 | 5848 | ||
5849 | return 0; | 5849 | return 0; |
5850 | } | 5850 | } |
5851 | 5851 | ||
5852 | static void | 5852 | static void |
5853 | bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 5853 | bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
5854 | { | 5854 | { |
5855 | struct bnx2 *bp = netdev_priv(dev); | 5855 | struct bnx2 *bp = netdev_priv(dev); |
5856 | 5856 | ||
5857 | epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); | 5857 | epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); |
5858 | epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); | 5858 | epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); |
5859 | epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); | 5859 | epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); |
5860 | } | 5860 | } |
5861 | 5861 | ||
5862 | static int | 5862 | static int |
5863 | bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 5863 | bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
5864 | { | 5864 | { |
5865 | struct bnx2 *bp = netdev_priv(dev); | 5865 | struct bnx2 *bp = netdev_priv(dev); |
5866 | 5866 | ||
5867 | bp->req_flow_ctrl = 0; | 5867 | bp->req_flow_ctrl = 0; |
5868 | if (epause->rx_pause) | 5868 | if (epause->rx_pause) |
5869 | bp->req_flow_ctrl |= FLOW_CTRL_RX; | 5869 | bp->req_flow_ctrl |= FLOW_CTRL_RX; |
5870 | if (epause->tx_pause) | 5870 | if (epause->tx_pause) |
5871 | bp->req_flow_ctrl |= FLOW_CTRL_TX; | 5871 | bp->req_flow_ctrl |= FLOW_CTRL_TX; |
5872 | 5872 | ||
5873 | if (epause->autoneg) { | 5873 | if (epause->autoneg) { |
5874 | bp->autoneg |= AUTONEG_FLOW_CTRL; | 5874 | bp->autoneg |= AUTONEG_FLOW_CTRL; |
5875 | } | 5875 | } |
5876 | else { | 5876 | else { |
5877 | bp->autoneg &= ~AUTONEG_FLOW_CTRL; | 5877 | bp->autoneg &= ~AUTONEG_FLOW_CTRL; |
5878 | } | 5878 | } |
5879 | 5879 | ||
5880 | spin_lock_bh(&bp->phy_lock); | 5880 | spin_lock_bh(&bp->phy_lock); |
5881 | 5881 | ||
5882 | bnx2_setup_phy(bp, bp->phy_port); | 5882 | bnx2_setup_phy(bp, bp->phy_port); |
5883 | 5883 | ||
5884 | spin_unlock_bh(&bp->phy_lock); | 5884 | spin_unlock_bh(&bp->phy_lock); |
5885 | 5885 | ||
5886 | return 0; | 5886 | return 0; |
5887 | } | 5887 | } |
5888 | 5888 | ||
5889 | static u32 | 5889 | static u32 |
5890 | bnx2_get_rx_csum(struct net_device *dev) | 5890 | bnx2_get_rx_csum(struct net_device *dev) |
5891 | { | 5891 | { |
5892 | struct bnx2 *bp = netdev_priv(dev); | 5892 | struct bnx2 *bp = netdev_priv(dev); |
5893 | 5893 | ||
5894 | return bp->rx_csum; | 5894 | return bp->rx_csum; |
5895 | } | 5895 | } |
5896 | 5896 | ||
5897 | static int | 5897 | static int |
5898 | bnx2_set_rx_csum(struct net_device *dev, u32 data) | 5898 | bnx2_set_rx_csum(struct net_device *dev, u32 data) |
5899 | { | 5899 | { |
5900 | struct bnx2 *bp = netdev_priv(dev); | 5900 | struct bnx2 *bp = netdev_priv(dev); |
5901 | 5901 | ||
5902 | bp->rx_csum = data; | 5902 | bp->rx_csum = data; |
5903 | return 0; | 5903 | return 0; |
5904 | } | 5904 | } |
5905 | 5905 | ||
5906 | static int | 5906 | static int |
5907 | bnx2_set_tso(struct net_device *dev, u32 data) | 5907 | bnx2_set_tso(struct net_device *dev, u32 data) |
5908 | { | 5908 | { |
5909 | struct bnx2 *bp = netdev_priv(dev); | 5909 | struct bnx2 *bp = netdev_priv(dev); |
5910 | 5910 | ||
5911 | if (data) { | 5911 | if (data) { |
5912 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; | 5912 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; |
5913 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 5913 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
5914 | dev->features |= NETIF_F_TSO6; | 5914 | dev->features |= NETIF_F_TSO6; |
5915 | } else | 5915 | } else |
5916 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | | 5916 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | |
5917 | NETIF_F_TSO_ECN); | 5917 | NETIF_F_TSO_ECN); |
5918 | return 0; | 5918 | return 0; |
5919 | } | 5919 | } |
5920 | 5920 | ||
5921 | #define BNX2_NUM_STATS 46 | 5921 | #define BNX2_NUM_STATS 46 |
5922 | 5922 | ||
5923 | static struct { | 5923 | static struct { |
5924 | char string[ETH_GSTRING_LEN]; | 5924 | char string[ETH_GSTRING_LEN]; |
5925 | } bnx2_stats_str_arr[BNX2_NUM_STATS] = { | 5925 | } bnx2_stats_str_arr[BNX2_NUM_STATS] = { |
5926 | { "rx_bytes" }, | 5926 | { "rx_bytes" }, |
5927 | { "rx_error_bytes" }, | 5927 | { "rx_error_bytes" }, |
5928 | { "tx_bytes" }, | 5928 | { "tx_bytes" }, |
5929 | { "tx_error_bytes" }, | 5929 | { "tx_error_bytes" }, |
5930 | { "rx_ucast_packets" }, | 5930 | { "rx_ucast_packets" }, |
5931 | { "rx_mcast_packets" }, | 5931 | { "rx_mcast_packets" }, |
5932 | { "rx_bcast_packets" }, | 5932 | { "rx_bcast_packets" }, |
5933 | { "tx_ucast_packets" }, | 5933 | { "tx_ucast_packets" }, |
5934 | { "tx_mcast_packets" }, | 5934 | { "tx_mcast_packets" }, |
5935 | { "tx_bcast_packets" }, | 5935 | { "tx_bcast_packets" }, |
5936 | { "tx_mac_errors" }, | 5936 | { "tx_mac_errors" }, |
5937 | { "tx_carrier_errors" }, | 5937 | { "tx_carrier_errors" }, |
5938 | { "rx_crc_errors" }, | 5938 | { "rx_crc_errors" }, |
5939 | { "rx_align_errors" }, | 5939 | { "rx_align_errors" }, |
5940 | { "tx_single_collisions" }, | 5940 | { "tx_single_collisions" }, |
5941 | { "tx_multi_collisions" }, | 5941 | { "tx_multi_collisions" }, |
5942 | { "tx_deferred" }, | 5942 | { "tx_deferred" }, |
5943 | { "tx_excess_collisions" }, | 5943 | { "tx_excess_collisions" }, |
5944 | { "tx_late_collisions" }, | 5944 | { "tx_late_collisions" }, |
5945 | { "tx_total_collisions" }, | 5945 | { "tx_total_collisions" }, |
5946 | { "rx_fragments" }, | 5946 | { "rx_fragments" }, |
5947 | { "rx_jabbers" }, | 5947 | { "rx_jabbers" }, |
5948 | { "rx_undersize_packets" }, | 5948 | { "rx_undersize_packets" }, |
5949 | { "rx_oversize_packets" }, | 5949 | { "rx_oversize_packets" }, |
5950 | { "rx_64_byte_packets" }, | 5950 | { "rx_64_byte_packets" }, |
5951 | { "rx_65_to_127_byte_packets" }, | 5951 | { "rx_65_to_127_byte_packets" }, |
5952 | { "rx_128_to_255_byte_packets" }, | 5952 | { "rx_128_to_255_byte_packets" }, |
5953 | { "rx_256_to_511_byte_packets" }, | 5953 | { "rx_256_to_511_byte_packets" }, |
5954 | { "rx_512_to_1023_byte_packets" }, | 5954 | { "rx_512_to_1023_byte_packets" }, |
5955 | { "rx_1024_to_1522_byte_packets" }, | 5955 | { "rx_1024_to_1522_byte_packets" }, |
5956 | { "rx_1523_to_9022_byte_packets" }, | 5956 | { "rx_1523_to_9022_byte_packets" }, |
5957 | { "tx_64_byte_packets" }, | 5957 | { "tx_64_byte_packets" }, |
5958 | { "tx_65_to_127_byte_packets" }, | 5958 | { "tx_65_to_127_byte_packets" }, |
5959 | { "tx_128_to_255_byte_packets" }, | 5959 | { "tx_128_to_255_byte_packets" }, |
5960 | { "tx_256_to_511_byte_packets" }, | 5960 | { "tx_256_to_511_byte_packets" }, |
5961 | { "tx_512_to_1023_byte_packets" }, | 5961 | { "tx_512_to_1023_byte_packets" }, |
5962 | { "tx_1024_to_1522_byte_packets" }, | 5962 | { "tx_1024_to_1522_byte_packets" }, |
5963 | { "tx_1523_to_9022_byte_packets" }, | 5963 | { "tx_1523_to_9022_byte_packets" }, |
5964 | { "rx_xon_frames" }, | 5964 | { "rx_xon_frames" }, |
5965 | { "rx_xoff_frames" }, | 5965 | { "rx_xoff_frames" }, |
5966 | { "tx_xon_frames" }, | 5966 | { "tx_xon_frames" }, |
5967 | { "tx_xoff_frames" }, | 5967 | { "tx_xoff_frames" }, |
5968 | { "rx_mac_ctrl_frames" }, | 5968 | { "rx_mac_ctrl_frames" }, |
5969 | { "rx_filtered_packets" }, | 5969 | { "rx_filtered_packets" }, |
5970 | { "rx_discards" }, | 5970 | { "rx_discards" }, |
5971 | { "rx_fw_discards" }, | 5971 | { "rx_fw_discards" }, |
5972 | }; | 5972 | }; |
5973 | 5973 | ||
5974 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) | 5974 | #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) |
5975 | 5975 | ||
5976 | static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { | 5976 | static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { |
5977 | STATS_OFFSET32(stat_IfHCInOctets_hi), | 5977 | STATS_OFFSET32(stat_IfHCInOctets_hi), |
5978 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), | 5978 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), |
5979 | STATS_OFFSET32(stat_IfHCOutOctets_hi), | 5979 | STATS_OFFSET32(stat_IfHCOutOctets_hi), |
5980 | STATS_OFFSET32(stat_IfHCOutBadOctets_hi), | 5980 | STATS_OFFSET32(stat_IfHCOutBadOctets_hi), |
5981 | STATS_OFFSET32(stat_IfHCInUcastPkts_hi), | 5981 | STATS_OFFSET32(stat_IfHCInUcastPkts_hi), |
5982 | STATS_OFFSET32(stat_IfHCInMulticastPkts_hi), | 5982 | STATS_OFFSET32(stat_IfHCInMulticastPkts_hi), |
5983 | STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi), | 5983 | STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi), |
5984 | STATS_OFFSET32(stat_IfHCOutUcastPkts_hi), | 5984 | STATS_OFFSET32(stat_IfHCOutUcastPkts_hi), |
5985 | STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi), | 5985 | STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi), |
5986 | STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi), | 5986 | STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi), |
5987 | STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors), | 5987 | STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors), |
5988 | STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), | 5988 | STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), |
5989 | STATS_OFFSET32(stat_Dot3StatsFCSErrors), | 5989 | STATS_OFFSET32(stat_Dot3StatsFCSErrors), |
5990 | STATS_OFFSET32(stat_Dot3StatsAlignmentErrors), | 5990 | STATS_OFFSET32(stat_Dot3StatsAlignmentErrors), |
5991 | STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames), | 5991 | STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames), |
5992 | STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames), | 5992 | STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames), |
5993 | STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), | 5993 | STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), |
5994 | STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions), | 5994 | STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions), |
5995 | STATS_OFFSET32(stat_Dot3StatsLateCollisions), | 5995 | STATS_OFFSET32(stat_Dot3StatsLateCollisions), |
5996 | STATS_OFFSET32(stat_EtherStatsCollisions), | 5996 | STATS_OFFSET32(stat_EtherStatsCollisions), |
5997 | STATS_OFFSET32(stat_EtherStatsFragments), | 5997 | STATS_OFFSET32(stat_EtherStatsFragments), |
5998 | STATS_OFFSET32(stat_EtherStatsJabbers), | 5998 | STATS_OFFSET32(stat_EtherStatsJabbers), |
5999 | STATS_OFFSET32(stat_EtherStatsUndersizePkts), | 5999 | STATS_OFFSET32(stat_EtherStatsUndersizePkts), |
6000 | STATS_OFFSET32(stat_EtherStatsOverrsizePkts), | 6000 | STATS_OFFSET32(stat_EtherStatsOverrsizePkts), |
6001 | STATS_OFFSET32(stat_EtherStatsPktsRx64Octets), | 6001 | STATS_OFFSET32(stat_EtherStatsPktsRx64Octets), |
6002 | STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets), | 6002 | STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets), |
6003 | STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets), | 6003 | STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets), |
6004 | STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets), | 6004 | STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets), |
6005 | STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets), | 6005 | STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets), |
6006 | STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets), | 6006 | STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets), |
6007 | STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets), | 6007 | STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets), |
6008 | STATS_OFFSET32(stat_EtherStatsPktsTx64Octets), | 6008 | STATS_OFFSET32(stat_EtherStatsPktsTx64Octets), |
6009 | STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets), | 6009 | STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets), |
6010 | STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets), | 6010 | STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets), |
6011 | STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets), | 6011 | STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets), |
6012 | STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets), | 6012 | STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets), |
6013 | STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets), | 6013 | STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets), |
6014 | STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets), | 6014 | STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets), |
6015 | STATS_OFFSET32(stat_XonPauseFramesReceived), | 6015 | STATS_OFFSET32(stat_XonPauseFramesReceived), |
6016 | STATS_OFFSET32(stat_XoffPauseFramesReceived), | 6016 | STATS_OFFSET32(stat_XoffPauseFramesReceived), |
6017 | STATS_OFFSET32(stat_OutXonSent), | 6017 | STATS_OFFSET32(stat_OutXonSent), |
6018 | STATS_OFFSET32(stat_OutXoffSent), | 6018 | STATS_OFFSET32(stat_OutXoffSent), |
6019 | STATS_OFFSET32(stat_MacControlFramesReceived), | 6019 | STATS_OFFSET32(stat_MacControlFramesReceived), |
6020 | STATS_OFFSET32(stat_IfInFramesL2FilterDiscards), | 6020 | STATS_OFFSET32(stat_IfInFramesL2FilterDiscards), |
6021 | STATS_OFFSET32(stat_IfInMBUFDiscards), | 6021 | STATS_OFFSET32(stat_IfInMBUFDiscards), |
6022 | STATS_OFFSET32(stat_FwRxDrop), | 6022 | STATS_OFFSET32(stat_FwRxDrop), |
6023 | }; | 6023 | }; |
6024 | 6024 | ||
6025 | /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are | 6025 | /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are |
6026 | * skipped because of errata. | 6026 | * skipped because of errata. |
6027 | */ | 6027 | */ |
6028 | static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { | 6028 | static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { |
6029 | 8,0,8,8,8,8,8,8,8,8, | 6029 | 8,0,8,8,8,8,8,8,8,8, |
6030 | 4,0,4,4,4,4,4,4,4,4, | 6030 | 4,0,4,4,4,4,4,4,4,4, |
6031 | 4,4,4,4,4,4,4,4,4,4, | 6031 | 4,4,4,4,4,4,4,4,4,4, |
6032 | 4,4,4,4,4,4,4,4,4,4, | 6032 | 4,4,4,4,4,4,4,4,4,4, |
6033 | 4,4,4,4,4,4, | 6033 | 4,4,4,4,4,4, |
6034 | }; | 6034 | }; |
6035 | 6035 | ||
6036 | static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = { | 6036 | static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = { |
6037 | 8,0,8,8,8,8,8,8,8,8, | 6037 | 8,0,8,8,8,8,8,8,8,8, |
6038 | 4,4,4,4,4,4,4,4,4,4, | 6038 | 4,4,4,4,4,4,4,4,4,4, |
6039 | 4,4,4,4,4,4,4,4,4,4, | 6039 | 4,4,4,4,4,4,4,4,4,4, |
6040 | 4,4,4,4,4,4,4,4,4,4, | 6040 | 4,4,4,4,4,4,4,4,4,4, |
6041 | 4,4,4,4,4,4, | 6041 | 4,4,4,4,4,4, |
6042 | }; | 6042 | }; |
6043 | 6043 | ||
6044 | #define BNX2_NUM_TESTS 6 | 6044 | #define BNX2_NUM_TESTS 6 |
6045 | 6045 | ||
6046 | static struct { | 6046 | static struct { |
6047 | char string[ETH_GSTRING_LEN]; | 6047 | char string[ETH_GSTRING_LEN]; |
6048 | } bnx2_tests_str_arr[BNX2_NUM_TESTS] = { | 6048 | } bnx2_tests_str_arr[BNX2_NUM_TESTS] = { |
6049 | { "register_test (offline)" }, | 6049 | { "register_test (offline)" }, |
6050 | { "memory_test (offline)" }, | 6050 | { "memory_test (offline)" }, |
6051 | { "loopback_test (offline)" }, | 6051 | { "loopback_test (offline)" }, |
6052 | { "nvram_test (online)" }, | 6052 | { "nvram_test (online)" }, |
6053 | { "interrupt_test (online)" }, | 6053 | { "interrupt_test (online)" }, |
6054 | { "link_test (online)" }, | 6054 | { "link_test (online)" }, |
6055 | }; | 6055 | }; |
6056 | 6056 | ||
6057 | static int | 6057 | static int |
6058 | bnx2_self_test_count(struct net_device *dev) | 6058 | bnx2_self_test_count(struct net_device *dev) |
6059 | { | 6059 | { |
6060 | return BNX2_NUM_TESTS; | 6060 | return BNX2_NUM_TESTS; |
6061 | } | 6061 | } |
6062 | 6062 | ||
6063 | static void | 6063 | static void |
6064 | bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | 6064 | bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) |
6065 | { | 6065 | { |
6066 | struct bnx2 *bp = netdev_priv(dev); | 6066 | struct bnx2 *bp = netdev_priv(dev); |
6067 | 6067 | ||
6068 | memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); | 6068 | memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); |
6069 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 6069 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
6070 | int i; | 6070 | int i; |
6071 | 6071 | ||
6072 | bnx2_netif_stop(bp); | 6072 | bnx2_netif_stop(bp); |
6073 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); | 6073 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); |
6074 | bnx2_free_skbs(bp); | 6074 | bnx2_free_skbs(bp); |
6075 | 6075 | ||
6076 | if (bnx2_test_registers(bp) != 0) { | 6076 | if (bnx2_test_registers(bp) != 0) { |
6077 | buf[0] = 1; | 6077 | buf[0] = 1; |
6078 | etest->flags |= ETH_TEST_FL_FAILED; | 6078 | etest->flags |= ETH_TEST_FL_FAILED; |
6079 | } | 6079 | } |
6080 | if (bnx2_test_memory(bp) != 0) { | 6080 | if (bnx2_test_memory(bp) != 0) { |
6081 | buf[1] = 1; | 6081 | buf[1] = 1; |
6082 | etest->flags |= ETH_TEST_FL_FAILED; | 6082 | etest->flags |= ETH_TEST_FL_FAILED; |
6083 | } | 6083 | } |
6084 | if ((buf[2] = bnx2_test_loopback(bp)) != 0) | 6084 | if ((buf[2] = bnx2_test_loopback(bp)) != 0) |
6085 | etest->flags |= ETH_TEST_FL_FAILED; | 6085 | etest->flags |= ETH_TEST_FL_FAILED; |
6086 | 6086 | ||
6087 | if (!netif_running(bp->dev)) { | 6087 | if (!netif_running(bp->dev)) { |
6088 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | 6088 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); |
6089 | } | 6089 | } |
6090 | else { | 6090 | else { |
6091 | bnx2_init_nic(bp); | 6091 | bnx2_init_nic(bp); |
6092 | bnx2_netif_start(bp); | 6092 | bnx2_netif_start(bp); |
6093 | } | 6093 | } |
6094 | 6094 | ||
6095 | /* wait for link up */ | 6095 | /* wait for link up */ |
6096 | for (i = 0; i < 7; i++) { | 6096 | for (i = 0; i < 7; i++) { |
6097 | if (bp->link_up) | 6097 | if (bp->link_up) |
6098 | break; | 6098 | break; |
6099 | msleep_interruptible(1000); | 6099 | msleep_interruptible(1000); |
6100 | } | 6100 | } |
6101 | } | 6101 | } |
6102 | 6102 | ||
6103 | if (bnx2_test_nvram(bp) != 0) { | 6103 | if (bnx2_test_nvram(bp) != 0) { |
6104 | buf[3] = 1; | 6104 | buf[3] = 1; |
6105 | etest->flags |= ETH_TEST_FL_FAILED; | 6105 | etest->flags |= ETH_TEST_FL_FAILED; |
6106 | } | 6106 | } |
6107 | if (bnx2_test_intr(bp) != 0) { | 6107 | if (bnx2_test_intr(bp) != 0) { |
6108 | buf[4] = 1; | 6108 | buf[4] = 1; |
6109 | etest->flags |= ETH_TEST_FL_FAILED; | 6109 | etest->flags |= ETH_TEST_FL_FAILED; |
6110 | } | 6110 | } |
6111 | 6111 | ||
6112 | if (bnx2_test_link(bp) != 0) { | 6112 | if (bnx2_test_link(bp) != 0) { |
6113 | buf[5] = 1; | 6113 | buf[5] = 1; |
6114 | etest->flags |= ETH_TEST_FL_FAILED; | 6114 | etest->flags |= ETH_TEST_FL_FAILED; |
6115 | 6115 | ||
6116 | } | 6116 | } |
6117 | } | 6117 | } |
6118 | 6118 | ||
6119 | static void | 6119 | static void |
6120 | bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | 6120 | bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) |
6121 | { | 6121 | { |
6122 | switch (stringset) { | 6122 | switch (stringset) { |
6123 | case ETH_SS_STATS: | 6123 | case ETH_SS_STATS: |
6124 | memcpy(buf, bnx2_stats_str_arr, | 6124 | memcpy(buf, bnx2_stats_str_arr, |
6125 | sizeof(bnx2_stats_str_arr)); | 6125 | sizeof(bnx2_stats_str_arr)); |
6126 | break; | 6126 | break; |
6127 | case ETH_SS_TEST: | 6127 | case ETH_SS_TEST: |
6128 | memcpy(buf, bnx2_tests_str_arr, | 6128 | memcpy(buf, bnx2_tests_str_arr, |
6129 | sizeof(bnx2_tests_str_arr)); | 6129 | sizeof(bnx2_tests_str_arr)); |
6130 | break; | 6130 | break; |
6131 | } | 6131 | } |
6132 | } | 6132 | } |
6133 | 6133 | ||
6134 | static int | 6134 | static int |
6135 | bnx2_get_stats_count(struct net_device *dev) | 6135 | bnx2_get_stats_count(struct net_device *dev) |
6136 | { | 6136 | { |
6137 | return BNX2_NUM_STATS; | 6137 | return BNX2_NUM_STATS; |
6138 | } | 6138 | } |
6139 | 6139 | ||
6140 | static void | 6140 | static void |
6141 | bnx2_get_ethtool_stats(struct net_device *dev, | 6141 | bnx2_get_ethtool_stats(struct net_device *dev, |
6142 | struct ethtool_stats *stats, u64 *buf) | 6142 | struct ethtool_stats *stats, u64 *buf) |
6143 | { | 6143 | { |
6144 | struct bnx2 *bp = netdev_priv(dev); | 6144 | struct bnx2 *bp = netdev_priv(dev); |
6145 | int i; | 6145 | int i; |
6146 | u32 *hw_stats = (u32 *) bp->stats_blk; | 6146 | u32 *hw_stats = (u32 *) bp->stats_blk; |
6147 | u8 *stats_len_arr = NULL; | 6147 | u8 *stats_len_arr = NULL; |
6148 | 6148 | ||
6149 | if (hw_stats == NULL) { | 6149 | if (hw_stats == NULL) { |
6150 | memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); | 6150 | memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); |
6151 | return; | 6151 | return; |
6152 | } | 6152 | } |
6153 | 6153 | ||
6154 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || | 6154 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || |
6155 | (CHIP_ID(bp) == CHIP_ID_5706_A1) || | 6155 | (CHIP_ID(bp) == CHIP_ID_5706_A1) || |
6156 | (CHIP_ID(bp) == CHIP_ID_5706_A2) || | 6156 | (CHIP_ID(bp) == CHIP_ID_5706_A2) || |
6157 | (CHIP_ID(bp) == CHIP_ID_5708_A0)) | 6157 | (CHIP_ID(bp) == CHIP_ID_5708_A0)) |
6158 | stats_len_arr = bnx2_5706_stats_len_arr; | 6158 | stats_len_arr = bnx2_5706_stats_len_arr; |
6159 | else | 6159 | else |
6160 | stats_len_arr = bnx2_5708_stats_len_arr; | 6160 | stats_len_arr = bnx2_5708_stats_len_arr; |
6161 | 6161 | ||
6162 | for (i = 0; i < BNX2_NUM_STATS; i++) { | 6162 | for (i = 0; i < BNX2_NUM_STATS; i++) { |
6163 | if (stats_len_arr[i] == 0) { | 6163 | if (stats_len_arr[i] == 0) { |
6164 | /* skip this counter */ | 6164 | /* skip this counter */ |
6165 | buf[i] = 0; | 6165 | buf[i] = 0; |
6166 | continue; | 6166 | continue; |
6167 | } | 6167 | } |
6168 | if (stats_len_arr[i] == 4) { | 6168 | if (stats_len_arr[i] == 4) { |
6169 | /* 4-byte counter */ | 6169 | /* 4-byte counter */ |
6170 | buf[i] = (u64) | 6170 | buf[i] = (u64) |
6171 | *(hw_stats + bnx2_stats_offset_arr[i]); | 6171 | *(hw_stats + bnx2_stats_offset_arr[i]); |
6172 | continue; | 6172 | continue; |
6173 | } | 6173 | } |
6174 | /* 8-byte counter */ | 6174 | /* 8-byte counter */ |
6175 | buf[i] = (((u64) *(hw_stats + | 6175 | buf[i] = (((u64) *(hw_stats + |
6176 | bnx2_stats_offset_arr[i])) << 32) + | 6176 | bnx2_stats_offset_arr[i])) << 32) + |
6177 | *(hw_stats + bnx2_stats_offset_arr[i] + 1); | 6177 | *(hw_stats + bnx2_stats_offset_arr[i] + 1); |
6178 | } | 6178 | } |
6179 | } | 6179 | } |
6180 | 6180 | ||
6181 | static int | 6181 | static int |
6182 | bnx2_phys_id(struct net_device *dev, u32 data) | 6182 | bnx2_phys_id(struct net_device *dev, u32 data) |
6183 | { | 6183 | { |
6184 | struct bnx2 *bp = netdev_priv(dev); | 6184 | struct bnx2 *bp = netdev_priv(dev); |
6185 | int i; | 6185 | int i; |
6186 | u32 save; | 6186 | u32 save; |
6187 | 6187 | ||
6188 | if (data == 0) | 6188 | if (data == 0) |
6189 | data = 2; | 6189 | data = 2; |
6190 | 6190 | ||
6191 | save = REG_RD(bp, BNX2_MISC_CFG); | 6191 | save = REG_RD(bp, BNX2_MISC_CFG); |
6192 | REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); | 6192 | REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); |
6193 | 6193 | ||
6194 | for (i = 0; i < (data * 2); i++) { | 6194 | for (i = 0; i < (data * 2); i++) { |
6195 | if ((i % 2) == 0) { | 6195 | if ((i % 2) == 0) { |
6196 | REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); | 6196 | REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); |
6197 | } | 6197 | } |
6198 | else { | 6198 | else { |
6199 | REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | | 6199 | REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | |
6200 | BNX2_EMAC_LED_1000MB_OVERRIDE | | 6200 | BNX2_EMAC_LED_1000MB_OVERRIDE | |
6201 | BNX2_EMAC_LED_100MB_OVERRIDE | | 6201 | BNX2_EMAC_LED_100MB_OVERRIDE | |
6202 | BNX2_EMAC_LED_10MB_OVERRIDE | | 6202 | BNX2_EMAC_LED_10MB_OVERRIDE | |
6203 | BNX2_EMAC_LED_TRAFFIC_OVERRIDE | | 6203 | BNX2_EMAC_LED_TRAFFIC_OVERRIDE | |
6204 | BNX2_EMAC_LED_TRAFFIC); | 6204 | BNX2_EMAC_LED_TRAFFIC); |
6205 | } | 6205 | } |
6206 | msleep_interruptible(500); | 6206 | msleep_interruptible(500); |
6207 | if (signal_pending(current)) | 6207 | if (signal_pending(current)) |
6208 | break; | 6208 | break; |
6209 | } | 6209 | } |
6210 | REG_WR(bp, BNX2_EMAC_LED, 0); | 6210 | REG_WR(bp, BNX2_EMAC_LED, 0); |
6211 | REG_WR(bp, BNX2_MISC_CFG, save); | 6211 | REG_WR(bp, BNX2_MISC_CFG, save); |
6212 | return 0; | 6212 | return 0; |
6213 | } | 6213 | } |
6214 | 6214 | ||
6215 | static int | 6215 | static int |
6216 | bnx2_set_tx_csum(struct net_device *dev, u32 data) | 6216 | bnx2_set_tx_csum(struct net_device *dev, u32 data) |
6217 | { | 6217 | { |
6218 | struct bnx2 *bp = netdev_priv(dev); | 6218 | struct bnx2 *bp = netdev_priv(dev); |
6219 | 6219 | ||
6220 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 6220 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
6221 | return (ethtool_op_set_tx_hw_csum(dev, data)); | 6221 | return (ethtool_op_set_tx_ipv6_csum(dev, data)); |
6222 | else | 6222 | else |
6223 | return (ethtool_op_set_tx_csum(dev, data)); | 6223 | return (ethtool_op_set_tx_csum(dev, data)); |
6224 | } | 6224 | } |
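The ethtool_op_set_tx_ipv6_csum() helper that bnx2_set_tx_csum() now calls for the 5709 is defined outside this file (in net/core/ethtool.c, alongside the other ethtool_op_* helpers), so its body does not appear in this diff. A minimal sketch of the expected behavior, setting or clearing NETIF_F_IP_CSUM together with NETIF_F_IPV6_CSUM on dev->features, is shown below; treat it as illustrative rather than a copy of the actual helper:

	/* Sketch only: the real helper lives outside this diff.
	 * Enable or disable IPv4 + IPv6 TX checksum offload flags.
	 */
	int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
	{
		if (data)
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		else
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

		return 0;
	}

Older chips keep the plain ethtool_op_set_tx_csum() path, which only toggles NETIF_F_IP_CSUM, since they cannot offload IPv6 checksums.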
6225 | 6225 | ||
6226 | static const struct ethtool_ops bnx2_ethtool_ops = { | 6226 | static const struct ethtool_ops bnx2_ethtool_ops = { |
6227 | .get_settings = bnx2_get_settings, | 6227 | .get_settings = bnx2_get_settings, |
6228 | .set_settings = bnx2_set_settings, | 6228 | .set_settings = bnx2_set_settings, |
6229 | .get_drvinfo = bnx2_get_drvinfo, | 6229 | .get_drvinfo = bnx2_get_drvinfo, |
6230 | .get_regs_len = bnx2_get_regs_len, | 6230 | .get_regs_len = bnx2_get_regs_len, |
6231 | .get_regs = bnx2_get_regs, | 6231 | .get_regs = bnx2_get_regs, |
6232 | .get_wol = bnx2_get_wol, | 6232 | .get_wol = bnx2_get_wol, |
6233 | .set_wol = bnx2_set_wol, | 6233 | .set_wol = bnx2_set_wol, |
6234 | .nway_reset = bnx2_nway_reset, | 6234 | .nway_reset = bnx2_nway_reset, |
6235 | .get_link = ethtool_op_get_link, | 6235 | .get_link = ethtool_op_get_link, |
6236 | .get_eeprom_len = bnx2_get_eeprom_len, | 6236 | .get_eeprom_len = bnx2_get_eeprom_len, |
6237 | .get_eeprom = bnx2_get_eeprom, | 6237 | .get_eeprom = bnx2_get_eeprom, |
6238 | .set_eeprom = bnx2_set_eeprom, | 6238 | .set_eeprom = bnx2_set_eeprom, |
6239 | .get_coalesce = bnx2_get_coalesce, | 6239 | .get_coalesce = bnx2_get_coalesce, |
6240 | .set_coalesce = bnx2_set_coalesce, | 6240 | .set_coalesce = bnx2_set_coalesce, |
6241 | .get_ringparam = bnx2_get_ringparam, | 6241 | .get_ringparam = bnx2_get_ringparam, |
6242 | .set_ringparam = bnx2_set_ringparam, | 6242 | .set_ringparam = bnx2_set_ringparam, |
6243 | .get_pauseparam = bnx2_get_pauseparam, | 6243 | .get_pauseparam = bnx2_get_pauseparam, |
6244 | .set_pauseparam = bnx2_set_pauseparam, | 6244 | .set_pauseparam = bnx2_set_pauseparam, |
6245 | .get_rx_csum = bnx2_get_rx_csum, | 6245 | .get_rx_csum = bnx2_get_rx_csum, |
6246 | .set_rx_csum = bnx2_set_rx_csum, | 6246 | .set_rx_csum = bnx2_set_rx_csum, |
6247 | .get_tx_csum = ethtool_op_get_tx_csum, | 6247 | .get_tx_csum = ethtool_op_get_tx_csum, |
6248 | .set_tx_csum = bnx2_set_tx_csum, | 6248 | .set_tx_csum = bnx2_set_tx_csum, |
6249 | .get_sg = ethtool_op_get_sg, | 6249 | .get_sg = ethtool_op_get_sg, |
6250 | .set_sg = ethtool_op_set_sg, | 6250 | .set_sg = ethtool_op_set_sg, |
6251 | .get_tso = ethtool_op_get_tso, | 6251 | .get_tso = ethtool_op_get_tso, |
6252 | .set_tso = bnx2_set_tso, | 6252 | .set_tso = bnx2_set_tso, |
6253 | .self_test_count = bnx2_self_test_count, | 6253 | .self_test_count = bnx2_self_test_count, |
6254 | .self_test = bnx2_self_test, | 6254 | .self_test = bnx2_self_test, |
6255 | .get_strings = bnx2_get_strings, | 6255 | .get_strings = bnx2_get_strings, |
6256 | .phys_id = bnx2_phys_id, | 6256 | .phys_id = bnx2_phys_id, |
6257 | .get_stats_count = bnx2_get_stats_count, | 6257 | .get_stats_count = bnx2_get_stats_count, |
6258 | .get_ethtool_stats = bnx2_get_ethtool_stats, | 6258 | .get_ethtool_stats = bnx2_get_ethtool_stats, |
6259 | .get_perm_addr = ethtool_op_get_perm_addr, | 6259 | .get_perm_addr = ethtool_op_get_perm_addr, |
6260 | }; | 6260 | }; |
6261 | 6261 | ||
6262 | /* Called with rtnl_lock */ | 6262 | /* Called with rtnl_lock */ |
6263 | static int | 6263 | static int |
6264 | bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 6264 | bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
6265 | { | 6265 | { |
6266 | struct mii_ioctl_data *data = if_mii(ifr); | 6266 | struct mii_ioctl_data *data = if_mii(ifr); |
6267 | struct bnx2 *bp = netdev_priv(dev); | 6267 | struct bnx2 *bp = netdev_priv(dev); |
6268 | int err; | 6268 | int err; |
6269 | 6269 | ||
6270 | switch(cmd) { | 6270 | switch(cmd) { |
6271 | case SIOCGMIIPHY: | 6271 | case SIOCGMIIPHY: |
6272 | data->phy_id = bp->phy_addr; | 6272 | data->phy_id = bp->phy_addr; |
6273 | 6273 | ||
6274 | /* fallthru */ | 6274 | /* fallthru */ |
6275 | case SIOCGMIIREG: { | 6275 | case SIOCGMIIREG: { |
6276 | u32 mii_regval; | 6276 | u32 mii_regval; |
6277 | 6277 | ||
6278 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 6278 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
6279 | return -EOPNOTSUPP; | 6279 | return -EOPNOTSUPP; |
6280 | 6280 | ||
6281 | if (!netif_running(dev)) | 6281 | if (!netif_running(dev)) |
6282 | return -EAGAIN; | 6282 | return -EAGAIN; |
6283 | 6283 | ||
6284 | spin_lock_bh(&bp->phy_lock); | 6284 | spin_lock_bh(&bp->phy_lock); |
6285 | err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); | 6285 | err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); |
6286 | spin_unlock_bh(&bp->phy_lock); | 6286 | spin_unlock_bh(&bp->phy_lock); |
6287 | 6287 | ||
6288 | data->val_out = mii_regval; | 6288 | data->val_out = mii_regval; |
6289 | 6289 | ||
6290 | return err; | 6290 | return err; |
6291 | } | 6291 | } |
6292 | 6292 | ||
6293 | case SIOCSMIIREG: | 6293 | case SIOCSMIIREG: |
6294 | if (!capable(CAP_NET_ADMIN)) | 6294 | if (!capable(CAP_NET_ADMIN)) |
6295 | return -EPERM; | 6295 | return -EPERM; |
6296 | 6296 | ||
6297 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) | 6297 | if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) |
6298 | return -EOPNOTSUPP; | 6298 | return -EOPNOTSUPP; |
6299 | 6299 | ||
6300 | if (!netif_running(dev)) | 6300 | if (!netif_running(dev)) |
6301 | return -EAGAIN; | 6301 | return -EAGAIN; |
6302 | 6302 | ||
6303 | spin_lock_bh(&bp->phy_lock); | 6303 | spin_lock_bh(&bp->phy_lock); |
6304 | err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); | 6304 | err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); |
6305 | spin_unlock_bh(&bp->phy_lock); | 6305 | spin_unlock_bh(&bp->phy_lock); |
6306 | 6306 | ||
6307 | return err; | 6307 | return err; |
6308 | 6308 | ||
6309 | default: | 6309 | default: |
6310 | /* do nothing */ | 6310 | /* do nothing */ |
6311 | break; | 6311 | break; |
6312 | } | 6312 | } |
6313 | return -EOPNOTSUPP; | 6313 | return -EOPNOTSUPP; |
6314 | } | 6314 | } |
6315 | 6315 | ||
6316 | /* Called with rtnl_lock */ | 6316 | /* Called with rtnl_lock */ |
6317 | static int | 6317 | static int |
6318 | bnx2_change_mac_addr(struct net_device *dev, void *p) | 6318 | bnx2_change_mac_addr(struct net_device *dev, void *p) |
6319 | { | 6319 | { |
6320 | struct sockaddr *addr = p; | 6320 | struct sockaddr *addr = p; |
6321 | struct bnx2 *bp = netdev_priv(dev); | 6321 | struct bnx2 *bp = netdev_priv(dev); |
6322 | 6322 | ||
6323 | if (!is_valid_ether_addr(addr->sa_data)) | 6323 | if (!is_valid_ether_addr(addr->sa_data)) |
6324 | return -EINVAL; | 6324 | return -EINVAL; |
6325 | 6325 | ||
6326 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 6326 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
6327 | if (netif_running(dev)) | 6327 | if (netif_running(dev)) |
6328 | bnx2_set_mac_addr(bp); | 6328 | bnx2_set_mac_addr(bp); |
6329 | 6329 | ||
6330 | return 0; | 6330 | return 0; |
6331 | } | 6331 | } |
6332 | 6332 | ||
6333 | /* Called with rtnl_lock */ | 6333 | /* Called with rtnl_lock */ |
6334 | static int | 6334 | static int |
6335 | bnx2_change_mtu(struct net_device *dev, int new_mtu) | 6335 | bnx2_change_mtu(struct net_device *dev, int new_mtu) |
6336 | { | 6336 | { |
6337 | struct bnx2 *bp = netdev_priv(dev); | 6337 | struct bnx2 *bp = netdev_priv(dev); |
6338 | 6338 | ||
6339 | if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || | 6339 | if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || |
6340 | ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) | 6340 | ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) |
6341 | return -EINVAL; | 6341 | return -EINVAL; |
6342 | 6342 | ||
6343 | dev->mtu = new_mtu; | 6343 | dev->mtu = new_mtu; |
6344 | if (netif_running(dev)) { | 6344 | if (netif_running(dev)) { |
6345 | bnx2_netif_stop(bp); | 6345 | bnx2_netif_stop(bp); |
6346 | 6346 | ||
6347 | bnx2_init_nic(bp); | 6347 | bnx2_init_nic(bp); |
6348 | 6348 | ||
6349 | bnx2_netif_start(bp); | 6349 | bnx2_netif_start(bp); |
6350 | } | 6350 | } |
6351 | return 0; | 6351 | return 0; |
6352 | } | 6352 | } |
6353 | 6353 | ||
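bnx2_change_mtu() accepts the new value only if the resulting frame (new_mtu + ETH_HLEN) fits between MIN_ETHERNET_PACKET_SIZE and MAX_ETHERNET_JUMBO_PACKET_SIZE, and bounces the NIC through bnx2_netif_stop()/bnx2_init_nic()/bnx2_netif_start() when the interface is up. A hypothetical user-space sketch of the request that ends up here via SIOCSIFMTU (interface name and MTU value are made-up examples):

/* Hypothetical sketch: request a jumbo MTU, which lands in
 * bnx2_change_mtu() above. The driver returns -EINVAL if the resulting
 * frame size falls outside its supported range. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 9000;

	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
		perror("SIOCSIFMTU");
		return 1;
	}
	return 0;
}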
6354 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | 6354 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) |
6355 | static void | 6355 | static void |
6356 | poll_bnx2(struct net_device *dev) | 6356 | poll_bnx2(struct net_device *dev) |
6357 | { | 6357 | { |
6358 | struct bnx2 *bp = netdev_priv(dev); | 6358 | struct bnx2 *bp = netdev_priv(dev); |
6359 | 6359 | ||
6360 | disable_irq(bp->pdev->irq); | 6360 | disable_irq(bp->pdev->irq); |
6361 | bnx2_interrupt(bp->pdev->irq, dev); | 6361 | bnx2_interrupt(bp->pdev->irq, dev); |
6362 | enable_irq(bp->pdev->irq); | 6362 | enable_irq(bp->pdev->irq); |
6363 | } | 6363 | } |
6364 | #endif | 6364 | #endif |
6365 | 6365 | ||
6366 | static void __devinit | 6366 | static void __devinit |
6367 | bnx2_get_5709_media(struct bnx2 *bp) | 6367 | bnx2_get_5709_media(struct bnx2 *bp) |
6368 | { | 6368 | { |
6369 | u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); | 6369 | u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); |
6370 | u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; | 6370 | u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; |
6371 | u32 strap; | 6371 | u32 strap; |
6372 | 6372 | ||
6373 | if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) | 6373 | if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) |
6374 | return; | 6374 | return; |
6375 | else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { | 6375 | else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { |
6376 | bp->phy_flags |= PHY_SERDES_FLAG; | 6376 | bp->phy_flags |= PHY_SERDES_FLAG; |
6377 | return; | 6377 | return; |
6378 | } | 6378 | } |
6379 | 6379 | ||
6380 | if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) | 6380 | if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) |
6381 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; | 6381 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; |
6382 | else | 6382 | else |
6383 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; | 6383 | strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; |
6384 | 6384 | ||
6385 | if (PCI_FUNC(bp->pdev->devfn) == 0) { | 6385 | if (PCI_FUNC(bp->pdev->devfn) == 0) { |
6386 | switch (strap) { | 6386 | switch (strap) { |
6387 | case 0x4: | 6387 | case 0x4: |
6388 | case 0x5: | 6388 | case 0x5: |
6389 | case 0x6: | 6389 | case 0x6: |
6390 | bp->phy_flags |= PHY_SERDES_FLAG; | 6390 | bp->phy_flags |= PHY_SERDES_FLAG; |
6391 | return; | 6391 | return; |
6392 | } | 6392 | } |
6393 | } else { | 6393 | } else { |
6394 | switch (strap) { | 6394 | switch (strap) { |
6395 | case 0x1: | 6395 | case 0x1: |
6396 | case 0x2: | 6396 | case 0x2: |
6397 | case 0x4: | 6397 | case 0x4: |
6398 | bp->phy_flags |= PHY_SERDES_FLAG; | 6398 | bp->phy_flags |= PHY_SERDES_FLAG; |
6399 | return; | 6399 | return; |
6400 | } | 6400 | } |
6401 | } | 6401 | } |
6402 | } | 6402 | } |
6403 | 6403 | ||
6404 | static void __devinit | 6404 | static void __devinit |
6405 | bnx2_get_pci_speed(struct bnx2 *bp) | 6405 | bnx2_get_pci_speed(struct bnx2 *bp) |
6406 | { | 6406 | { |
6407 | u32 reg; | 6407 | u32 reg; |
6408 | 6408 | ||
6409 | reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); | 6409 | reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); |
6410 | if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { | 6410 | if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { |
6411 | u32 clkreg; | 6411 | u32 clkreg; |
6412 | 6412 | ||
6413 | bp->flags |= PCIX_FLAG; | 6413 | bp->flags |= PCIX_FLAG; |
6414 | 6414 | ||
6415 | clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); | 6415 | clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); |
6416 | 6416 | ||
6417 | clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; | 6417 | clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; |
6418 | switch (clkreg) { | 6418 | switch (clkreg) { |
6419 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: | 6419 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: |
6420 | bp->bus_speed_mhz = 133; | 6420 | bp->bus_speed_mhz = 133; |
6421 | break; | 6421 | break; |
6422 | 6422 | ||
6423 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: | 6423 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: |
6424 | bp->bus_speed_mhz = 100; | 6424 | bp->bus_speed_mhz = 100; |
6425 | break; | 6425 | break; |
6426 | 6426 | ||
6427 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: | 6427 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: |
6428 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: | 6428 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: |
6429 | bp->bus_speed_mhz = 66; | 6429 | bp->bus_speed_mhz = 66; |
6430 | break; | 6430 | break; |
6431 | 6431 | ||
6432 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: | 6432 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: |
6433 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: | 6433 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: |
6434 | bp->bus_speed_mhz = 50; | 6434 | bp->bus_speed_mhz = 50; |
6435 | break; | 6435 | break; |
6436 | 6436 | ||
6437 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: | 6437 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: |
6438 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: | 6438 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: |
6439 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: | 6439 | case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: |
6440 | bp->bus_speed_mhz = 33; | 6440 | bp->bus_speed_mhz = 33; |
6441 | break; | 6441 | break; |
6442 | } | 6442 | } |
6443 | } | 6443 | } |
6444 | else { | 6444 | else { |
6445 | if (reg & BNX2_PCICFG_MISC_STATUS_M66EN) | 6445 | if (reg & BNX2_PCICFG_MISC_STATUS_M66EN) |
6446 | bp->bus_speed_mhz = 66; | 6446 | bp->bus_speed_mhz = 66; |
6447 | else | 6447 | else |
6448 | bp->bus_speed_mhz = 33; | 6448 | bp->bus_speed_mhz = 33; |
6449 | } | 6449 | } |
6450 | 6450 | ||
6451 | if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET) | 6451 | if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET) |
6452 | bp->flags |= PCI_32BIT_FLAG; | 6452 | bp->flags |= PCI_32BIT_FLAG; |
6453 | 6453 | ||
6454 | } | 6454 | } |
6455 | 6455 | ||
6456 | static int __devinit | 6456 | static int __devinit |
6457 | bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | 6457 | bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) |
6458 | { | 6458 | { |
6459 | struct bnx2 *bp; | 6459 | struct bnx2 *bp; |
6460 | unsigned long mem_len; | 6460 | unsigned long mem_len; |
6461 | int rc, i, j; | 6461 | int rc, i, j; |
6462 | u32 reg; | 6462 | u32 reg; |
6463 | u64 dma_mask, persist_dma_mask; | 6463 | u64 dma_mask, persist_dma_mask; |
6464 | 6464 | ||
6465 | SET_MODULE_OWNER(dev); | 6465 | SET_MODULE_OWNER(dev); |
6466 | SET_NETDEV_DEV(dev, &pdev->dev); | 6466 | SET_NETDEV_DEV(dev, &pdev->dev); |
6467 | bp = netdev_priv(dev); | 6467 | bp = netdev_priv(dev); |
6468 | 6468 | ||
6469 | bp->flags = 0; | 6469 | bp->flags = 0; |
6470 | bp->phy_flags = 0; | 6470 | bp->phy_flags = 0; |
6471 | 6471 | ||
6472 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ | 6472 | /* enable device (incl. PCI PM wakeup), and bus-mastering */ |
6473 | rc = pci_enable_device(pdev); | 6473 | rc = pci_enable_device(pdev); |
6474 | if (rc) { | 6474 | if (rc) { |
6475 | dev_err(&pdev->dev, "Cannot enable PCI device, aborting."); | 6475 | dev_err(&pdev->dev, "Cannot enable PCI device, aborting."); |
6476 | goto err_out; | 6476 | goto err_out; |
6477 | } | 6477 | } |
6478 | 6478 | ||
6479 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 6479 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
6480 | dev_err(&pdev->dev, | 6480 | dev_err(&pdev->dev, |
6481 | "Cannot find PCI device base address, aborting.\n"); | 6481 | "Cannot find PCI device base address, aborting.\n"); |
6482 | rc = -ENODEV; | 6482 | rc = -ENODEV; |
6483 | goto err_out_disable; | 6483 | goto err_out_disable; |
6484 | } | 6484 | } |
6485 | 6485 | ||
6486 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); | 6486 | rc = pci_request_regions(pdev, DRV_MODULE_NAME); |
6487 | if (rc) { | 6487 | if (rc) { |
6488 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); | 6488 | dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); |
6489 | goto err_out_disable; | 6489 | goto err_out_disable; |
6490 | } | 6490 | } |
6491 | 6491 | ||
6492 | pci_set_master(pdev); | 6492 | pci_set_master(pdev); |
6493 | 6493 | ||
6494 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 6494 | bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
6495 | if (bp->pm_cap == 0) { | 6495 | if (bp->pm_cap == 0) { |
6496 | dev_err(&pdev->dev, | 6496 | dev_err(&pdev->dev, |
6497 | "Cannot find power management capability, aborting.\n"); | 6497 | "Cannot find power management capability, aborting.\n"); |
6498 | rc = -EIO; | 6498 | rc = -EIO; |
6499 | goto err_out_release; | 6499 | goto err_out_release; |
6500 | } | 6500 | } |
6501 | 6501 | ||
6502 | bp->dev = dev; | 6502 | bp->dev = dev; |
6503 | bp->pdev = pdev; | 6503 | bp->pdev = pdev; |
6504 | 6504 | ||
6505 | spin_lock_init(&bp->phy_lock); | 6505 | spin_lock_init(&bp->phy_lock); |
6506 | spin_lock_init(&bp->indirect_lock); | 6506 | spin_lock_init(&bp->indirect_lock); |
6507 | INIT_WORK(&bp->reset_task, bnx2_reset_task); | 6507 | INIT_WORK(&bp->reset_task, bnx2_reset_task); |
6508 | 6508 | ||
6509 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); | 6509 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); |
6510 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); | 6510 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); |
6511 | dev->mem_end = dev->mem_start + mem_len; | 6511 | dev->mem_end = dev->mem_start + mem_len; |
6512 | dev->irq = pdev->irq; | 6512 | dev->irq = pdev->irq; |
6513 | 6513 | ||
6514 | bp->regview = ioremap_nocache(dev->base_addr, mem_len); | 6514 | bp->regview = ioremap_nocache(dev->base_addr, mem_len); |
6515 | 6515 | ||
6516 | if (!bp->regview) { | 6516 | if (!bp->regview) { |
6517 | dev_err(&pdev->dev, "Cannot map register space, aborting.\n"); | 6517 | dev_err(&pdev->dev, "Cannot map register space, aborting.\n"); |
6518 | rc = -ENOMEM; | 6518 | rc = -ENOMEM; |
6519 | goto err_out_release; | 6519 | goto err_out_release; |
6520 | } | 6520 | } |
6521 | 6521 | ||
6522 | /* Configure byte swap and enable write to the reg_window registers. | 6522 | /* Configure byte swap and enable write to the reg_window registers. |
6523 | * Rely on CPU to do target byte swapping on big endian systems | 6523 | * Rely on CPU to do target byte swapping on big endian systems |
6524 | * The chip's target access swapping will not swap all accesses | 6524 | * The chip's target access swapping will not swap all accesses |
6525 | */ | 6525 | */ |
6526 | pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, | 6526 | pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, |
6527 | BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | | 6527 | BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | |
6528 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); | 6528 | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); |
6529 | 6529 | ||
6530 | bnx2_set_power_state(bp, PCI_D0); | 6530 | bnx2_set_power_state(bp, PCI_D0); |
6531 | 6531 | ||
6532 | bp->chip_id = REG_RD(bp, BNX2_MISC_ID); | 6532 | bp->chip_id = REG_RD(bp, BNX2_MISC_ID); |
6533 | 6533 | ||
6534 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { | 6534 | if (CHIP_NUM(bp) == CHIP_NUM_5709) { |
6535 | if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) { | 6535 | if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) { |
6536 | dev_err(&pdev->dev, | 6536 | dev_err(&pdev->dev, |
6537 | "Cannot find PCIE capability, aborting.\n"); | 6537 | "Cannot find PCIE capability, aborting.\n"); |
6538 | rc = -EIO; | 6538 | rc = -EIO; |
6539 | goto err_out_unmap; | 6539 | goto err_out_unmap; |
6540 | } | 6540 | } |
6541 | bp->flags |= PCIE_FLAG; | 6541 | bp->flags |= PCIE_FLAG; |
6542 | } else { | 6542 | } else { |
6543 | bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); | 6543 | bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); |
6544 | if (bp->pcix_cap == 0) { | 6544 | if (bp->pcix_cap == 0) { |
6545 | dev_err(&pdev->dev, | 6545 | dev_err(&pdev->dev, |
6546 | "Cannot find PCIX capability, aborting.\n"); | 6546 | "Cannot find PCIX capability, aborting.\n"); |
6547 | rc = -EIO; | 6547 | rc = -EIO; |
6548 | goto err_out_unmap; | 6548 | goto err_out_unmap; |
6549 | } | 6549 | } |
6550 | } | 6550 | } |
6551 | 6551 | ||
6552 | if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { | 6552 | if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { |
6553 | if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) | 6553 | if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) |
6554 | bp->flags |= MSI_CAP_FLAG; | 6554 | bp->flags |= MSI_CAP_FLAG; |
6555 | } | 6555 | } |
6556 | 6556 | ||
6557 | /* 5708 cannot support DMA addresses > 40-bit. */ | 6557 | /* 5708 cannot support DMA addresses > 40-bit. */ |
6558 | if (CHIP_NUM(bp) == CHIP_NUM_5708) | 6558 | if (CHIP_NUM(bp) == CHIP_NUM_5708) |
6559 | persist_dma_mask = dma_mask = DMA_40BIT_MASK; | 6559 | persist_dma_mask = dma_mask = DMA_40BIT_MASK; |
6560 | else | 6560 | else |
6561 | persist_dma_mask = dma_mask = DMA_64BIT_MASK; | 6561 | persist_dma_mask = dma_mask = DMA_64BIT_MASK; |
6562 | 6562 | ||
6563 | /* Configure DMA attributes. */ | 6563 | /* Configure DMA attributes. */ |
6564 | if (pci_set_dma_mask(pdev, dma_mask) == 0) { | 6564 | if (pci_set_dma_mask(pdev, dma_mask) == 0) { |
6565 | dev->features |= NETIF_F_HIGHDMA; | 6565 | dev->features |= NETIF_F_HIGHDMA; |
6566 | rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); | 6566 | rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); |
6567 | if (rc) { | 6567 | if (rc) { |
6568 | dev_err(&pdev->dev, | 6568 | dev_err(&pdev->dev, |
6569 | "pci_set_consistent_dma_mask failed, aborting.\n"); | 6569 | "pci_set_consistent_dma_mask failed, aborting.\n"); |
6570 | goto err_out_unmap; | 6570 | goto err_out_unmap; |
6571 | } | 6571 | } |
6572 | } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) { | 6572 | } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) { |
6573 | dev_err(&pdev->dev, "System does not support DMA, aborting.\n"); | 6573 | dev_err(&pdev->dev, "System does not support DMA, aborting.\n"); |
6574 | goto err_out_unmap; | 6574 | goto err_out_unmap; |
6575 | } | 6575 | } |
6576 | 6576 | ||
6577 | if (!(bp->flags & PCIE_FLAG)) | 6577 | if (!(bp->flags & PCIE_FLAG)) |
6578 | bnx2_get_pci_speed(bp); | 6578 | bnx2_get_pci_speed(bp); |
6579 | 6579 | ||
6580 | /* 5706A0 may falsely detect SERR and PERR. */ | 6580 | /* 5706A0 may falsely detect SERR and PERR. */ |
6581 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { | 6581 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { |
6582 | reg = REG_RD(bp, PCI_COMMAND); | 6582 | reg = REG_RD(bp, PCI_COMMAND); |
6583 | reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); | 6583 | reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); |
6584 | REG_WR(bp, PCI_COMMAND, reg); | 6584 | REG_WR(bp, PCI_COMMAND, reg); |
6585 | } | 6585 | } |
6586 | else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && | 6586 | else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && |
6587 | !(bp->flags & PCIX_FLAG)) { | 6587 | !(bp->flags & PCIX_FLAG)) { |
6588 | 6588 | ||
6589 | dev_err(&pdev->dev, | 6589 | dev_err(&pdev->dev, |
6590 | "5706 A1 can only be used in a PCIX bus, aborting.\n"); | 6590 | "5706 A1 can only be used in a PCIX bus, aborting.\n"); |
6591 | goto err_out_unmap; | 6591 | goto err_out_unmap; |
6592 | } | 6592 | } |
6593 | 6593 | ||
6594 | bnx2_init_nvram(bp); | 6594 | bnx2_init_nvram(bp); |
6595 | 6595 | ||
6596 | reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE); | 6596 | reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE); |
6597 | 6597 | ||
6598 | if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == | 6598 | if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == |
6599 | BNX2_SHM_HDR_SIGNATURE_SIG) { | 6599 | BNX2_SHM_HDR_SIGNATURE_SIG) { |
6600 | u32 off = PCI_FUNC(pdev->devfn) << 2; | 6600 | u32 off = PCI_FUNC(pdev->devfn) << 2; |
6601 | 6601 | ||
6602 | bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off); | 6602 | bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off); |
6603 | } else | 6603 | } else |
6604 | bp->shmem_base = HOST_VIEW_SHMEM_BASE; | 6604 | bp->shmem_base = HOST_VIEW_SHMEM_BASE; |
6605 | 6605 | ||
6606 | /* Get the permanent MAC address. First we need to make sure the | 6606 | /* Get the permanent MAC address. First we need to make sure the |
6607 | * firmware is actually running. | 6607 | * firmware is actually running. |
6608 | */ | 6608 | */ |
6609 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE); | 6609 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE); |
6610 | 6610 | ||
6611 | if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) != | 6611 | if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) != |
6612 | BNX2_DEV_INFO_SIGNATURE_MAGIC) { | 6612 | BNX2_DEV_INFO_SIGNATURE_MAGIC) { |
6613 | dev_err(&pdev->dev, "Firmware not running, aborting.\n"); | 6613 | dev_err(&pdev->dev, "Firmware not running, aborting.\n"); |
6614 | rc = -ENODEV; | 6614 | rc = -ENODEV; |
6615 | goto err_out_unmap; | 6615 | goto err_out_unmap; |
6616 | } | 6616 | } |
6617 | 6617 | ||
6618 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); | 6618 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); |
6619 | for (i = 0, j = 0; i < 3; i++) { | 6619 | for (i = 0, j = 0; i < 3; i++) { |
6620 | u8 num, k, skip0; | 6620 | u8 num, k, skip0; |
6621 | 6621 | ||
6622 | num = (u8) (reg >> (24 - (i * 8))); | 6622 | num = (u8) (reg >> (24 - (i * 8))); |
6623 | for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { | 6623 | for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { |
6624 | if (num >= k || !skip0 || k == 1) { | 6624 | if (num >= k || !skip0 || k == 1) { |
6625 | bp->fw_version[j++] = (num / k) + '0'; | 6625 | bp->fw_version[j++] = (num / k) + '0'; |
6626 | skip0 = 0; | 6626 | skip0 = 0; |
6627 | } | 6627 | } |
6628 | } | 6628 | } |
6629 | if (i != 2) | 6629 | if (i != 2) |
6630 | bp->fw_version[j++] = '.'; | 6630 | bp->fw_version[j++] = '.'; |
6631 | } | 6631 | } |
6632 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION); | 6632 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION); |
6633 | reg &= BNX2_CONDITION_MFW_RUN_MASK; | 6633 | reg &= BNX2_CONDITION_MFW_RUN_MASK; |
6634 | if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && | 6634 | if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && |
6635 | reg != BNX2_CONDITION_MFW_RUN_NONE) { | 6635 | reg != BNX2_CONDITION_MFW_RUN_NONE) { |
6636 | int i; | 6636 | int i; |
6637 | u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR); | 6637 | u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR); |
6638 | 6638 | ||
6639 | bp->fw_version[j++] = ' '; | 6639 | bp->fw_version[j++] = ' '; |
6640 | for (i = 0; i < 3; i++) { | 6640 | for (i = 0; i < 3; i++) { |
6641 | reg = REG_RD_IND(bp, addr + i * 4); | 6641 | reg = REG_RD_IND(bp, addr + i * 4); |
6642 | reg = swab32(reg); | 6642 | reg = swab32(reg); |
6643 | memcpy(&bp->fw_version[j], &reg, 4); | 6643 | memcpy(&bp->fw_version[j], &reg, 4); |
6644 | j += 4; | 6644 | j += 4; |
6645 | } | 6645 | } |
6646 | } | 6646 | } |
6647 | 6647 | ||
6648 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER); | 6648 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER); |
6649 | bp->mac_addr[0] = (u8) (reg >> 8); | 6649 | bp->mac_addr[0] = (u8) (reg >> 8); |
6650 | bp->mac_addr[1] = (u8) reg; | 6650 | bp->mac_addr[1] = (u8) reg; |
6651 | 6651 | ||
6652 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER); | 6652 | reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER); |
6653 | bp->mac_addr[2] = (u8) (reg >> 24); | 6653 | bp->mac_addr[2] = (u8) (reg >> 24); |
6654 | bp->mac_addr[3] = (u8) (reg >> 16); | 6654 | bp->mac_addr[3] = (u8) (reg >> 16); |
6655 | bp->mac_addr[4] = (u8) (reg >> 8); | 6655 | bp->mac_addr[4] = (u8) (reg >> 8); |
6656 | bp->mac_addr[5] = (u8) reg; | 6656 | bp->mac_addr[5] = (u8) reg; |
6657 | 6657 | ||
6658 | bp->tx_ring_size = MAX_TX_DESC_CNT; | 6658 | bp->tx_ring_size = MAX_TX_DESC_CNT; |
6659 | bnx2_set_rx_ring_size(bp, 255); | 6659 | bnx2_set_rx_ring_size(bp, 255); |
6660 | 6660 | ||
6661 | bp->rx_csum = 1; | 6661 | bp->rx_csum = 1; |
6662 | 6662 | ||
6663 | bp->rx_offset = sizeof(struct l2_fhdr) + 2; | 6663 | bp->rx_offset = sizeof(struct l2_fhdr) + 2; |
6664 | 6664 | ||
6665 | bp->tx_quick_cons_trip_int = 20; | 6665 | bp->tx_quick_cons_trip_int = 20; |
6666 | bp->tx_quick_cons_trip = 20; | 6666 | bp->tx_quick_cons_trip = 20; |
6667 | bp->tx_ticks_int = 80; | 6667 | bp->tx_ticks_int = 80; |
6668 | bp->tx_ticks = 80; | 6668 | bp->tx_ticks = 80; |
6669 | 6669 | ||
6670 | bp->rx_quick_cons_trip_int = 6; | 6670 | bp->rx_quick_cons_trip_int = 6; |
6671 | bp->rx_quick_cons_trip = 6; | 6671 | bp->rx_quick_cons_trip = 6; |
6672 | bp->rx_ticks_int = 18; | 6672 | bp->rx_ticks_int = 18; |
6673 | bp->rx_ticks = 18; | 6673 | bp->rx_ticks = 18; |
6674 | 6674 | ||
6675 | bp->stats_ticks = 1000000 & 0xffff00; | 6675 | bp->stats_ticks = 1000000 & 0xffff00; |
6676 | 6676 | ||
6677 | bp->timer_interval = HZ; | 6677 | bp->timer_interval = HZ; |
6678 | bp->current_interval = HZ; | 6678 | bp->current_interval = HZ; |
6679 | 6679 | ||
6680 | bp->phy_addr = 1; | 6680 | bp->phy_addr = 1; |
6681 | 6681 | ||
6682 | /* Disable WOL support if we are running on a SERDES chip. */ | 6682 | /* Disable WOL support if we are running on a SERDES chip. */ |
6683 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 6683 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
6684 | bnx2_get_5709_media(bp); | 6684 | bnx2_get_5709_media(bp); |
6685 | else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) | 6685 | else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) |
6686 | bp->phy_flags |= PHY_SERDES_FLAG; | 6686 | bp->phy_flags |= PHY_SERDES_FLAG; |
6687 | 6687 | ||
6688 | bp->phy_port = PORT_TP; | 6688 | bp->phy_port = PORT_TP; |
6689 | if (bp->phy_flags & PHY_SERDES_FLAG) { | 6689 | if (bp->phy_flags & PHY_SERDES_FLAG) { |
6690 | bp->phy_port = PORT_FIBRE; | 6690 | bp->phy_port = PORT_FIBRE; |
6691 | bp->flags |= NO_WOL_FLAG; | 6691 | bp->flags |= NO_WOL_FLAG; |
6692 | if (CHIP_NUM(bp) != CHIP_NUM_5706) { | 6692 | if (CHIP_NUM(bp) != CHIP_NUM_5706) { |
6693 | bp->phy_addr = 2; | 6693 | bp->phy_addr = 2; |
6694 | reg = REG_RD_IND(bp, bp->shmem_base + | 6694 | reg = REG_RD_IND(bp, bp->shmem_base + |
6695 | BNX2_SHARED_HW_CFG_CONFIG); | 6695 | BNX2_SHARED_HW_CFG_CONFIG); |
6696 | if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) | 6696 | if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) |
6697 | bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; | 6697 | bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; |
6698 | } | 6698 | } |
6699 | bnx2_init_remote_phy(bp); | 6699 | bnx2_init_remote_phy(bp); |
6700 | 6700 | ||
6701 | } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || | 6701 | } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || |
6702 | CHIP_NUM(bp) == CHIP_NUM_5708) | 6702 | CHIP_NUM(bp) == CHIP_NUM_5708) |
6703 | bp->phy_flags |= PHY_CRC_FIX_FLAG; | 6703 | bp->phy_flags |= PHY_CRC_FIX_FLAG; |
6704 | else if (CHIP_ID(bp) == CHIP_ID_5709_A0) | 6704 | else if (CHIP_ID(bp) == CHIP_ID_5709_A0) |
6705 | bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG; | 6705 | bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG; |
6706 | 6706 | ||
6707 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || | 6707 | if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || |
6708 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || | 6708 | (CHIP_ID(bp) == CHIP_ID_5708_B0) || |
6709 | (CHIP_ID(bp) == CHIP_ID_5708_B1)) | 6709 | (CHIP_ID(bp) == CHIP_ID_5708_B1)) |
6710 | bp->flags |= NO_WOL_FLAG; | 6710 | bp->flags |= NO_WOL_FLAG; |
6711 | 6711 | ||
6712 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { | 6712 | if (CHIP_ID(bp) == CHIP_ID_5706_A0) { |
6713 | bp->tx_quick_cons_trip_int = | 6713 | bp->tx_quick_cons_trip_int = |
6714 | bp->tx_quick_cons_trip; | 6714 | bp->tx_quick_cons_trip; |
6715 | bp->tx_ticks_int = bp->tx_ticks; | 6715 | bp->tx_ticks_int = bp->tx_ticks; |
6716 | bp->rx_quick_cons_trip_int = | 6716 | bp->rx_quick_cons_trip_int = |
6717 | bp->rx_quick_cons_trip; | 6717 | bp->rx_quick_cons_trip; |
6718 | bp->rx_ticks_int = bp->rx_ticks; | 6718 | bp->rx_ticks_int = bp->rx_ticks; |
6719 | bp->comp_prod_trip_int = bp->comp_prod_trip; | 6719 | bp->comp_prod_trip_int = bp->comp_prod_trip; |
6720 | bp->com_ticks_int = bp->com_ticks; | 6720 | bp->com_ticks_int = bp->com_ticks; |
6721 | bp->cmd_ticks_int = bp->cmd_ticks; | 6721 | bp->cmd_ticks_int = bp->cmd_ticks; |
6722 | } | 6722 | } |
6723 | 6723 | ||
6724 | /* Disable MSI on 5706 if AMD 8132 bridge is found. | 6724 | /* Disable MSI on 5706 if AMD 8132 bridge is found. |
6725 | * | 6725 | * |
6726 | * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes | 6726 | * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes |
6727 | * with byte enables disabled on the unused 32-bit word. This is legal | 6727 | * with byte enables disabled on the unused 32-bit word. This is legal |
6728 | * but causes problems on the AMD 8132 which will eventually stop | 6728 | * but causes problems on the AMD 8132 which will eventually stop |
6729 | * responding after a while. | 6729 | * responding after a while. |
6730 | * | 6730 | * |
6731 | * AMD believes this incompatibility is unique to the 5706, and | 6731 | * AMD believes this incompatibility is unique to the 5706, and |
6732 | * prefers to locally disable MSI rather than globally disabling it. | 6732 | * prefers to locally disable MSI rather than globally disabling it. |
6733 | */ | 6733 | */ |
6734 | if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { | 6734 | if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { |
6735 | struct pci_dev *amd_8132 = NULL; | 6735 | struct pci_dev *amd_8132 = NULL; |
6736 | 6736 | ||
6737 | while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, | 6737 | while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, |
6738 | PCI_DEVICE_ID_AMD_8132_BRIDGE, | 6738 | PCI_DEVICE_ID_AMD_8132_BRIDGE, |
6739 | amd_8132))) { | 6739 | amd_8132))) { |
6740 | 6740 | ||
6741 | if (amd_8132->revision >= 0x10 && | 6741 | if (amd_8132->revision >= 0x10 && |
6742 | amd_8132->revision <= 0x13) { | 6742 | amd_8132->revision <= 0x13) { |
6743 | disable_msi = 1; | 6743 | disable_msi = 1; |
6744 | pci_dev_put(amd_8132); | 6744 | pci_dev_put(amd_8132); |
6745 | break; | 6745 | break; |
6746 | } | 6746 | } |
6747 | } | 6747 | } |
6748 | } | 6748 | } |
6749 | 6749 | ||
6750 | bnx2_set_default_link(bp); | 6750 | bnx2_set_default_link(bp); |
6751 | bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; | 6751 | bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; |
6752 | 6752 | ||
6753 | init_timer(&bp->timer); | 6753 | init_timer(&bp->timer); |
6754 | bp->timer.expires = RUN_AT(bp->timer_interval); | 6754 | bp->timer.expires = RUN_AT(bp->timer_interval); |
6755 | bp->timer.data = (unsigned long) bp; | 6755 | bp->timer.data = (unsigned long) bp; |
6756 | bp->timer.function = bnx2_timer; | 6756 | bp->timer.function = bnx2_timer; |
6757 | 6757 | ||
6758 | return 0; | 6758 | return 0; |
6759 | 6759 | ||
6760 | err_out_unmap: | 6760 | err_out_unmap: |
6761 | if (bp->regview) { | 6761 | if (bp->regview) { |
6762 | iounmap(bp->regview); | 6762 | iounmap(bp->regview); |
6763 | bp->regview = NULL; | 6763 | bp->regview = NULL; |
6764 | } | 6764 | } |
6765 | 6765 | ||
6766 | err_out_release: | 6766 | err_out_release: |
6767 | pci_release_regions(pdev); | 6767 | pci_release_regions(pdev); |
6768 | 6768 | ||
6769 | err_out_disable: | 6769 | err_out_disable: |
6770 | pci_disable_device(pdev); | 6770 | pci_disable_device(pdev); |
6771 | pci_set_drvdata(pdev, NULL); | 6771 | pci_set_drvdata(pdev, NULL); |
6772 | 6772 | ||
6773 | err_out: | 6773 | err_out: |
6774 | return rc; | 6774 | return rc; |
6775 | } | 6775 | } |
6776 | 6776 | ||
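The bootcode revision handling in bnx2_init_board() above packs one decimal field per byte of the BNX2_DEV_INFO_BC_REV word, and the inner k = 100/10/1 loop emits each byte without leading zeros. The standalone sketch below reproduces that formatting so the logic can be checked in isolation; the function name and the sample register value are hypothetical.

/* Standalone sketch of the version formatting done in bnx2_init_board():
 * each of the top three bytes of the BC_REV word is printed in decimal,
 * leading zeros suppressed, separated by dots. */
#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

static void format_bc_rev(u32 reg, char *buf)
{
	int i, j = 0;

	for (i = 0; i < 3; i++) {
		u8 num = (u8) (reg >> (24 - (i * 8)));
		u8 k, skip0;

		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				buf[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			buf[j++] = '.';
	}
	buf[j] = '\0';
}

int main(void)
{
	char buf[16];

	format_bc_rev(0x010c2b00, buf);	/* hypothetical value -> "1.12.43" */
	printf("%s\n", buf);
	return 0;
}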
6777 | static char * __devinit | 6777 | static char * __devinit |
6778 | bnx2_bus_string(struct bnx2 *bp, char *str) | 6778 | bnx2_bus_string(struct bnx2 *bp, char *str) |
6779 | { | 6779 | { |
6780 | char *s = str; | 6780 | char *s = str; |
6781 | 6781 | ||
6782 | if (bp->flags & PCIE_FLAG) { | 6782 | if (bp->flags & PCIE_FLAG) { |
6783 | s += sprintf(s, "PCI Express"); | 6783 | s += sprintf(s, "PCI Express"); |
6784 | } else { | 6784 | } else { |
6785 | s += sprintf(s, "PCI"); | 6785 | s += sprintf(s, "PCI"); |
6786 | if (bp->flags & PCIX_FLAG) | 6786 | if (bp->flags & PCIX_FLAG) |
6787 | s += sprintf(s, "-X"); | 6787 | s += sprintf(s, "-X"); |
6788 | if (bp->flags & PCI_32BIT_FLAG) | 6788 | if (bp->flags & PCI_32BIT_FLAG) |
6789 | s += sprintf(s, " 32-bit"); | 6789 | s += sprintf(s, " 32-bit"); |
6790 | else | 6790 | else |
6791 | s += sprintf(s, " 64-bit"); | 6791 | s += sprintf(s, " 64-bit"); |
6792 | s += sprintf(s, " %dMHz", bp->bus_speed_mhz); | 6792 | s += sprintf(s, " %dMHz", bp->bus_speed_mhz); |
6793 | } | 6793 | } |
6794 | return str; | 6794 | return str; |
6795 | } | 6795 | } |
6796 | 6796 | ||
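bnx2_bus_string() builds its description by letting each sprintf() return value advance the output pointer. A tiny standalone illustration of that idiom (the flag values printed here are made up):

/* Illustration of the pointer-advancing sprintf idiom used in
 * bnx2_bus_string() above; the bus parameters are invented. */
#include <stdio.h>

int main(void)
{
	char str[40], *s = str;

	s += sprintf(s, "PCI");
	s += sprintf(s, "-X");
	s += sprintf(s, " %d-bit", 64);
	s += sprintf(s, " %dMHz", 133);
	printf("%s\n", str);		/* "PCI-X 64-bit 133MHz" */
	return 0;
}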
6797 | static int __devinit | 6797 | static int __devinit |
6798 | bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 6798 | bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
6799 | { | 6799 | { |
6800 | static int version_printed = 0; | 6800 | static int version_printed = 0; |
6801 | struct net_device *dev = NULL; | 6801 | struct net_device *dev = NULL; |
6802 | struct bnx2 *bp; | 6802 | struct bnx2 *bp; |
6803 | int rc, i; | 6803 | int rc, i; |
6804 | char str[40]; | 6804 | char str[40]; |
6805 | 6805 | ||
6806 | if (version_printed++ == 0) | 6806 | if (version_printed++ == 0) |
6807 | printk(KERN_INFO "%s", version); | 6807 | printk(KERN_INFO "%s", version); |
6808 | 6808 | ||
6809 | /* dev zeroed in init_etherdev */ | 6809 | /* dev zeroed in init_etherdev */ |
6810 | dev = alloc_etherdev(sizeof(*bp)); | 6810 | dev = alloc_etherdev(sizeof(*bp)); |
6811 | 6811 | ||
6812 | if (!dev) | 6812 | if (!dev) |
6813 | return -ENOMEM; | 6813 | return -ENOMEM; |
6814 | 6814 | ||
6815 | rc = bnx2_init_board(pdev, dev); | 6815 | rc = bnx2_init_board(pdev, dev); |
6816 | if (rc < 0) { | 6816 | if (rc < 0) { |
6817 | free_netdev(dev); | 6817 | free_netdev(dev); |
6818 | return rc; | 6818 | return rc; |
6819 | } | 6819 | } |
6820 | 6820 | ||
6821 | dev->open = bnx2_open; | 6821 | dev->open = bnx2_open; |
6822 | dev->hard_start_xmit = bnx2_start_xmit; | 6822 | dev->hard_start_xmit = bnx2_start_xmit; |
6823 | dev->stop = bnx2_close; | 6823 | dev->stop = bnx2_close; |
6824 | dev->get_stats = bnx2_get_stats; | 6824 | dev->get_stats = bnx2_get_stats; |
6825 | dev->set_multicast_list = bnx2_set_rx_mode; | 6825 | dev->set_multicast_list = bnx2_set_rx_mode; |
6826 | dev->do_ioctl = bnx2_ioctl; | 6826 | dev->do_ioctl = bnx2_ioctl; |
6827 | dev->set_mac_address = bnx2_change_mac_addr; | 6827 | dev->set_mac_address = bnx2_change_mac_addr; |
6828 | dev->change_mtu = bnx2_change_mtu; | 6828 | dev->change_mtu = bnx2_change_mtu; |
6829 | dev->tx_timeout = bnx2_tx_timeout; | 6829 | dev->tx_timeout = bnx2_tx_timeout; |
6830 | dev->watchdog_timeo = TX_TIMEOUT; | 6830 | dev->watchdog_timeo = TX_TIMEOUT; |
6831 | #ifdef BCM_VLAN | 6831 | #ifdef BCM_VLAN |
6832 | dev->vlan_rx_register = bnx2_vlan_rx_register; | 6832 | dev->vlan_rx_register = bnx2_vlan_rx_register; |
6833 | #endif | 6833 | #endif |
6834 | dev->poll = bnx2_poll; | 6834 | dev->poll = bnx2_poll; |
6835 | dev->ethtool_ops = &bnx2_ethtool_ops; | 6835 | dev->ethtool_ops = &bnx2_ethtool_ops; |
6836 | dev->weight = 64; | 6836 | dev->weight = 64; |
6837 | 6837 | ||
6838 | bp = netdev_priv(dev); | 6838 | bp = netdev_priv(dev); |
6839 | 6839 | ||
6840 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) | 6840 | #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) |
6841 | dev->poll_controller = poll_bnx2; | 6841 | dev->poll_controller = poll_bnx2; |
6842 | #endif | 6842 | #endif |
6843 | 6843 | ||
6844 | pci_set_drvdata(pdev, dev); | 6844 | pci_set_drvdata(pdev, dev); |
6845 | 6845 | ||
6846 | memcpy(dev->dev_addr, bp->mac_addr, 6); | 6846 | memcpy(dev->dev_addr, bp->mac_addr, 6); |
6847 | memcpy(dev->perm_addr, bp->mac_addr, 6); | 6847 | memcpy(dev->perm_addr, bp->mac_addr, 6); |
6848 | bp->name = board_info[ent->driver_data].name; | 6848 | bp->name = board_info[ent->driver_data].name; |
6849 | 6849 | ||
6850 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 6850 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
6851 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 6851 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
6852 | dev->features |= NETIF_F_IPV6_CSUM; | 6852 | dev->features |= NETIF_F_IPV6_CSUM; |
6853 | 6853 | ||
6854 | #ifdef BCM_VLAN | 6854 | #ifdef BCM_VLAN |
6855 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 6855 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
6856 | #endif | 6856 | #endif |
6857 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; | 6857 | dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; |
6858 | if (CHIP_NUM(bp) == CHIP_NUM_5709) | 6858 | if (CHIP_NUM(bp) == CHIP_NUM_5709) |
6859 | dev->features |= NETIF_F_TSO6; | 6859 | dev->features |= NETIF_F_TSO6; |
6860 | 6860 | ||
6861 | if ((rc = register_netdev(dev))) { | 6861 | if ((rc = register_netdev(dev))) { |
6862 | dev_err(&pdev->dev, "Cannot register net device\n"); | 6862 | dev_err(&pdev->dev, "Cannot register net device\n"); |
6863 | if (bp->regview) | 6863 | if (bp->regview) |
6864 | iounmap(bp->regview); | 6864 | iounmap(bp->regview); |
6865 | pci_release_regions(pdev); | 6865 | pci_release_regions(pdev); |
6866 | pci_disable_device(pdev); | 6866 | pci_disable_device(pdev); |
6867 | pci_set_drvdata(pdev, NULL); | 6867 | pci_set_drvdata(pdev, NULL); |
6868 | free_netdev(dev); | 6868 | free_netdev(dev); |
6869 | return rc; | 6869 | return rc; |
6870 | } | 6870 | } |
6871 | 6871 | ||
6872 | printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " | 6872 | printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " |
6873 | "IRQ %d, ", | 6873 | "IRQ %d, ", |
6874 | dev->name, | 6874 | dev->name, |
6875 | bp->name, | 6875 | bp->name, |
6876 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', | 6876 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', |
6877 | ((CHIP_ID(bp) & 0x0ff0) >> 4), | 6877 | ((CHIP_ID(bp) & 0x0ff0) >> 4), |
6878 | bnx2_bus_string(bp, str), | 6878 | bnx2_bus_string(bp, str), |
6879 | dev->base_addr, | 6879 | dev->base_addr, |
6880 | bp->pdev->irq); | 6880 | bp->pdev->irq); |
6881 | 6881 | ||
6882 | printk("node addr "); | 6882 | printk("node addr "); |
6883 | for (i = 0; i < 6; i++) | 6883 | for (i = 0; i < 6; i++) |
6884 | printk("%2.2x", dev->dev_addr[i]); | 6884 | printk("%2.2x", dev->dev_addr[i]); |
6885 | printk("\n"); | 6885 | printk("\n"); |
6886 | 6886 | ||
6887 | return 0; | 6887 | return 0; |
6888 | } | 6888 | } |
6889 | 6889 | ||
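bnx2_init_one() above advertises NETIF_F_IPV6_CSUM only on the 5709, so an ethtool -K ethX tx on/off handler for this driver has to toggle both the IPv4 and IPv6 checksum bits on that chip and only NETIF_F_IP_CSUM on the older ones. A hedged sketch of what such a .set_tx_csum handler can look like follows; the function name is illustrative and this is not necessarily the exact helper the kernel uses.

/* Hedged sketch of a .set_tx_csum ethtool handler matching the feature
 * setup in bnx2_init_one(): set or clear IPv4+IPv6 checksum offload on a
 * 5709 and IPv4-only offload on the older chips. Name is illustrative. */
static int
bnx2_set_tx_csum_sketch(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 csum_flags = NETIF_F_IP_CSUM;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		csum_flags |= NETIF_F_IPV6_CSUM;

	if (data)
		dev->features |= csum_flags;
	else
		dev->features &= ~csum_flags;

	return 0;
}

Wired up this way, a single ethtool -K ethX tx off clears both offload bits in one step on a 5709, keeping the advertised features consistent with what the hardware supports.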
6890 | static void __devexit | 6890 | static void __devexit |
6891 | bnx2_remove_one(struct pci_dev *pdev) | 6891 | bnx2_remove_one(struct pci_dev *pdev) |
6892 | { | 6892 | { |
6893 | struct net_device *dev = pci_get_drvdata(pdev); | 6893 | struct net_device *dev = pci_get_drvdata(pdev); |
6894 | struct bnx2 *bp = netdev_priv(dev); | 6894 | struct bnx2 *bp = netdev_priv(dev); |
6895 | 6895 | ||
6896 | flush_scheduled_work(); | 6896 | flush_scheduled_work(); |
6897 | 6897 | ||
6898 | unregister_netdev(dev); | 6898 | unregister_netdev(dev); |
6899 | 6899 | ||
6900 | if (bp->regview) | 6900 | if (bp->regview) |
6901 | iounmap(bp->regview); | 6901 | iounmap(bp->regview); |
6902 | 6902 | ||
6903 | free_netdev(dev); | 6903 | free_netdev(dev); |
6904 | pci_release_regions(pdev); | 6904 | pci_release_regions(pdev); |
6905 | pci_disable_device(pdev); | 6905 | pci_disable_device(pdev); |
6906 | pci_set_drvdata(pdev, NULL); | 6906 | pci_set_drvdata(pdev, NULL); |
6907 | } | 6907 | } |
6908 | 6908 | ||
6909 | static int | 6909 | static int |
6910 | bnx2_suspend(struct pci_dev *pdev, pm_message_t state) | 6910 | bnx2_suspend(struct pci_dev *pdev, pm_message_t state) |
6911 | { | 6911 | { |
6912 | struct net_device *dev = pci_get_drvdata(pdev); | 6912 | struct net_device *dev = pci_get_drvdata(pdev); |
6913 | struct bnx2 *bp = netdev_priv(dev); | 6913 | struct bnx2 *bp = netdev_priv(dev); |
6914 | u32 reset_code; | 6914 | u32 reset_code; |
6915 | 6915 | ||
6916 | if (!netif_running(dev)) | 6916 | if (!netif_running(dev)) |
6917 | return 0; | 6917 | return 0; |
6918 | 6918 | ||
6919 | flush_scheduled_work(); | 6919 | flush_scheduled_work(); |
6920 | bnx2_netif_stop(bp); | 6920 | bnx2_netif_stop(bp); |
6921 | netif_device_detach(dev); | 6921 | netif_device_detach(dev); |
6922 | del_timer_sync(&bp->timer); | 6922 | del_timer_sync(&bp->timer); |
6923 | if (bp->flags & NO_WOL_FLAG) | 6923 | if (bp->flags & NO_WOL_FLAG) |
6924 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; | 6924 | reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; |
6925 | else if (bp->wol) | 6925 | else if (bp->wol) |
6926 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; | 6926 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; |
6927 | else | 6927 | else |
6928 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; | 6928 | reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; |
6929 | bnx2_reset_chip(bp, reset_code); | 6929 | bnx2_reset_chip(bp, reset_code); |
6930 | bnx2_free_skbs(bp); | 6930 | bnx2_free_skbs(bp); |
6931 | pci_save_state(pdev); | 6931 | pci_save_state(pdev); |
6932 | bnx2_set_power_state(bp, pci_choose_state(pdev, state)); | 6932 | bnx2_set_power_state(bp, pci_choose_state(pdev, state)); |
6933 | return 0; | 6933 | return 0; |
6934 | } | 6934 | } |
6935 | 6935 | ||
6936 | static int | 6936 | static int |
6937 | bnx2_resume(struct pci_dev *pdev) | 6937 | bnx2_resume(struct pci_dev *pdev) |
6938 | { | 6938 | { |
6939 | struct net_device *dev = pci_get_drvdata(pdev); | 6939 | struct net_device *dev = pci_get_drvdata(pdev); |
6940 | struct bnx2 *bp = netdev_priv(dev); | 6940 | struct bnx2 *bp = netdev_priv(dev); |
6941 | 6941 | ||
6942 | if (!netif_running(dev)) | 6942 | if (!netif_running(dev)) |
6943 | return 0; | 6943 | return 0; |
6944 | 6944 | ||
6945 | pci_restore_state(pdev); | 6945 | pci_restore_state(pdev); |
6946 | bnx2_set_power_state(bp, PCI_D0); | 6946 | bnx2_set_power_state(bp, PCI_D0); |
6947 | netif_device_attach(dev); | 6947 | netif_device_attach(dev); |
6948 | bnx2_init_nic(bp); | 6948 | bnx2_init_nic(bp); |
6949 | bnx2_netif_start(bp); | 6949 | bnx2_netif_start(bp); |
6950 | return 0; | 6950 | return 0; |
6951 | } | 6951 | } |
6952 | 6952 | ||
6953 | static struct pci_driver bnx2_pci_driver = { | 6953 | static struct pci_driver bnx2_pci_driver = { |
6954 | .name = DRV_MODULE_NAME, | 6954 | .name = DRV_MODULE_NAME, |
6955 | .id_table = bnx2_pci_tbl, | 6955 | .id_table = bnx2_pci_tbl, |
6956 | .probe = bnx2_init_one, | 6956 | .probe = bnx2_init_one, |
6957 | .remove = __devexit_p(bnx2_remove_one), | 6957 | .remove = __devexit_p(bnx2_remove_one), |
6958 | .suspend = bnx2_suspend, | 6958 | .suspend = bnx2_suspend, |
6959 | .resume = bnx2_resume, | 6959 | .resume = bnx2_resume, |
6960 | }; | 6960 | }; |
6961 | 6961 | ||
6962 | static int __init bnx2_init(void) | 6962 | static int __init bnx2_init(void) |
6963 | { | 6963 | { |
6964 | return pci_register_driver(&bnx2_pci_driver); | 6964 | return pci_register_driver(&bnx2_pci_driver); |
6965 | } | 6965 | } |
6966 | 6966 | ||
6967 | static void __exit bnx2_cleanup(void) | 6967 | static void __exit bnx2_cleanup(void) |
6968 | { | 6968 | { |
6969 | pci_unregister_driver(&bnx2_pci_driver); | 6969 | pci_unregister_driver(&bnx2_pci_driver); |
6970 | } | 6970 | } |
6971 | 6971 | ||
6972 | module_init(bnx2_init); | 6972 | module_init(bnx2_init); |
6973 | module_exit(bnx2_cleanup); | 6973 | module_exit(bnx2_cleanup); |
6974 | 6974 | ||
6975 | 6975 | ||
6976 | 6976 | ||
6977 | 6977 |
drivers/net/tg3.c
1 | /* | 1 | /* |
2 | * tg3.c: Broadcom Tigon3 ethernet driver. | 2 | * tg3.c: Broadcom Tigon3 ethernet driver. |
3 | * | 3 | * |
4 | * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) |
5 | * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) | 5 | * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) |
6 | * Copyright (C) 2004 Sun Microsystems Inc. | 6 | * Copyright (C) 2004 Sun Microsystems Inc. |
7 | * Copyright (C) 2005-2007 Broadcom Corporation. | 7 | * Copyright (C) 2005-2007 Broadcom Corporation. |
8 | * | 8 | * |
9 | * Firmware is: | 9 | * Firmware is: |
10 | * Derived from proprietary unpublished source code, | 10 | * Derived from proprietary unpublished source code, |
11 | * Copyright (C) 2000-2003 Broadcom Corporation. | 11 | * Copyright (C) 2000-2003 Broadcom Corporation. |
12 | * | 12 | * |
13 | * Permission is hereby granted for the distribution of this firmware | 13 | * Permission is hereby granted for the distribution of this firmware |
14 | * data in hexadecimal or equivalent format, provided this copyright | 14 | * data in hexadecimal or equivalent format, provided this copyright |
15 | * notice is accompanying it. | 15 | * notice is accompanying it. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/compiler.h> | 23 | #include <linux/compiler.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/in.h> | 26 | #include <linux/in.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/ioport.h> | 28 | #include <linux/ioport.h> |
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
32 | #include <linux/skbuff.h> | 32 | #include <linux/skbuff.h> |
33 | #include <linux/ethtool.h> | 33 | #include <linux/ethtool.h> |
34 | #include <linux/mii.h> | 34 | #include <linux/mii.h> |
35 | #include <linux/if_vlan.h> | 35 | #include <linux/if_vlan.h> |
36 | #include <linux/ip.h> | 36 | #include <linux/ip.h> |
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
39 | #include <linux/prefetch.h> | 39 | #include <linux/prefetch.h> |
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | 41 | ||
42 | #include <net/checksum.h> | 42 | #include <net/checksum.h> |
43 | #include <net/ip.h> | 43 | #include <net/ip.h> |
44 | 44 | ||
45 | #include <asm/system.h> | 45 | #include <asm/system.h> |
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
48 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
49 | 49 | ||
50 | #ifdef CONFIG_SPARC | 50 | #ifdef CONFIG_SPARC |
51 | #include <asm/idprom.h> | 51 | #include <asm/idprom.h> |
52 | #include <asm/prom.h> | 52 | #include <asm/prom.h> |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 55 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
56 | #define TG3_VLAN_TAG_USED 1 | 56 | #define TG3_VLAN_TAG_USED 1 |
57 | #else | 57 | #else |
58 | #define TG3_VLAN_TAG_USED 0 | 58 | #define TG3_VLAN_TAG_USED 0 |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #define TG3_TSO_SUPPORT 1 | 61 | #define TG3_TSO_SUPPORT 1 |
62 | 62 | ||
63 | #include "tg3.h" | 63 | #include "tg3.h" |
64 | 64 | ||
65 | #define DRV_MODULE_NAME "tg3" | 65 | #define DRV_MODULE_NAME "tg3" |
66 | #define PFX DRV_MODULE_NAME ": " | 66 | #define PFX DRV_MODULE_NAME ": " |
67 | #define DRV_MODULE_VERSION "3.78" | 67 | #define DRV_MODULE_VERSION "3.78" |
68 | #define DRV_MODULE_RELDATE "July 11, 2007" | 68 | #define DRV_MODULE_RELDATE "July 11, 2007" |
69 | 69 | ||
70 | #define TG3_DEF_MAC_MODE 0 | 70 | #define TG3_DEF_MAC_MODE 0 |
71 | #define TG3_DEF_RX_MODE 0 | 71 | #define TG3_DEF_RX_MODE 0 |
72 | #define TG3_DEF_TX_MODE 0 | 72 | #define TG3_DEF_TX_MODE 0 |
73 | #define TG3_DEF_MSG_ENABLE \ | 73 | #define TG3_DEF_MSG_ENABLE \ |
74 | (NETIF_MSG_DRV | \ | 74 | (NETIF_MSG_DRV | \ |
75 | NETIF_MSG_PROBE | \ | 75 | NETIF_MSG_PROBE | \ |
76 | NETIF_MSG_LINK | \ | 76 | NETIF_MSG_LINK | \ |
77 | NETIF_MSG_TIMER | \ | 77 | NETIF_MSG_TIMER | \ |
78 | NETIF_MSG_IFDOWN | \ | 78 | NETIF_MSG_IFDOWN | \ |
79 | NETIF_MSG_IFUP | \ | 79 | NETIF_MSG_IFUP | \ |
80 | NETIF_MSG_RX_ERR | \ | 80 | NETIF_MSG_RX_ERR | \ |
81 | NETIF_MSG_TX_ERR) | 81 | NETIF_MSG_TX_ERR) |
82 | 82 | ||
83 | /* length of time before we decide the hardware is borked, | 83 | /* length of time before we decide the hardware is borked, |
84 | * and dev->tx_timeout() should be called to fix the problem | 84 | * and dev->tx_timeout() should be called to fix the problem |
85 | */ | 85 | */ |
86 | #define TG3_TX_TIMEOUT (5 * HZ) | 86 | #define TG3_TX_TIMEOUT (5 * HZ) |
87 | 87 | ||
88 | /* hardware minimum and maximum for a single frame's data payload */ | 88 | /* hardware minimum and maximum for a single frame's data payload */ |
89 | #define TG3_MIN_MTU 60 | 89 | #define TG3_MIN_MTU 60 |
90 | #define TG3_MAX_MTU(tp) \ | 90 | #define TG3_MAX_MTU(tp) \ |
91 | ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500) | 91 | ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500) |
92 | 92 | ||
93 | /* These numbers seem to be hard coded in the NIC firmware somehow. | 93 | /* These numbers seem to be hard coded in the NIC firmware somehow. |
94 | * You can't change the ring sizes, but you can change where you place | 94 | * You can't change the ring sizes, but you can change where you place |
95 | * them in the NIC onboard memory. | 95 | * them in the NIC onboard memory. |
96 | */ | 96 | */ |
97 | #define TG3_RX_RING_SIZE 512 | 97 | #define TG3_RX_RING_SIZE 512 |
98 | #define TG3_DEF_RX_RING_PENDING 200 | 98 | #define TG3_DEF_RX_RING_PENDING 200 |
99 | #define TG3_RX_JUMBO_RING_SIZE 256 | 99 | #define TG3_RX_JUMBO_RING_SIZE 256 |
100 | #define TG3_DEF_RX_JUMBO_RING_PENDING 100 | 100 | #define TG3_DEF_RX_JUMBO_RING_PENDING 100 |
101 | 101 | ||
102 | /* Do not place this n-ring entries value into the tp struct itself, | 102 | /* Do not place this n-ring entries value into the tp struct itself, |
103 | * we really want to expose these constants to GCC so that modulo et | 103 | * we really want to expose these constants to GCC so that modulo et |
104 | * al. operations are done with shifts and masks instead of with | 104 | * al. operations are done with shifts and masks instead of with |
105 | * hw multiply/modulo instructions. Another solution would be to | 105 | * hw multiply/modulo instructions. Another solution would be to |
106 | * replace things like '% foo' with '& (foo - 1)'. | 106 | * replace things like '% foo' with '& (foo - 1)'. |
107 | */ | 107 | */ |
108 | #define TG3_RX_RCB_RING_SIZE(tp) \ | 108 | #define TG3_RX_RCB_RING_SIZE(tp) \ |
109 | ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024) | 109 | ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024) |
110 | 110 | ||
111 | #define TG3_TX_RING_SIZE 512 | 111 | #define TG3_TX_RING_SIZE 512 |
112 | #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) | 112 | #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) |
113 | 113 | ||
114 | #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ | 114 | #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ |
115 | TG3_RX_RING_SIZE) | 115 | TG3_RX_RING_SIZE) |
116 | #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ | 116 | #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ |
117 | TG3_RX_JUMBO_RING_SIZE) | 117 | TG3_RX_JUMBO_RING_SIZE) |
118 | #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ | 118 | #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ |
119 | TG3_RX_RCB_RING_SIZE(tp)) | 119 | TG3_RX_RCB_RING_SIZE(tp)) |
120 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ | 120 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ |
121 | TG3_TX_RING_SIZE) | 121 | TG3_TX_RING_SIZE) |
122 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) | 122 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) |
123 | 123 | ||
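The comment block above keeps the ring sizes as compile-time powers of two precisely so that wraparound arithmetic such as NEXT_TX() can use a mask instead of a modulo. A hypothetical standalone check of that equivalence for the 512-entry TX ring:

/* Hypothetical check that the mask in NEXT_TX() is equivalent to a
 * modulo by the power-of-two ring size. */
#include <assert.h>

#define RING_SIZE 512				/* mirrors TG3_TX_RING_SIZE */
#define NEXT_IDX(n) (((n) + 1) & (RING_SIZE - 1))

int main(void)
{
	unsigned int n;

	for (n = 0; n < 2 * RING_SIZE; n++)
		assert(NEXT_IDX(n) == ((n + 1) % RING_SIZE));

	return 0;
}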
124 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) | 124 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) |
125 | #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64) | 125 | #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64) |
126 | 126 | ||
127 | /* minimum number of free TX descriptors required to wake up TX process */ | 127 | /* minimum number of free TX descriptors required to wake up TX process */ |
128 | #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4) | 128 | #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4) |
129 | 129 | ||
130 | /* number of ETHTOOL_GSTATS u64's */ | 130 | /* number of ETHTOOL_GSTATS u64's */ |
131 | #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) | 131 | #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) |
132 | 132 | ||
133 | #define TG3_NUM_TEST 6 | 133 | #define TG3_NUM_TEST 6 |
134 | 134 | ||
135 | static char version[] __devinitdata = | 135 | static char version[] __devinitdata = |
136 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 136 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
137 | 137 | ||
138 | MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); | 138 | MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); |
139 | MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); | 139 | MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); |
140 | MODULE_LICENSE("GPL"); | 140 | MODULE_LICENSE("GPL"); |
141 | MODULE_VERSION(DRV_MODULE_VERSION); | 141 | MODULE_VERSION(DRV_MODULE_VERSION); |
142 | 142 | ||
143 | static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ | 143 | static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ |
144 | module_param(tg3_debug, int, 0); | 144 | module_param(tg3_debug, int, 0); |
145 | MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); | 145 | MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); |
146 | 146 | ||
147 | static struct pci_device_id tg3_pci_tbl[] = { | 147 | static struct pci_device_id tg3_pci_tbl[] = { |
148 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, | 148 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, |
149 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, | 149 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, |
150 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, | 150 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, |
151 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, | 151 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, |
152 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, | 152 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, |
153 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, | 153 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, |
154 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, | 154 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, |
155 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, | 155 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, |
156 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, | 156 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, |
157 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, | 157 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, |
158 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, | 158 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, |
159 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, | 159 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, |
160 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, | 160 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, |
161 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, | 161 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, |
162 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, | 162 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, |
163 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, | 163 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, |
164 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, | 164 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, |
165 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, | 165 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, |
166 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, | 166 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, |
167 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, | 167 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, |
168 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, | 168 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, |
169 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, | 169 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, |
170 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)}, | 170 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)}, |
171 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, | 171 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, |
172 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, | 172 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, |
173 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, | 173 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, |
174 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, | 174 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, |
175 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)}, | 175 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)}, |
176 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, | 176 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, |
177 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, | 177 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, |
178 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, | 178 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, |
179 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, | 179 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, |
180 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, | 180 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, |
181 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, | 181 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, |
182 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, | 182 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, |
183 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, | 183 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, |
184 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, | 184 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, |
185 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, | 185 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, |
186 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, | 186 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, |
187 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, | 187 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, |
188 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, | 188 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, |
189 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, | 189 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, |
190 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, | 190 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, |
191 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, | 191 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, |
192 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, | 192 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, |
193 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, | 193 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, |
194 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, | 194 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, |
195 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, | 195 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, |
196 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, | 196 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, |
197 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, | 197 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, |
198 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, | 198 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, |
199 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, | 199 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, |
200 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, | 200 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, |
201 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, | 201 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, |
202 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, | 202 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, |
203 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, | 203 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, |
204 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, | 204 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, |
205 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, | 205 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, |
206 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, | 206 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, |
207 | {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, | 207 | {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, |
208 | {} | 208 | {} |
209 | }; | 209 | }; |
210 | 210 | ||
211 | MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); | 211 | MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); |
212 | 212 | ||
213 | static const struct { | 213 | static const struct { |
214 | const char string[ETH_GSTRING_LEN]; | 214 | const char string[ETH_GSTRING_LEN]; |
215 | } ethtool_stats_keys[TG3_NUM_STATS] = { | 215 | } ethtool_stats_keys[TG3_NUM_STATS] = { |
216 | { "rx_octets" }, | 216 | { "rx_octets" }, |
217 | { "rx_fragments" }, | 217 | { "rx_fragments" }, |
218 | { "rx_ucast_packets" }, | 218 | { "rx_ucast_packets" }, |
219 | { "rx_mcast_packets" }, | 219 | { "rx_mcast_packets" }, |
220 | { "rx_bcast_packets" }, | 220 | { "rx_bcast_packets" }, |
221 | { "rx_fcs_errors" }, | 221 | { "rx_fcs_errors" }, |
222 | { "rx_align_errors" }, | 222 | { "rx_align_errors" }, |
223 | { "rx_xon_pause_rcvd" }, | 223 | { "rx_xon_pause_rcvd" }, |
224 | { "rx_xoff_pause_rcvd" }, | 224 | { "rx_xoff_pause_rcvd" }, |
225 | { "rx_mac_ctrl_rcvd" }, | 225 | { "rx_mac_ctrl_rcvd" }, |
226 | { "rx_xoff_entered" }, | 226 | { "rx_xoff_entered" }, |
227 | { "rx_frame_too_long_errors" }, | 227 | { "rx_frame_too_long_errors" }, |
228 | { "rx_jabbers" }, | 228 | { "rx_jabbers" }, |
229 | { "rx_undersize_packets" }, | 229 | { "rx_undersize_packets" }, |
230 | { "rx_in_length_errors" }, | 230 | { "rx_in_length_errors" }, |
231 | { "rx_out_length_errors" }, | 231 | { "rx_out_length_errors" }, |
232 | { "rx_64_or_less_octet_packets" }, | 232 | { "rx_64_or_less_octet_packets" }, |
233 | { "rx_65_to_127_octet_packets" }, | 233 | { "rx_65_to_127_octet_packets" }, |
234 | { "rx_128_to_255_octet_packets" }, | 234 | { "rx_128_to_255_octet_packets" }, |
235 | { "rx_256_to_511_octet_packets" }, | 235 | { "rx_256_to_511_octet_packets" }, |
236 | { "rx_512_to_1023_octet_packets" }, | 236 | { "rx_512_to_1023_octet_packets" }, |
237 | { "rx_1024_to_1522_octet_packets" }, | 237 | { "rx_1024_to_1522_octet_packets" }, |
238 | { "rx_1523_to_2047_octet_packets" }, | 238 | { "rx_1523_to_2047_octet_packets" }, |
239 | { "rx_2048_to_4095_octet_packets" }, | 239 | { "rx_2048_to_4095_octet_packets" }, |
240 | { "rx_4096_to_8191_octet_packets" }, | 240 | { "rx_4096_to_8191_octet_packets" }, |
241 | { "rx_8192_to_9022_octet_packets" }, | 241 | { "rx_8192_to_9022_octet_packets" }, |
242 | 242 | ||
243 | { "tx_octets" }, | 243 | { "tx_octets" }, |
244 | { "tx_collisions" }, | 244 | { "tx_collisions" }, |
245 | 245 | ||
246 | { "tx_xon_sent" }, | 246 | { "tx_xon_sent" }, |
247 | { "tx_xoff_sent" }, | 247 | { "tx_xoff_sent" }, |
248 | { "tx_flow_control" }, | 248 | { "tx_flow_control" }, |
249 | { "tx_mac_errors" }, | 249 | { "tx_mac_errors" }, |
250 | { "tx_single_collisions" }, | 250 | { "tx_single_collisions" }, |
251 | { "tx_mult_collisions" }, | 251 | { "tx_mult_collisions" }, |
252 | { "tx_deferred" }, | 252 | { "tx_deferred" }, |
253 | { "tx_excessive_collisions" }, | 253 | { "tx_excessive_collisions" }, |
254 | { "tx_late_collisions" }, | 254 | { "tx_late_collisions" }, |
255 | { "tx_collide_2times" }, | 255 | { "tx_collide_2times" }, |
256 | { "tx_collide_3times" }, | 256 | { "tx_collide_3times" }, |
257 | { "tx_collide_4times" }, | 257 | { "tx_collide_4times" }, |
258 | { "tx_collide_5times" }, | 258 | { "tx_collide_5times" }, |
259 | { "tx_collide_6times" }, | 259 | { "tx_collide_6times" }, |
260 | { "tx_collide_7times" }, | 260 | { "tx_collide_7times" }, |
261 | { "tx_collide_8times" }, | 261 | { "tx_collide_8times" }, |
262 | { "tx_collide_9times" }, | 262 | { "tx_collide_9times" }, |
263 | { "tx_collide_10times" }, | 263 | { "tx_collide_10times" }, |
264 | { "tx_collide_11times" }, | 264 | { "tx_collide_11times" }, |
265 | { "tx_collide_12times" }, | 265 | { "tx_collide_12times" }, |
266 | { "tx_collide_13times" }, | 266 | { "tx_collide_13times" }, |
267 | { "tx_collide_14times" }, | 267 | { "tx_collide_14times" }, |
268 | { "tx_collide_15times" }, | 268 | { "tx_collide_15times" }, |
269 | { "tx_ucast_packets" }, | 269 | { "tx_ucast_packets" }, |
270 | { "tx_mcast_packets" }, | 270 | { "tx_mcast_packets" }, |
271 | { "tx_bcast_packets" }, | 271 | { "tx_bcast_packets" }, |
272 | { "tx_carrier_sense_errors" }, | 272 | { "tx_carrier_sense_errors" }, |
273 | { "tx_discards" }, | 273 | { "tx_discards" }, |
274 | { "tx_errors" }, | 274 | { "tx_errors" }, |
275 | 275 | ||
276 | { "dma_writeq_full" }, | 276 | { "dma_writeq_full" }, |
277 | { "dma_write_prioq_full" }, | 277 | { "dma_write_prioq_full" }, |
278 | { "rxbds_empty" }, | 278 | { "rxbds_empty" }, |
279 | { "rx_discards" }, | 279 | { "rx_discards" }, |
280 | { "rx_errors" }, | 280 | { "rx_errors" }, |
281 | { "rx_threshold_hit" }, | 281 | { "rx_threshold_hit" }, |
282 | 282 | ||
283 | { "dma_readq_full" }, | 283 | { "dma_readq_full" }, |
284 | { "dma_read_prioq_full" }, | 284 | { "dma_read_prioq_full" }, |
285 | { "tx_comp_queue_full" }, | 285 | { "tx_comp_queue_full" }, |
286 | 286 | ||
287 | { "ring_set_send_prod_index" }, | 287 | { "ring_set_send_prod_index" }, |
288 | { "ring_status_update" }, | 288 | { "ring_status_update" }, |
289 | { "nic_irqs" }, | 289 | { "nic_irqs" }, |
290 | { "nic_avoided_irqs" }, | 290 | { "nic_avoided_irqs" }, |
291 | { "nic_tx_threshold_hit" } | 291 | { "nic_tx_threshold_hit" } |
292 | }; | 292 | }; |
293 | 293 | ||
294 | static const struct { | 294 | static const struct { |
295 | const char string[ETH_GSTRING_LEN]; | 295 | const char string[ETH_GSTRING_LEN]; |
296 | } ethtool_test_keys[TG3_NUM_TEST] = { | 296 | } ethtool_test_keys[TG3_NUM_TEST] = { |
297 | { "nvram test (online) " }, | 297 | { "nvram test (online) " }, |
298 | { "link test (online) " }, | 298 | { "link test (online) " }, |
299 | { "register test (offline)" }, | 299 | { "register test (offline)" }, |
300 | { "memory test (offline)" }, | 300 | { "memory test (offline)" }, |
301 | { "loopback test (offline)" }, | 301 | { "loopback test (offline)" }, |
302 | { "interrupt test (offline)" }, | 302 | { "interrupt test (offline)" }, |
303 | }; | 303 | }; |
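
Editor's note: these fixed-width key tables feed ethtool's string sets, and a driver typically copies them out verbatim from its get_strings handler. A minimal sketch of that pattern (a handler along these lines is not part of this hunk; the name and switch layout follow the usual ethtool convention):

	static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	{
		switch (stringset) {
		case ETH_SS_STATS:
			/* each entry is exactly ETH_GSTRING_LEN bytes, so a flat copy works */
			memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
			break;
		case ETH_SS_TEST:
			memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
			break;
		}
	}
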
304 | 304 | ||
305 | static void tg3_write32(struct tg3 *tp, u32 off, u32 val) | 305 | static void tg3_write32(struct tg3 *tp, u32 off, u32 val) |
306 | { | 306 | { |
307 | writel(val, tp->regs + off); | 307 | writel(val, tp->regs + off); |
308 | } | 308 | } |
309 | 309 | ||
310 | static u32 tg3_read32(struct tg3 *tp, u32 off) | 310 | static u32 tg3_read32(struct tg3 *tp, u32 off) |
311 | { | 311 | { |
312 | return (readl(tp->regs + off)); | 312 | return (readl(tp->regs + off)); |
313 | } | 313 | } |
314 | 314 | ||
315 | static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) | 315 | static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) |
316 | { | 316 | { |
317 | unsigned long flags; | 317 | unsigned long flags; |
318 | 318 | ||
319 | spin_lock_irqsave(&tp->indirect_lock, flags); | 319 | spin_lock_irqsave(&tp->indirect_lock, flags); |
320 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); | 320 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); |
321 | pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); | 321 | pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); |
322 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 322 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
323 | } | 323 | } |
324 | 324 | ||
325 | static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) | 325 | static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) |
326 | { | 326 | { |
327 | writel(val, tp->regs + off); | 327 | writel(val, tp->regs + off); |
328 | readl(tp->regs + off); | 328 | readl(tp->regs + off); |
329 | } | 329 | } |
330 | 330 | ||
331 | static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) | 331 | static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) |
332 | { | 332 | { |
333 | unsigned long flags; | 333 | unsigned long flags; |
334 | u32 val; | 334 | u32 val; |
335 | 335 | ||
336 | spin_lock_irqsave(&tp->indirect_lock, flags); | 336 | spin_lock_irqsave(&tp->indirect_lock, flags); |
337 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); | 337 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); |
338 | pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); | 338 | pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); |
339 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 339 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
340 | return val; | 340 | return val; |
341 | } | 341 | } |
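
Editor's note: the indirect accessors above are the classic address/data window: the register offset is written to a config-space window register, then the data port is read or written, with a spinlock keeping the two config cycles paired. Stripped of the tg3 specifics, the idea reduces to the sketch below (illustrative only; WINDOW_ADDR and WINDOW_DATA stand in for the device's window registers, TG3PCI_REG_BASE_ADDR and TG3PCI_REG_DATA here):

	/* Illustrative only: generic address/data window access through
	 * PCI config space.
	 */
	static u32 indirect_read(struct pci_dev *pdev, spinlock_t *lock, u32 off)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(lock, flags);	/* keep the addr + data cycles atomic */
		pci_write_config_dword(pdev, WINDOW_ADDR, off);
		pci_read_config_dword(pdev, WINDOW_DATA, &val);
		spin_unlock_irqrestore(lock, flags);
		return val;
	}
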
342 | 342 | ||
343 | static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) | 343 | static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) |
344 | { | 344 | { |
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | 346 | ||
347 | if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { | 347 | if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { |
348 | pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + | 348 | pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + |
349 | TG3_64BIT_REG_LOW, val); | 349 | TG3_64BIT_REG_LOW, val); |
350 | return; | 350 | return; |
351 | } | 351 | } |
352 | if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { | 352 | if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { |
353 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + | 353 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + |
354 | TG3_64BIT_REG_LOW, val); | 354 | TG3_64BIT_REG_LOW, val); |
355 | return; | 355 | return; |
356 | } | 356 | } |
357 | 357 | ||
358 | spin_lock_irqsave(&tp->indirect_lock, flags); | 358 | spin_lock_irqsave(&tp->indirect_lock, flags); |
359 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); | 359 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); |
360 | pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); | 360 | pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); |
361 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 361 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
362 | 362 | ||
363 | /* In indirect mode when disabling interrupts, we also need | 363 | /* In indirect mode when disabling interrupts, we also need |
364 | * to clear the interrupt bit in the GRC local ctrl register. | 364 | * to clear the interrupt bit in the GRC local ctrl register. |
365 | */ | 365 | */ |
366 | if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && | 366 | if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && |
367 | (val == 0x1)) { | 367 | (val == 0x1)) { |
368 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, | 368 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, |
369 | tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); | 369 | tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); |
370 | } | 370 | } |
371 | } | 371 | } |
372 | 372 | ||
373 | static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) | 373 | static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) |
374 | { | 374 | { |
375 | unsigned long flags; | 375 | unsigned long flags; |
376 | u32 val; | 376 | u32 val; |
377 | 377 | ||
378 | spin_lock_irqsave(&tp->indirect_lock, flags); | 378 | spin_lock_irqsave(&tp->indirect_lock, flags); |
379 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); | 379 | pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); |
380 | pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); | 380 | pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); |
381 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 381 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
382 | return val; | 382 | return val; |
383 | } | 383 | } |
384 | 384 | ||
385 | /* usec_wait specifies the wait time in usec when writing to certain registers | 385 | /* usec_wait specifies the wait time in usec when writing to certain registers |
386 | * where it is unsafe to read back the register without some delay. | 386 | * where it is unsafe to read back the register without some delay. |
387 | * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. | 387 | * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. |
388 | * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. | 388 | * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. |
389 | */ | 389 | */ |
390 | static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) | 390 | static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) |
391 | { | 391 | { |
392 | if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) || | 392 | if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) || |
393 | (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) | 393 | (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) |
394 | /* Non-posted methods */ | 394 | /* Non-posted methods */ |
395 | tp->write32(tp, off, val); | 395 | tp->write32(tp, off, val); |
396 | else { | 396 | else { |
397 | /* Posted method */ | 397 | /* Posted method */ |
398 | tg3_write32(tp, off, val); | 398 | tg3_write32(tp, off, val); |
399 | if (usec_wait) | 399 | if (usec_wait) |
400 | udelay(usec_wait); | 400 | udelay(usec_wait); |
401 | tp->read32(tp, off); | 401 | tp->read32(tp, off); |
402 | } | 402 | } |
403 | /* Wait again after the read for the posted method to guarantee that | 403 | /* Wait again after the read for the posted method to guarantee that |
404 | * the wait time is met. | 404 | * the wait time is met. |
405 | */ | 405 | */ |
406 | if (usec_wait) | 406 | if (usec_wait) |
407 | udelay(usec_wait); | 407 | udelay(usec_wait); |
408 | } | 408 | } |
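
Editor's note: the flush variant relies on the usual MMIO rule that a posted write is only guaranteed to have reached the device once something is read back from it, so the optional delay runs only after the write has actually landed. A self-contained sketch of that read-back idiom (the helper name and register pointer are illustrative, not from the driver):

	static void mmio_write_flush(void __iomem *reg, u32 val, unsigned int usec)
	{
		writel(val, reg);
		readl(reg);		/* read back to force the posted write out */
		if (usec)
			udelay(usec);	/* delay starts only after the write completed */
	}
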
409 | 409 | ||
410 | static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) | 410 | static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) |
411 | { | 411 | { |
412 | tp->write32_mbox(tp, off, val); | 412 | tp->write32_mbox(tp, off, val); |
413 | if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && | 413 | if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && |
414 | !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) | 414 | !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) |
415 | tp->read32_mbox(tp, off); | 415 | tp->read32_mbox(tp, off); |
416 | } | 416 | } |
417 | 417 | ||
418 | static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) | 418 | static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) |
419 | { | 419 | { |
420 | void __iomem *mbox = tp->regs + off; | 420 | void __iomem *mbox = tp->regs + off; |
421 | writel(val, mbox); | 421 | writel(val, mbox); |
422 | if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) | 422 | if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) |
423 | writel(val, mbox); | 423 | writel(val, mbox); |
424 | if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) | 424 | if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) |
425 | readl(mbox); | 425 | readl(mbox); |
426 | } | 426 | } |
427 | 427 | ||
428 | static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) | 428 | static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) |
429 | { | 429 | { |
430 | return (readl(tp->regs + off + GRCMBOX_BASE)); | 430 | return (readl(tp->regs + off + GRCMBOX_BASE)); |
431 | } | 431 | } |
432 | 432 | ||
433 | static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) | 433 | static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) |
434 | { | 434 | { |
435 | writel(val, tp->regs + off + GRCMBOX_BASE); | 435 | writel(val, tp->regs + off + GRCMBOX_BASE); |
436 | } | 436 | } |
437 | 437 | ||
438 | #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) | 438 | #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) |
439 | #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) | 439 | #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) |
440 | #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) | 440 | #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) |
441 | #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) | 441 | #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) |
442 | #define tr32_mailbox(reg) tp->read32_mbox(tp, reg) | 442 | #define tr32_mailbox(reg) tp->read32_mbox(tp, reg) |
443 | 443 | ||
444 | #define tw32(reg,val) tp->write32(tp, reg, val) | 444 | #define tw32(reg,val) tp->write32(tp, reg, val) |
445 | #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0) | 445 | #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0) |
446 | #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us)) | 446 | #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us)) |
447 | #define tr32(reg) tp->read32(tp, reg) | 447 | #define tr32(reg) tp->read32(tp, reg) |
448 | 448 | ||
449 | static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) | 449 | static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) |
450 | { | 450 | { |
451 | unsigned long flags; | 451 | unsigned long flags; |
452 | 452 | ||
453 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && | 453 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && |
454 | (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) | 454 | (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) |
455 | return; | 455 | return; |
456 | 456 | ||
457 | spin_lock_irqsave(&tp->indirect_lock, flags); | 457 | spin_lock_irqsave(&tp->indirect_lock, flags); |
458 | if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { | 458 | if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { |
459 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); | 459 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); |
460 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); | 460 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); |
461 | 461 | ||
462 | /* Always leave this as zero. */ | 462 | /* Always leave this as zero. */ |
463 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); | 463 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); |
464 | } else { | 464 | } else { |
465 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); | 465 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); |
466 | tw32_f(TG3PCI_MEM_WIN_DATA, val); | 466 | tw32_f(TG3PCI_MEM_WIN_DATA, val); |
467 | 467 | ||
468 | /* Always leave this as zero. */ | 468 | /* Always leave this as zero. */ |
469 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 469 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
470 | } | 470 | } |
471 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 471 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
472 | } | 472 | } |
473 | 473 | ||
474 | static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) | 474 | static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) |
475 | { | 475 | { |
476 | unsigned long flags; | 476 | unsigned long flags; |
477 | 477 | ||
478 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && | 478 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && |
479 | (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { | 479 | (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { |
480 | *val = 0; | 480 | *val = 0; |
481 | return; | 481 | return; |
482 | } | 482 | } |
483 | 483 | ||
484 | spin_lock_irqsave(&tp->indirect_lock, flags); | 484 | spin_lock_irqsave(&tp->indirect_lock, flags); |
485 | if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { | 485 | if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { |
486 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); | 486 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); |
487 | pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); | 487 | pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); |
488 | 488 | ||
489 | /* Always leave this as zero. */ | 489 | /* Always leave this as zero. */ |
490 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); | 490 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); |
491 | } else { | 491 | } else { |
492 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); | 492 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); |
493 | *val = tr32(TG3PCI_MEM_WIN_DATA); | 493 | *val = tr32(TG3PCI_MEM_WIN_DATA); |
494 | 494 | ||
495 | /* Always leave this as zero. */ | 495 | /* Always leave this as zero. */ |
496 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 496 | tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
497 | } | 497 | } |
498 | spin_unlock_irqrestore(&tp->indirect_lock, flags); | 498 | spin_unlock_irqrestore(&tp->indirect_lock, flags); |
499 | } | 499 | } |
500 | 500 | ||
501 | static void tg3_disable_ints(struct tg3 *tp) | 501 | static void tg3_disable_ints(struct tg3 *tp) |
502 | { | 502 | { |
503 | tw32(TG3PCI_MISC_HOST_CTRL, | 503 | tw32(TG3PCI_MISC_HOST_CTRL, |
504 | (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); | 504 | (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); |
505 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 505 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
506 | } | 506 | } |
507 | 507 | ||
508 | static inline void tg3_cond_int(struct tg3 *tp) | 508 | static inline void tg3_cond_int(struct tg3 *tp) |
509 | { | 509 | { |
510 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && | 510 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && |
511 | (tp->hw_status->status & SD_STATUS_UPDATED)) | 511 | (tp->hw_status->status & SD_STATUS_UPDATED)) |
512 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); | 512 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); |
513 | else | 513 | else |
514 | tw32(HOSTCC_MODE, tp->coalesce_mode | | 514 | tw32(HOSTCC_MODE, tp->coalesce_mode | |
515 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); | 515 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); |
516 | } | 516 | } |
517 | 517 | ||
518 | static void tg3_enable_ints(struct tg3 *tp) | 518 | static void tg3_enable_ints(struct tg3 *tp) |
519 | { | 519 | { |
520 | tp->irq_sync = 0; | 520 | tp->irq_sync = 0; |
521 | wmb(); | 521 | wmb(); |
522 | 522 | ||
523 | tw32(TG3PCI_MISC_HOST_CTRL, | 523 | tw32(TG3PCI_MISC_HOST_CTRL, |
524 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); | 524 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); |
525 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 525 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
526 | (tp->last_tag << 24)); | 526 | (tp->last_tag << 24)); |
527 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | 527 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) |
528 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 528 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
529 | (tp->last_tag << 24)); | 529 | (tp->last_tag << 24)); |
530 | tg3_cond_int(tp); | 530 | tg3_cond_int(tp); |
531 | } | 531 | } |
532 | 532 | ||
533 | static inline unsigned int tg3_has_work(struct tg3 *tp) | 533 | static inline unsigned int tg3_has_work(struct tg3 *tp) |
534 | { | 534 | { |
535 | struct tg3_hw_status *sblk = tp->hw_status; | 535 | struct tg3_hw_status *sblk = tp->hw_status; |
536 | unsigned int work_exists = 0; | 536 | unsigned int work_exists = 0; |
537 | 537 | ||
538 | /* check for phy events */ | 538 | /* check for phy events */ |
539 | if (!(tp->tg3_flags & | 539 | if (!(tp->tg3_flags & |
540 | (TG3_FLAG_USE_LINKCHG_REG | | 540 | (TG3_FLAG_USE_LINKCHG_REG | |
541 | TG3_FLAG_POLL_SERDES))) { | 541 | TG3_FLAG_POLL_SERDES))) { |
542 | if (sblk->status & SD_STATUS_LINK_CHG) | 542 | if (sblk->status & SD_STATUS_LINK_CHG) |
543 | work_exists = 1; | 543 | work_exists = 1; |
544 | } | 544 | } |
545 | /* check for RX/TX work to do */ | 545 | /* check for RX/TX work to do */ |
546 | if (sblk->idx[0].tx_consumer != tp->tx_cons || | 546 | if (sblk->idx[0].tx_consumer != tp->tx_cons || |
547 | sblk->idx[0].rx_producer != tp->rx_rcb_ptr) | 547 | sblk->idx[0].rx_producer != tp->rx_rcb_ptr) |
548 | work_exists = 1; | 548 | work_exists = 1; |
549 | 549 | ||
550 | return work_exists; | 550 | return work_exists; |
551 | } | 551 | } |
552 | 552 | ||
553 | /* tg3_restart_ints | 553 | /* tg3_restart_ints |
554 | * similar to tg3_enable_ints, but it accurately determines whether there | 554 | * similar to tg3_enable_ints, but it accurately determines whether there |
555 | * is new work pending and can return without flushing the PIO write | 555 | * is new work pending and can return without flushing the PIO write |
556 | * which reenables interrupts | 556 | * which reenables interrupts |
557 | */ | 557 | */ |
558 | static void tg3_restart_ints(struct tg3 *tp) | 558 | static void tg3_restart_ints(struct tg3 *tp) |
559 | { | 559 | { |
560 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 560 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
561 | tp->last_tag << 24); | 561 | tp->last_tag << 24); |
562 | mmiowb(); | 562 | mmiowb(); |
563 | 563 | ||
564 | /* When doing tagged status, this work check is unnecessary. | 564 | /* When doing tagged status, this work check is unnecessary. |
565 | * The last_tag we write above tells the chip which piece of | 565 | * The last_tag we write above tells the chip which piece of |
566 | * work we've completed. | 566 | * work we've completed. |
567 | */ | 567 | */ |
568 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && | 568 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && |
569 | tg3_has_work(tp)) | 569 | tg3_has_work(tp)) |
570 | tw32(HOSTCC_MODE, tp->coalesce_mode | | 570 | tw32(HOSTCC_MODE, tp->coalesce_mode | |
571 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); | 571 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); |
572 | } | 572 | } |
573 | 573 | ||
574 | static inline void tg3_netif_stop(struct tg3 *tp) | 574 | static inline void tg3_netif_stop(struct tg3 *tp) |
575 | { | 575 | { |
576 | tp->dev->trans_start = jiffies; /* prevent tx timeout */ | 576 | tp->dev->trans_start = jiffies; /* prevent tx timeout */ |
577 | netif_poll_disable(tp->dev); | 577 | netif_poll_disable(tp->dev); |
578 | netif_tx_disable(tp->dev); | 578 | netif_tx_disable(tp->dev); |
579 | } | 579 | } |
580 | 580 | ||
581 | static inline void tg3_netif_start(struct tg3 *tp) | 581 | static inline void tg3_netif_start(struct tg3 *tp) |
582 | { | 582 | { |
583 | netif_wake_queue(tp->dev); | 583 | netif_wake_queue(tp->dev); |
584 | /* NOTE: unconditional netif_wake_queue is only appropriate | 584 | /* NOTE: unconditional netif_wake_queue is only appropriate |
585 | * so long as all callers are assured to have free tx slots | 585 | * so long as all callers are assured to have free tx slots |
586 | * (such as after tg3_init_hw) | 586 | * (such as after tg3_init_hw) |
587 | */ | 587 | */ |
588 | netif_poll_enable(tp->dev); | 588 | netif_poll_enable(tp->dev); |
589 | tp->hw_status->status |= SD_STATUS_UPDATED; | 589 | tp->hw_status->status |= SD_STATUS_UPDATED; |
590 | tg3_enable_ints(tp); | 590 | tg3_enable_ints(tp); |
591 | } | 591 | } |
592 | 592 | ||
593 | static void tg3_switch_clocks(struct tg3 *tp) | 593 | static void tg3_switch_clocks(struct tg3 *tp) |
594 | { | 594 | { |
595 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); | 595 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); |
596 | u32 orig_clock_ctrl; | 596 | u32 orig_clock_ctrl; |
597 | 597 | ||
598 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 598 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) |
599 | return; | 599 | return; |
600 | 600 | ||
601 | orig_clock_ctrl = clock_ctrl; | 601 | orig_clock_ctrl = clock_ctrl; |
602 | clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | | 602 | clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | |
603 | CLOCK_CTRL_CLKRUN_OENABLE | | 603 | CLOCK_CTRL_CLKRUN_OENABLE | |
604 | 0x1f); | 604 | 0x1f); |
605 | tp->pci_clock_ctrl = clock_ctrl; | 605 | tp->pci_clock_ctrl = clock_ctrl; |
606 | 606 | ||
607 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 607 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
608 | if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { | 608 | if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { |
609 | tw32_wait_f(TG3PCI_CLOCK_CTRL, | 609 | tw32_wait_f(TG3PCI_CLOCK_CTRL, |
610 | clock_ctrl | CLOCK_CTRL_625_CORE, 40); | 610 | clock_ctrl | CLOCK_CTRL_625_CORE, 40); |
611 | } | 611 | } |
612 | } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { | 612 | } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { |
613 | tw32_wait_f(TG3PCI_CLOCK_CTRL, | 613 | tw32_wait_f(TG3PCI_CLOCK_CTRL, |
614 | clock_ctrl | | 614 | clock_ctrl | |
615 | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), | 615 | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), |
616 | 40); | 616 | 40); |
617 | tw32_wait_f(TG3PCI_CLOCK_CTRL, | 617 | tw32_wait_f(TG3PCI_CLOCK_CTRL, |
618 | clock_ctrl | (CLOCK_CTRL_ALTCLK), | 618 | clock_ctrl | (CLOCK_CTRL_ALTCLK), |
619 | 40); | 619 | 40); |
620 | } | 620 | } |
621 | tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); | 621 | tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); |
622 | } | 622 | } |
623 | 623 | ||
624 | #define PHY_BUSY_LOOPS 5000 | 624 | #define PHY_BUSY_LOOPS 5000 |
625 | 625 | ||
626 | static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) | 626 | static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) |
627 | { | 627 | { |
628 | u32 frame_val; | 628 | u32 frame_val; |
629 | unsigned int loops; | 629 | unsigned int loops; |
630 | int ret; | 630 | int ret; |
631 | 631 | ||
632 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { | 632 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { |
633 | tw32_f(MAC_MI_MODE, | 633 | tw32_f(MAC_MI_MODE, |
634 | (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); | 634 | (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); |
635 | udelay(80); | 635 | udelay(80); |
636 | } | 636 | } |
637 | 637 | ||
638 | *val = 0x0; | 638 | *val = 0x0; |
639 | 639 | ||
640 | frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & | 640 | frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & |
641 | MI_COM_PHY_ADDR_MASK); | 641 | MI_COM_PHY_ADDR_MASK); |
642 | frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & | 642 | frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & |
643 | MI_COM_REG_ADDR_MASK); | 643 | MI_COM_REG_ADDR_MASK); |
644 | frame_val |= (MI_COM_CMD_READ | MI_COM_START); | 644 | frame_val |= (MI_COM_CMD_READ | MI_COM_START); |
645 | 645 | ||
646 | tw32_f(MAC_MI_COM, frame_val); | 646 | tw32_f(MAC_MI_COM, frame_val); |
647 | 647 | ||
648 | loops = PHY_BUSY_LOOPS; | 648 | loops = PHY_BUSY_LOOPS; |
649 | while (loops != 0) { | 649 | while (loops != 0) { |
650 | udelay(10); | 650 | udelay(10); |
651 | frame_val = tr32(MAC_MI_COM); | 651 | frame_val = tr32(MAC_MI_COM); |
652 | 652 | ||
653 | if ((frame_val & MI_COM_BUSY) == 0) { | 653 | if ((frame_val & MI_COM_BUSY) == 0) { |
654 | udelay(5); | 654 | udelay(5); |
655 | frame_val = tr32(MAC_MI_COM); | 655 | frame_val = tr32(MAC_MI_COM); |
656 | break; | 656 | break; |
657 | } | 657 | } |
658 | loops -= 1; | 658 | loops -= 1; |
659 | } | 659 | } |
660 | 660 | ||
661 | ret = -EBUSY; | 661 | ret = -EBUSY; |
662 | if (loops != 0) { | 662 | if (loops != 0) { |
663 | *val = frame_val & MI_COM_DATA_MASK; | 663 | *val = frame_val & MI_COM_DATA_MASK; |
664 | ret = 0; | 664 | ret = 0; |
665 | } | 665 | } |
666 | 666 | ||
667 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { | 667 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { |
668 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 668 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
669 | udelay(80); | 669 | udelay(80); |
670 | } | 670 | } |
671 | 671 | ||
672 | return ret; | 672 | return ret; |
673 | } | 673 | } |
674 | 674 | ||
675 | static int tg3_writephy(struct tg3 *tp, int reg, u32 val) | 675 | static int tg3_writephy(struct tg3 *tp, int reg, u32 val) |
676 | { | 676 | { |
677 | u32 frame_val; | 677 | u32 frame_val; |
678 | unsigned int loops; | 678 | unsigned int loops; |
679 | int ret; | 679 | int ret; |
680 | 680 | ||
681 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && | 681 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && |
682 | (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) | 682 | (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) |
683 | return 0; | 683 | return 0; |
684 | 684 | ||
685 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { | 685 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { |
686 | tw32_f(MAC_MI_MODE, | 686 | tw32_f(MAC_MI_MODE, |
687 | (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); | 687 | (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); |
688 | udelay(80); | 688 | udelay(80); |
689 | } | 689 | } |
690 | 690 | ||
691 | frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & | 691 | frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) & |
692 | MI_COM_PHY_ADDR_MASK); | 692 | MI_COM_PHY_ADDR_MASK); |
693 | frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & | 693 | frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & |
694 | MI_COM_REG_ADDR_MASK); | 694 | MI_COM_REG_ADDR_MASK); |
695 | frame_val |= (val & MI_COM_DATA_MASK); | 695 | frame_val |= (val & MI_COM_DATA_MASK); |
696 | frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); | 696 | frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); |
697 | 697 | ||
698 | tw32_f(MAC_MI_COM, frame_val); | 698 | tw32_f(MAC_MI_COM, frame_val); |
699 | 699 | ||
700 | loops = PHY_BUSY_LOOPS; | 700 | loops = PHY_BUSY_LOOPS; |
701 | while (loops != 0) { | 701 | while (loops != 0) { |
702 | udelay(10); | 702 | udelay(10); |
703 | frame_val = tr32(MAC_MI_COM); | 703 | frame_val = tr32(MAC_MI_COM); |
704 | if ((frame_val & MI_COM_BUSY) == 0) { | 704 | if ((frame_val & MI_COM_BUSY) == 0) { |
705 | udelay(5); | 705 | udelay(5); |
706 | frame_val = tr32(MAC_MI_COM); | 706 | frame_val = tr32(MAC_MI_COM); |
707 | break; | 707 | break; |
708 | } | 708 | } |
709 | loops -= 1; | 709 | loops -= 1; |
710 | } | 710 | } |
711 | 711 | ||
712 | ret = -EBUSY; | 712 | ret = -EBUSY; |
713 | if (loops != 0) | 713 | if (loops != 0) |
714 | ret = 0; | 714 | ret = 0; |
715 | 715 | ||
716 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { | 716 | if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { |
717 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 717 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
718 | udelay(80); | 718 | udelay(80); |
719 | } | 719 | } |
720 | 720 | ||
721 | return ret; | 721 | return ret; |
722 | } | 722 | } |
723 | 723 | ||
724 | static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) | 724 | static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) |
725 | { | 725 | { |
726 | u32 phy; | 726 | u32 phy; |
727 | 727 | ||
728 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 728 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || |
729 | (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 729 | (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) |
730 | return; | 730 | return; |
731 | 731 | ||
732 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 732 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
733 | u32 ephy; | 733 | u32 ephy; |
734 | 734 | ||
735 | if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) { | 735 | if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) { |
736 | tg3_writephy(tp, MII_TG3_EPHY_TEST, | 736 | tg3_writephy(tp, MII_TG3_EPHY_TEST, |
737 | ephy | MII_TG3_EPHY_SHADOW_EN); | 737 | ephy | MII_TG3_EPHY_SHADOW_EN); |
738 | if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) { | 738 | if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) { |
739 | if (enable) | 739 | if (enable) |
740 | phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX; | 740 | phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX; |
741 | else | 741 | else |
742 | phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX; | 742 | phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX; |
743 | tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy); | 743 | tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy); |
744 | } | 744 | } |
745 | tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy); | 745 | tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy); |
746 | } | 746 | } |
747 | } else { | 747 | } else { |
748 | phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | | 748 | phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | |
749 | MII_TG3_AUXCTL_SHDWSEL_MISC; | 749 | MII_TG3_AUXCTL_SHDWSEL_MISC; |
750 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && | 750 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && |
751 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { | 751 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { |
752 | if (enable) | 752 | if (enable) |
753 | phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; | 753 | phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; |
754 | else | 754 | else |
755 | phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; | 755 | phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; |
756 | phy |= MII_TG3_AUXCTL_MISC_WREN; | 756 | phy |= MII_TG3_AUXCTL_MISC_WREN; |
757 | tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); | 757 | tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); |
758 | } | 758 | } |
759 | } | 759 | } |
760 | } | 760 | } |
761 | 761 | ||
762 | static void tg3_phy_set_wirespeed(struct tg3 *tp) | 762 | static void tg3_phy_set_wirespeed(struct tg3 *tp) |
763 | { | 763 | { |
764 | u32 val; | 764 | u32 val; |
765 | 765 | ||
766 | if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) | 766 | if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) |
767 | return; | 767 | return; |
768 | 768 | ||
769 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && | 769 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && |
770 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) | 770 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) |
771 | tg3_writephy(tp, MII_TG3_AUX_CTRL, | 771 | tg3_writephy(tp, MII_TG3_AUX_CTRL, |
772 | (val | (1 << 15) | (1 << 4))); | 772 | (val | (1 << 15) | (1 << 4))); |
773 | } | 773 | } |
774 | 774 | ||
775 | static int tg3_bmcr_reset(struct tg3 *tp) | 775 | static int tg3_bmcr_reset(struct tg3 *tp) |
776 | { | 776 | { |
777 | u32 phy_control; | 777 | u32 phy_control; |
778 | int limit, err; | 778 | int limit, err; |
779 | 779 | ||
780 | /* OK, reset it, and poll the BMCR_RESET bit until it | 780 | /* OK, reset it, and poll the BMCR_RESET bit until it |
781 | * clears or we time out. | 781 | * clears or we time out. |
782 | */ | 782 | */ |
783 | phy_control = BMCR_RESET; | 783 | phy_control = BMCR_RESET; |
784 | err = tg3_writephy(tp, MII_BMCR, phy_control); | 784 | err = tg3_writephy(tp, MII_BMCR, phy_control); |
785 | if (err != 0) | 785 | if (err != 0) |
786 | return -EBUSY; | 786 | return -EBUSY; |
787 | 787 | ||
788 | limit = 5000; | 788 | limit = 5000; |
789 | while (limit--) { | 789 | while (limit--) { |
790 | err = tg3_readphy(tp, MII_BMCR, &phy_control); | 790 | err = tg3_readphy(tp, MII_BMCR, &phy_control); |
791 | if (err != 0) | 791 | if (err != 0) |
792 | return -EBUSY; | 792 | return -EBUSY; |
793 | 793 | ||
794 | if ((phy_control & BMCR_RESET) == 0) { | 794 | if ((phy_control & BMCR_RESET) == 0) { |
795 | udelay(40); | 795 | udelay(40); |
796 | break; | 796 | break; |
797 | } | 797 | } |
798 | udelay(10); | 798 | udelay(10); |
799 | } | 799 | } |
800 | if (limit <= 0) | 800 | if (limit <= 0) |
801 | return -EBUSY; | 801 | return -EBUSY; |
802 | 802 | ||
803 | return 0; | 803 | return 0; |
804 | } | 804 | } |
805 | 805 | ||
806 | static int tg3_wait_macro_done(struct tg3 *tp) | 806 | static int tg3_wait_macro_done(struct tg3 *tp) |
807 | { | 807 | { |
808 | int limit = 100; | 808 | int limit = 100; |
809 | 809 | ||
810 | while (limit--) { | 810 | while (limit--) { |
811 | u32 tmp32; | 811 | u32 tmp32; |
812 | 812 | ||
813 | if (!tg3_readphy(tp, 0x16, &tmp32)) { | 813 | if (!tg3_readphy(tp, 0x16, &tmp32)) { |
814 | if ((tmp32 & 0x1000) == 0) | 814 | if ((tmp32 & 0x1000) == 0) |
815 | break; | 815 | break; |
816 | } | 816 | } |
817 | } | 817 | } |
818 | if (limit <= 0) | 818 | if (limit <= 0) |
819 | return -EBUSY; | 819 | return -EBUSY; |
820 | 820 | ||
821 | return 0; | 821 | return 0; |
822 | } | 822 | } |
823 | 823 | ||
824 | static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) | 824 | static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) |
825 | { | 825 | { |
826 | static const u32 test_pat[4][6] = { | 826 | static const u32 test_pat[4][6] = { |
827 | { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, | 827 | { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, |
828 | { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, | 828 | { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, |
829 | { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, | 829 | { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, |
830 | { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } | 830 | { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } |
831 | }; | 831 | }; |
832 | int chan; | 832 | int chan; |
833 | 833 | ||
834 | for (chan = 0; chan < 4; chan++) { | 834 | for (chan = 0; chan < 4; chan++) { |
835 | int i; | 835 | int i; |
836 | 836 | ||
837 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 837 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, |
838 | (chan * 0x2000) | 0x0200); | 838 | (chan * 0x2000) | 0x0200); |
839 | tg3_writephy(tp, 0x16, 0x0002); | 839 | tg3_writephy(tp, 0x16, 0x0002); |
840 | 840 | ||
841 | for (i = 0; i < 6; i++) | 841 | for (i = 0; i < 6; i++) |
842 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, | 842 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, |
843 | test_pat[chan][i]); | 843 | test_pat[chan][i]); |
844 | 844 | ||
845 | tg3_writephy(tp, 0x16, 0x0202); | 845 | tg3_writephy(tp, 0x16, 0x0202); |
846 | if (tg3_wait_macro_done(tp)) { | 846 | if (tg3_wait_macro_done(tp)) { |
847 | *resetp = 1; | 847 | *resetp = 1; |
848 | return -EBUSY; | 848 | return -EBUSY; |
849 | } | 849 | } |
850 | 850 | ||
851 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 851 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, |
852 | (chan * 0x2000) | 0x0200); | 852 | (chan * 0x2000) | 0x0200); |
853 | tg3_writephy(tp, 0x16, 0x0082); | 853 | tg3_writephy(tp, 0x16, 0x0082); |
854 | if (tg3_wait_macro_done(tp)) { | 854 | if (tg3_wait_macro_done(tp)) { |
855 | *resetp = 1; | 855 | *resetp = 1; |
856 | return -EBUSY; | 856 | return -EBUSY; |
857 | } | 857 | } |
858 | 858 | ||
859 | tg3_writephy(tp, 0x16, 0x0802); | 859 | tg3_writephy(tp, 0x16, 0x0802); |
860 | if (tg3_wait_macro_done(tp)) { | 860 | if (tg3_wait_macro_done(tp)) { |
861 | *resetp = 1; | 861 | *resetp = 1; |
862 | return -EBUSY; | 862 | return -EBUSY; |
863 | } | 863 | } |
864 | 864 | ||
865 | for (i = 0; i < 6; i += 2) { | 865 | for (i = 0; i < 6; i += 2) { |
866 | u32 low, high; | 866 | u32 low, high; |
867 | 867 | ||
868 | if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || | 868 | if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || |
869 | tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || | 869 | tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || |
870 | tg3_wait_macro_done(tp)) { | 870 | tg3_wait_macro_done(tp)) { |
871 | *resetp = 1; | 871 | *resetp = 1; |
872 | return -EBUSY; | 872 | return -EBUSY; |
873 | } | 873 | } |
874 | low &= 0x7fff; | 874 | low &= 0x7fff; |
875 | high &= 0x000f; | 875 | high &= 0x000f; |
876 | if (low != test_pat[chan][i] || | 876 | if (low != test_pat[chan][i] || |
877 | high != test_pat[chan][i+1]) { | 877 | high != test_pat[chan][i+1]) { |
878 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); | 878 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); |
879 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); | 879 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); |
880 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); | 880 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); |
881 | 881 | ||
882 | return -EBUSY; | 882 | return -EBUSY; |
883 | } | 883 | } |
884 | } | 884 | } |
885 | } | 885 | } |
886 | 886 | ||
887 | return 0; | 887 | return 0; |
888 | } | 888 | } |
889 | 889 | ||
890 | static int tg3_phy_reset_chanpat(struct tg3 *tp) | 890 | static int tg3_phy_reset_chanpat(struct tg3 *tp) |
891 | { | 891 | { |
892 | int chan; | 892 | int chan; |
893 | 893 | ||
894 | for (chan = 0; chan < 4; chan++) { | 894 | for (chan = 0; chan < 4; chan++) { |
895 | int i; | 895 | int i; |
896 | 896 | ||
897 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 897 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, |
898 | (chan * 0x2000) | 0x0200); | 898 | (chan * 0x2000) | 0x0200); |
899 | tg3_writephy(tp, 0x16, 0x0002); | 899 | tg3_writephy(tp, 0x16, 0x0002); |
900 | for (i = 0; i < 6; i++) | 900 | for (i = 0; i < 6; i++) |
901 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); | 901 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); |
902 | tg3_writephy(tp, 0x16, 0x0202); | 902 | tg3_writephy(tp, 0x16, 0x0202); |
903 | if (tg3_wait_macro_done(tp)) | 903 | if (tg3_wait_macro_done(tp)) |
904 | return -EBUSY; | 904 | return -EBUSY; |
905 | } | 905 | } |
906 | 906 | ||
907 | return 0; | 907 | return 0; |
908 | } | 908 | } |
909 | 909 | ||
910 | static int tg3_phy_reset_5703_4_5(struct tg3 *tp) | 910 | static int tg3_phy_reset_5703_4_5(struct tg3 *tp) |
911 | { | 911 | { |
912 | u32 reg32, phy9_orig; | 912 | u32 reg32, phy9_orig; |
913 | int retries, do_phy_reset, err; | 913 | int retries, do_phy_reset, err; |
914 | 914 | ||
915 | retries = 10; | 915 | retries = 10; |
916 | do_phy_reset = 1; | 916 | do_phy_reset = 1; |
917 | do { | 917 | do { |
918 | if (do_phy_reset) { | 918 | if (do_phy_reset) { |
919 | err = tg3_bmcr_reset(tp); | 919 | err = tg3_bmcr_reset(tp); |
920 | if (err) | 920 | if (err) |
921 | return err; | 921 | return err; |
922 | do_phy_reset = 0; | 922 | do_phy_reset = 0; |
923 | } | 923 | } |
924 | 924 | ||
925 | /* Disable transmitter and interrupt. */ | 925 | /* Disable transmitter and interrupt. */ |
926 | if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) | 926 | if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) |
927 | continue; | 927 | continue; |
928 | 928 | ||
929 | reg32 |= 0x3000; | 929 | reg32 |= 0x3000; |
930 | tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); | 930 | tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); |
931 | 931 | ||
932 | /* Set full-duplex, 1000 mbps. */ | 932 | /* Set full-duplex, 1000 mbps. */ |
933 | tg3_writephy(tp, MII_BMCR, | 933 | tg3_writephy(tp, MII_BMCR, |
934 | BMCR_FULLDPLX | TG3_BMCR_SPEED1000); | 934 | BMCR_FULLDPLX | TG3_BMCR_SPEED1000); |
935 | 935 | ||
936 | /* Set to master mode. */ | 936 | /* Set to master mode. */ |
937 | if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) | 937 | if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) |
938 | continue; | 938 | continue; |
939 | 939 | ||
940 | tg3_writephy(tp, MII_TG3_CTRL, | 940 | tg3_writephy(tp, MII_TG3_CTRL, |
941 | (MII_TG3_CTRL_AS_MASTER | | 941 | (MII_TG3_CTRL_AS_MASTER | |
942 | MII_TG3_CTRL_ENABLE_AS_MASTER)); | 942 | MII_TG3_CTRL_ENABLE_AS_MASTER)); |
943 | 943 | ||
944 | /* Enable SM_DSP_CLOCK and 6dB. */ | 944 | /* Enable SM_DSP_CLOCK and 6dB. */ |
945 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 945 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); |
946 | 946 | ||
947 | /* Block the PHY control access. */ | 947 | /* Block the PHY control access. */ |
948 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); | 948 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); |
949 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); | 949 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); |
950 | 950 | ||
951 | err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); | 951 | err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); |
952 | if (!err) | 952 | if (!err) |
953 | break; | 953 | break; |
954 | } while (--retries); | 954 | } while (--retries); |
955 | 955 | ||
956 | err = tg3_phy_reset_chanpat(tp); | 956 | err = tg3_phy_reset_chanpat(tp); |
957 | if (err) | 957 | if (err) |
958 | return err; | 958 | return err; |
959 | 959 | ||
960 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); | 960 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); |
961 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); | 961 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); |
962 | 962 | ||
963 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); | 963 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); |
964 | tg3_writephy(tp, 0x16, 0x0000); | 964 | tg3_writephy(tp, 0x16, 0x0000); |
965 | 965 | ||
966 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 966 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
967 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 967 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { |
968 | /* Set Extended packet length bit for jumbo frames */ | 968 | /* Set Extended packet length bit for jumbo frames */ |
969 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); | 969 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); |
970 | } | 970 | } |
971 | else { | 971 | else { |
972 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 972 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
973 | } | 973 | } |
974 | 974 | ||
975 | tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); | 975 | tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); |
976 | 976 | ||
977 | if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { | 977 | if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { |
978 | reg32 &= ~0x3000; | 978 | reg32 &= ~0x3000; |
979 | tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); | 979 | tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); |
980 | } else if (!err) | 980 | } else if (!err) |
981 | err = -EBUSY; | 981 | err = -EBUSY; |
982 | 982 | ||
983 | return err; | 983 | return err; |
984 | } | 984 | } |
985 | 985 | ||
986 | static void tg3_link_report(struct tg3 *); | 986 | static void tg3_link_report(struct tg3 *); |
987 | 987 | ||
988 | /* This will reset the tigon3 PHY if there is no valid | 988 | /* This will reset the tigon3 PHY if there is no valid |
989 | * link unless the FORCE argument is non-zero. | 989 | * link unless the FORCE argument is non-zero. |
990 | */ | 990 | */ |
991 | static int tg3_phy_reset(struct tg3 *tp) | 991 | static int tg3_phy_reset(struct tg3 *tp) |
992 | { | 992 | { |
993 | u32 phy_status; | 993 | u32 phy_status; |
994 | int err; | 994 | int err; |
995 | 995 | ||
996 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 996 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
997 | u32 val; | 997 | u32 val; |
998 | 998 | ||
999 | val = tr32(GRC_MISC_CFG); | 999 | val = tr32(GRC_MISC_CFG); |
1000 | tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); | 1000 | tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); |
1001 | udelay(40); | 1001 | udelay(40); |
1002 | } | 1002 | } |
1003 | err = tg3_readphy(tp, MII_BMSR, &phy_status); | 1003 | err = tg3_readphy(tp, MII_BMSR, &phy_status); |
1004 | err |= tg3_readphy(tp, MII_BMSR, &phy_status); | 1004 | err |= tg3_readphy(tp, MII_BMSR, &phy_status); |
1005 | if (err != 0) | 1005 | if (err != 0) |
1006 | return -EBUSY; | 1006 | return -EBUSY; |
1007 | 1007 | ||
1008 | if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { | 1008 | if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { |
1009 | netif_carrier_off(tp->dev); | 1009 | netif_carrier_off(tp->dev); |
1010 | tg3_link_report(tp); | 1010 | tg3_link_report(tp); |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 1013 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
1014 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 1014 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
1015 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 1015 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
1016 | err = tg3_phy_reset_5703_4_5(tp); | 1016 | err = tg3_phy_reset_5703_4_5(tp); |
1017 | if (err) | 1017 | if (err) |
1018 | return err; | 1018 | return err; |
1019 | goto out; | 1019 | goto out; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | err = tg3_bmcr_reset(tp); | 1022 | err = tg3_bmcr_reset(tp); |
1023 | if (err) | 1023 | if (err) |
1024 | return err; | 1024 | return err; |
1025 | 1025 | ||
1026 | out: | 1026 | out: |
1027 | if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { | 1027 | if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { |
1028 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 1028 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); |
1029 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 1029 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); |
1030 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); | 1030 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); |
1031 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 1031 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); |
1032 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); | 1032 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); |
1033 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 1033 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
1034 | } | 1034 | } |
1035 | if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { | 1035 | if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { |
1036 | tg3_writephy(tp, 0x1c, 0x8d68); | 1036 | tg3_writephy(tp, 0x1c, 0x8d68); |
1037 | tg3_writephy(tp, 0x1c, 0x8d68); | 1037 | tg3_writephy(tp, 0x1c, 0x8d68); |
1038 | } | 1038 | } |
1039 | if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { | 1039 | if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { |
1040 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 1040 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); |
1041 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 1041 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); |
1042 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); | 1042 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); |
1043 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 1043 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); |
1044 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); | 1044 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); |
1045 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); | 1045 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); |
1046 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); | 1046 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); |
1047 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 1047 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
1048 | } | 1048 | } |
1049 | else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { | 1049 | else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { |
1050 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 1050 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); |
1051 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 1051 | tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); |
1052 | if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { | 1052 | if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { |
1053 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); | 1053 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); |
1054 | tg3_writephy(tp, MII_TG3_TEST1, | 1054 | tg3_writephy(tp, MII_TG3_TEST1, |
1055 | MII_TG3_TEST1_TRIM_EN | 0x4); | 1055 | MII_TG3_TEST1_TRIM_EN | 0x4); |
1056 | } else | 1056 | } else |
1057 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); | 1057 | tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); |
1058 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 1058 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); |
1059 | } | 1059 | } |
1060 | /* Set Extended packet length bit (bit 14) on all chips that */ | 1060 | /* Set Extended packet length bit (bit 14) on all chips that */ |
1061 | /* support jumbo frames */ | 1061 | /* support jumbo frames */ |
1062 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 1062 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { |
1063 | /* Cannot do read-modify-write on 5401 */ | 1063 | /* Cannot do read-modify-write on 5401 */ |
1064 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 1064 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); |
1065 | } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { | 1065 | } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { |
1066 | u32 phy_reg; | 1066 | u32 phy_reg; |
1067 | 1067 | ||
1068 | /* Set bit 14 with read-modify-write to preserve other bits */ | 1068 | /* Set bit 14 with read-modify-write to preserve other bits */ |
1069 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && | 1069 | if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && |
1070 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) | 1070 | !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) |
1071 | tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); | 1071 | tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | /* Set phy register 0x10 bit 0 to high fifo elasticity to support | 1074 | /* Set phy register 0x10 bit 0 to high fifo elasticity to support |
1075 | * jumbo frames transmission. | 1075 | * jumbo frames transmission. |
1076 | */ | 1076 | */ |
1077 | if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { | 1077 | if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) { |
1078 | u32 phy_reg; | 1078 | u32 phy_reg; |
1079 | 1079 | ||
1080 | if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) | 1080 | if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) |
1081 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 1081 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
1082 | phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); | 1082 | phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 1085 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
1086 | /* adjust output voltage */ | 1086 | /* adjust output voltage */ |
1087 | tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); | 1087 | tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | tg3_phy_toggle_automdix(tp, 1); | 1090 | tg3_phy_toggle_automdix(tp, 1); |
1091 | tg3_phy_set_wirespeed(tp); | 1091 | tg3_phy_set_wirespeed(tp); |
1092 | return 0; | 1092 | return 0; |
1093 | } | 1093 | } |
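
The two back-to-back MII_BMSR reads near the top of tg3_phy_reset() are deliberate: the link-status bit in BMSR is latched-low per the MII specification, so the first read clears any stale latched value and the second returns the current state. Below is a minimal standalone sketch of that convention, not part of tg3.c; mii_read() and its phy_addr parameter are hypothetical stand-ins for tg3_readphy().

#include <stdbool.h>
#include <stdint.h>

#define MII_BMSR      0x01    /* basic mode status register   */
#define BMSR_LSTATUS  0x0004  /* link status bit, latched low */

/* Hypothetical PHY register accessor; returns non-zero on error. */
extern int mii_read(int phy_addr, int reg, uint16_t *val);

/* Report the *current* link state, discarding the latched value. */
static bool mii_link_up(int phy_addr)
{
        uint16_t bmsr;

        if (mii_read(phy_addr, MII_BMSR, &bmsr))   /* first read clears the latch   */
                return false;
        if (mii_read(phy_addr, MII_BMSR, &bmsr))   /* second read is the live state */
                return false;
        return (bmsr & BMSR_LSTATUS) != 0;
}
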
1094 | 1094 | ||
1095 | static void tg3_frob_aux_power(struct tg3 *tp) | 1095 | static void tg3_frob_aux_power(struct tg3 *tp) |
1096 | { | 1096 | { |
1097 | struct tg3 *tp_peer = tp; | 1097 | struct tg3 *tp_peer = tp; |
1098 | 1098 | ||
1099 | if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) | 1099 | if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) |
1100 | return; | 1100 | return; |
1101 | 1101 | ||
1102 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || | 1102 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || |
1103 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { | 1103 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { |
1104 | struct net_device *dev_peer; | 1104 | struct net_device *dev_peer; |
1105 | 1105 | ||
1106 | dev_peer = pci_get_drvdata(tp->pdev_peer); | 1106 | dev_peer = pci_get_drvdata(tp->pdev_peer); |
1107 | /* remove_one() may have been run on the peer. */ | 1107 | /* remove_one() may have been run on the peer. */ |
1108 | if (!dev_peer) | 1108 | if (!dev_peer) |
1109 | tp_peer = tp; | 1109 | tp_peer = tp; |
1110 | else | 1110 | else |
1111 | tp_peer = netdev_priv(dev_peer); | 1111 | tp_peer = netdev_priv(dev_peer); |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || | 1114 | if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || |
1115 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || | 1115 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || |
1116 | (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || | 1116 | (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || |
1117 | (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { | 1117 | (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { |
1118 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1118 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1119 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 1119 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
1120 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1120 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1121 | (GRC_LCLCTRL_GPIO_OE0 | | 1121 | (GRC_LCLCTRL_GPIO_OE0 | |
1122 | GRC_LCLCTRL_GPIO_OE1 | | 1122 | GRC_LCLCTRL_GPIO_OE1 | |
1123 | GRC_LCLCTRL_GPIO_OE2 | | 1123 | GRC_LCLCTRL_GPIO_OE2 | |
1124 | GRC_LCLCTRL_GPIO_OUTPUT0 | | 1124 | GRC_LCLCTRL_GPIO_OUTPUT0 | |
1125 | GRC_LCLCTRL_GPIO_OUTPUT1), | 1125 | GRC_LCLCTRL_GPIO_OUTPUT1), |
1126 | 100); | 1126 | 100); |
1127 | } else { | 1127 | } else { |
1128 | u32 no_gpio2; | 1128 | u32 no_gpio2; |
1129 | u32 grc_local_ctrl = 0; | 1129 | u32 grc_local_ctrl = 0; |
1130 | 1130 | ||
1131 | if (tp_peer != tp && | 1131 | if (tp_peer != tp && |
1132 | (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) | 1132 | (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) |
1133 | return; | 1133 | return; |
1134 | 1134 | ||
1135 | /* Workaround to prevent overdrawing Amps. */ | 1135 | /* Workaround to prevent overdrawing Amps. */ |
1136 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 1136 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
1137 | ASIC_REV_5714) { | 1137 | ASIC_REV_5714) { |
1138 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 1138 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; |
1139 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1139 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1140 | grc_local_ctrl, 100); | 1140 | grc_local_ctrl, 100); |
1141 | } | 1141 | } |
1142 | 1142 | ||
1143 | /* On 5753 and variants, GPIO2 cannot be used. */ | 1143 | /* On 5753 and variants, GPIO2 cannot be used. */ |
1144 | no_gpio2 = tp->nic_sram_data_cfg & | 1144 | no_gpio2 = tp->nic_sram_data_cfg & |
1145 | NIC_SRAM_DATA_CFG_NO_GPIO2; | 1145 | NIC_SRAM_DATA_CFG_NO_GPIO2; |
1146 | 1146 | ||
1147 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | 1147 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | |
1148 | GRC_LCLCTRL_GPIO_OE1 | | 1148 | GRC_LCLCTRL_GPIO_OE1 | |
1149 | GRC_LCLCTRL_GPIO_OE2 | | 1149 | GRC_LCLCTRL_GPIO_OE2 | |
1150 | GRC_LCLCTRL_GPIO_OUTPUT1 | | 1150 | GRC_LCLCTRL_GPIO_OUTPUT1 | |
1151 | GRC_LCLCTRL_GPIO_OUTPUT2; | 1151 | GRC_LCLCTRL_GPIO_OUTPUT2; |
1152 | if (no_gpio2) { | 1152 | if (no_gpio2) { |
1153 | grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | | 1153 | grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | |
1154 | GRC_LCLCTRL_GPIO_OUTPUT2); | 1154 | GRC_LCLCTRL_GPIO_OUTPUT2); |
1155 | } | 1155 | } |
1156 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1156 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1157 | grc_local_ctrl, 100); | 1157 | grc_local_ctrl, 100); |
1158 | 1158 | ||
1159 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; | 1159 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; |
1160 | 1160 | ||
1161 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1161 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1162 | grc_local_ctrl, 100); | 1162 | grc_local_ctrl, 100); |
1163 | 1163 | ||
1164 | if (!no_gpio2) { | 1164 | if (!no_gpio2) { |
1165 | grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; | 1165 | grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; |
1166 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1166 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1167 | grc_local_ctrl, 100); | 1167 | grc_local_ctrl, 100); |
1168 | } | 1168 | } |
1169 | } | 1169 | } |
1170 | } else { | 1170 | } else { |
1171 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 1171 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
1172 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 1172 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { |
1173 | if (tp_peer != tp && | 1173 | if (tp_peer != tp && |
1174 | (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) | 1174 | (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) |
1175 | return; | 1175 | return; |
1176 | 1176 | ||
1177 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1177 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1178 | (GRC_LCLCTRL_GPIO_OE1 | | 1178 | (GRC_LCLCTRL_GPIO_OE1 | |
1179 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); | 1179 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); |
1180 | 1180 | ||
1181 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1181 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1182 | GRC_LCLCTRL_GPIO_OE1, 100); | 1182 | GRC_LCLCTRL_GPIO_OE1, 100); |
1183 | 1183 | ||
1184 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 1184 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | |
1185 | (GRC_LCLCTRL_GPIO_OE1 | | 1185 | (GRC_LCLCTRL_GPIO_OE1 | |
1186 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); | 1186 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); |
1187 | } | 1187 | } |
1188 | } | 1188 | } |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) | 1191 | static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) |
1192 | { | 1192 | { |
1193 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) | 1193 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) |
1194 | return 1; | 1194 | return 1; |
1195 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { | 1195 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { |
1196 | if (speed != SPEED_10) | 1196 | if (speed != SPEED_10) |
1197 | return 1; | 1197 | return 1; |
1198 | } else if (speed == SPEED_10) | 1198 | } else if (speed == SPEED_10) |
1199 | return 1; | 1199 | return 1; |
1200 | 1200 | ||
1201 | return 0; | 1201 | return 0; |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | static int tg3_setup_phy(struct tg3 *, int); | 1204 | static int tg3_setup_phy(struct tg3 *, int); |
1205 | 1205 | ||
1206 | #define RESET_KIND_SHUTDOWN 0 | 1206 | #define RESET_KIND_SHUTDOWN 0 |
1207 | #define RESET_KIND_INIT 1 | 1207 | #define RESET_KIND_INIT 1 |
1208 | #define RESET_KIND_SUSPEND 2 | 1208 | #define RESET_KIND_SUSPEND 2 |
1209 | 1209 | ||
1210 | static void tg3_write_sig_post_reset(struct tg3 *, int); | 1210 | static void tg3_write_sig_post_reset(struct tg3 *, int); |
1211 | static int tg3_halt_cpu(struct tg3 *, u32); | 1211 | static int tg3_halt_cpu(struct tg3 *, u32); |
1212 | static int tg3_nvram_lock(struct tg3 *); | 1212 | static int tg3_nvram_lock(struct tg3 *); |
1213 | static void tg3_nvram_unlock(struct tg3 *); | 1213 | static void tg3_nvram_unlock(struct tg3 *); |
1214 | 1214 | ||
1215 | static void tg3_power_down_phy(struct tg3 *tp) | 1215 | static void tg3_power_down_phy(struct tg3 *tp) |
1216 | { | 1216 | { |
1217 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 1217 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
1218 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 1218 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { |
1219 | u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); | 1219 | u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); |
1220 | u32 serdes_cfg = tr32(MAC_SERDES_CFG); | 1220 | u32 serdes_cfg = tr32(MAC_SERDES_CFG); |
1221 | 1221 | ||
1222 | sg_dig_ctrl |= | 1222 | sg_dig_ctrl |= |
1223 | SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; | 1223 | SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; |
1224 | tw32(SG_DIG_CTRL, sg_dig_ctrl); | 1224 | tw32(SG_DIG_CTRL, sg_dig_ctrl); |
1225 | tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); | 1225 | tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); |
1226 | } | 1226 | } |
1227 | return; | 1227 | return; |
1228 | } | 1228 | } |
1229 | 1229 | ||
1230 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 1230 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
1231 | u32 val; | 1231 | u32 val; |
1232 | 1232 | ||
1233 | tg3_bmcr_reset(tp); | 1233 | tg3_bmcr_reset(tp); |
1234 | val = tr32(GRC_MISC_CFG); | 1234 | val = tr32(GRC_MISC_CFG); |
1235 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); | 1235 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); |
1236 | udelay(40); | 1236 | udelay(40); |
1237 | return; | 1237 | return; |
1238 | } else { | 1238 | } else { |
1239 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 1239 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
1240 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); | 1240 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); |
1241 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); | 1241 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | /* The PHY should not be powered down on some chips because | 1244 | /* The PHY should not be powered down on some chips because |
1245 | * of bugs. | 1245 | * of bugs. |
1246 | */ | 1246 | */ |
1247 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1247 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1248 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 1248 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
1249 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && | 1249 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && |
1250 | (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) | 1250 | (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) |
1251 | return; | 1251 | return; |
1252 | tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); | 1252 | tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | 1255 | static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) |
1256 | { | 1256 | { |
1257 | u32 misc_host_ctrl; | 1257 | u32 misc_host_ctrl; |
1258 | u16 power_control, power_caps; | 1258 | u16 power_control, power_caps; |
1259 | int pm = tp->pm_cap; | 1259 | int pm = tp->pm_cap; |
1260 | 1260 | ||
1261 | /* Make sure register accesses (indirect or otherwise) | 1261 | /* Make sure register accesses (indirect or otherwise) |
1262 | * will function correctly. | 1262 | * will function correctly. |
1263 | */ | 1263 | */ |
1264 | pci_write_config_dword(tp->pdev, | 1264 | pci_write_config_dword(tp->pdev, |
1265 | TG3PCI_MISC_HOST_CTRL, | 1265 | TG3PCI_MISC_HOST_CTRL, |
1266 | tp->misc_host_ctrl); | 1266 | tp->misc_host_ctrl); |
1267 | 1267 | ||
1268 | pci_read_config_word(tp->pdev, | 1268 | pci_read_config_word(tp->pdev, |
1269 | pm + PCI_PM_CTRL, | 1269 | pm + PCI_PM_CTRL, |
1270 | &power_control); | 1270 | &power_control); |
1271 | power_control |= PCI_PM_CTRL_PME_STATUS; | 1271 | power_control |= PCI_PM_CTRL_PME_STATUS; |
1272 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); | 1272 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); |
1273 | switch (state) { | 1273 | switch (state) { |
1274 | case PCI_D0: | 1274 | case PCI_D0: |
1275 | power_control |= 0; | 1275 | power_control |= 0; |
1276 | pci_write_config_word(tp->pdev, | 1276 | pci_write_config_word(tp->pdev, |
1277 | pm + PCI_PM_CTRL, | 1277 | pm + PCI_PM_CTRL, |
1278 | power_control); | 1278 | power_control); |
1279 | udelay(100); /* Delay after power state change */ | 1279 | udelay(100); /* Delay after power state change */ |
1280 | 1280 | ||
1281 | /* Switch out of Vaux if it is a NIC */ | 1281 | /* Switch out of Vaux if it is a NIC */ |
1282 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | 1282 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) |
1283 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); | 1283 | tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); |
1284 | 1284 | ||
1285 | return 0; | 1285 | return 0; |
1286 | 1286 | ||
1287 | case PCI_D1: | 1287 | case PCI_D1: |
1288 | power_control |= 1; | 1288 | power_control |= 1; |
1289 | break; | 1289 | break; |
1290 | 1290 | ||
1291 | case PCI_D2: | 1291 | case PCI_D2: |
1292 | power_control |= 2; | 1292 | power_control |= 2; |
1293 | break; | 1293 | break; |
1294 | 1294 | ||
1295 | case PCI_D3hot: | 1295 | case PCI_D3hot: |
1296 | power_control |= 3; | 1296 | power_control |= 3; |
1297 | break; | 1297 | break; |
1298 | 1298 | ||
1299 | default: | 1299 | default: |
1300 | printk(KERN_WARNING PFX "%s: Invalid power state (%d) " | 1300 | printk(KERN_WARNING PFX "%s: Invalid power state (%d) " |
1301 | "requested.\n", | 1301 | "requested.\n", |
1302 | tp->dev->name, state); | 1302 | tp->dev->name, state); |
1303 | return -EINVAL; | 1303 | return -EINVAL; |
1304 | } | 1304 | } |
1305 | 1305 | ||
1306 | power_control |= PCI_PM_CTRL_PME_ENABLE; | 1306 | power_control |= PCI_PM_CTRL_PME_ENABLE; |
1307 | 1307 | ||
1308 | misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); | 1308 | misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); |
1309 | tw32(TG3PCI_MISC_HOST_CTRL, | 1309 | tw32(TG3PCI_MISC_HOST_CTRL, |
1310 | misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); | 1310 | misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); |
1311 | 1311 | ||
1312 | if (tp->link_config.phy_is_low_power == 0) { | 1312 | if (tp->link_config.phy_is_low_power == 0) { |
1313 | tp->link_config.phy_is_low_power = 1; | 1313 | tp->link_config.phy_is_low_power = 1; |
1314 | tp->link_config.orig_speed = tp->link_config.speed; | 1314 | tp->link_config.orig_speed = tp->link_config.speed; |
1315 | tp->link_config.orig_duplex = tp->link_config.duplex; | 1315 | tp->link_config.orig_duplex = tp->link_config.duplex; |
1316 | tp->link_config.orig_autoneg = tp->link_config.autoneg; | 1316 | tp->link_config.orig_autoneg = tp->link_config.autoneg; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { | 1319 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { |
1320 | tp->link_config.speed = SPEED_10; | 1320 | tp->link_config.speed = SPEED_10; |
1321 | tp->link_config.duplex = DUPLEX_HALF; | 1321 | tp->link_config.duplex = DUPLEX_HALF; |
1322 | tp->link_config.autoneg = AUTONEG_ENABLE; | 1322 | tp->link_config.autoneg = AUTONEG_ENABLE; |
1323 | tg3_setup_phy(tp, 0); | 1323 | tg3_setup_phy(tp, 0); |
1324 | } | 1324 | } |
1325 | 1325 | ||
1326 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 1326 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
1327 | u32 val; | 1327 | u32 val; |
1328 | 1328 | ||
1329 | val = tr32(GRC_VCPU_EXT_CTRL); | 1329 | val = tr32(GRC_VCPU_EXT_CTRL); |
1330 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); | 1330 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); |
1331 | } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 1331 | } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { |
1332 | int i; | 1332 | int i; |
1333 | u32 val; | 1333 | u32 val; |
1334 | 1334 | ||
1335 | for (i = 0; i < 200; i++) { | 1335 | for (i = 0; i < 200; i++) { |
1336 | tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); | 1336 | tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); |
1337 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) | 1337 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) |
1338 | break; | 1338 | break; |
1339 | msleep(1); | 1339 | msleep(1); |
1340 | } | 1340 | } |
1341 | } | 1341 | } |
1342 | if (tp->tg3_flags & TG3_FLAG_WOL_CAP) | 1342 | if (tp->tg3_flags & TG3_FLAG_WOL_CAP) |
1343 | tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | | 1343 | tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | |
1344 | WOL_DRV_STATE_SHUTDOWN | | 1344 | WOL_DRV_STATE_SHUTDOWN | |
1345 | WOL_DRV_WOL | | 1345 | WOL_DRV_WOL | |
1346 | WOL_SET_MAGIC_PKT); | 1346 | WOL_SET_MAGIC_PKT); |
1347 | 1347 | ||
1348 | pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps); | 1348 | pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps); |
1349 | 1349 | ||
1350 | if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { | 1350 | if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { |
1351 | u32 mac_mode; | 1351 | u32 mac_mode; |
1352 | 1352 | ||
1353 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 1353 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { |
1354 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); | 1354 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); |
1355 | udelay(40); | 1355 | udelay(40); |
1356 | 1356 | ||
1357 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 1357 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) |
1358 | mac_mode = MAC_MODE_PORT_MODE_GMII; | 1358 | mac_mode = MAC_MODE_PORT_MODE_GMII; |
1359 | else | 1359 | else |
1360 | mac_mode = MAC_MODE_PORT_MODE_MII; | 1360 | mac_mode = MAC_MODE_PORT_MODE_MII; |
1361 | 1361 | ||
1362 | mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; | 1362 | mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; |
1363 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 1363 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
1364 | ASIC_REV_5700) { | 1364 | ASIC_REV_5700) { |
1365 | u32 speed = (tp->tg3_flags & | 1365 | u32 speed = (tp->tg3_flags & |
1366 | TG3_FLAG_WOL_SPEED_100MB) ? | 1366 | TG3_FLAG_WOL_SPEED_100MB) ? |
1367 | SPEED_100 : SPEED_10; | 1367 | SPEED_100 : SPEED_10; |
1368 | if (tg3_5700_link_polarity(tp, speed)) | 1368 | if (tg3_5700_link_polarity(tp, speed)) |
1369 | mac_mode |= MAC_MODE_LINK_POLARITY; | 1369 | mac_mode |= MAC_MODE_LINK_POLARITY; |
1370 | else | 1370 | else |
1371 | mac_mode &= ~MAC_MODE_LINK_POLARITY; | 1371 | mac_mode &= ~MAC_MODE_LINK_POLARITY; |
1372 | } | 1372 | } |
1373 | } else { | 1373 | } else { |
1374 | mac_mode = MAC_MODE_PORT_MODE_TBI; | 1374 | mac_mode = MAC_MODE_PORT_MODE_TBI; |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 1377 | if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) |
1378 | tw32(MAC_LED_CTRL, tp->led_ctrl); | 1378 | tw32(MAC_LED_CTRL, tp->led_ctrl); |
1379 | 1379 | ||
1380 | if (((power_caps & PCI_PM_CAP_PME_D3cold) && | 1380 | if (((power_caps & PCI_PM_CAP_PME_D3cold) && |
1381 | (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) | 1381 | (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) |
1382 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; | 1382 | mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; |
1383 | 1383 | ||
1384 | tw32_f(MAC_MODE, mac_mode); | 1384 | tw32_f(MAC_MODE, mac_mode); |
1385 | udelay(100); | 1385 | udelay(100); |
1386 | 1386 | ||
1387 | tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); | 1387 | tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); |
1388 | udelay(10); | 1388 | udelay(10); |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && | 1391 | if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && |
1392 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1392 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1393 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 1393 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { |
1394 | u32 base_val; | 1394 | u32 base_val; |
1395 | 1395 | ||
1396 | base_val = tp->pci_clock_ctrl; | 1396 | base_val = tp->pci_clock_ctrl; |
1397 | base_val |= (CLOCK_CTRL_RXCLK_DISABLE | | 1397 | base_val |= (CLOCK_CTRL_RXCLK_DISABLE | |
1398 | CLOCK_CTRL_TXCLK_DISABLE); | 1398 | CLOCK_CTRL_TXCLK_DISABLE); |
1399 | 1399 | ||
1400 | tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | | 1400 | tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | |
1401 | CLOCK_CTRL_PWRDOWN_PLL133, 40); | 1401 | CLOCK_CTRL_PWRDOWN_PLL133, 40); |
1402 | } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 1402 | } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || |
1403 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { | 1403 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { |
1404 | /* do nothing */ | 1404 | /* do nothing */ |
1405 | } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 1405 | } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
1406 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { | 1406 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { |
1407 | u32 newbits1, newbits2; | 1407 | u32 newbits1, newbits2; |
1408 | 1408 | ||
1409 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1409 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1410 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 1410 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
1411 | newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | | 1411 | newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | |
1412 | CLOCK_CTRL_TXCLK_DISABLE | | 1412 | CLOCK_CTRL_TXCLK_DISABLE | |
1413 | CLOCK_CTRL_ALTCLK); | 1413 | CLOCK_CTRL_ALTCLK); |
1414 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; | 1414 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; |
1415 | } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 1415 | } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
1416 | newbits1 = CLOCK_CTRL_625_CORE; | 1416 | newbits1 = CLOCK_CTRL_625_CORE; |
1417 | newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; | 1417 | newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; |
1418 | } else { | 1418 | } else { |
1419 | newbits1 = CLOCK_CTRL_ALTCLK; | 1419 | newbits1 = CLOCK_CTRL_ALTCLK; |
1420 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; | 1420 | newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, | 1423 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, |
1424 | 40); | 1424 | 40); |
1425 | 1425 | ||
1426 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, | 1426 | tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, |
1427 | 40); | 1427 | 40); |
1428 | 1428 | ||
1429 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 1429 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
1430 | u32 newbits3; | 1430 | u32 newbits3; |
1431 | 1431 | ||
1432 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1432 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1433 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 1433 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
1434 | newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | | 1434 | newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | |
1435 | CLOCK_CTRL_TXCLK_DISABLE | | 1435 | CLOCK_CTRL_TXCLK_DISABLE | |
1436 | CLOCK_CTRL_44MHZ_CORE); | 1436 | CLOCK_CTRL_44MHZ_CORE); |
1437 | } else { | 1437 | } else { |
1438 | newbits3 = CLOCK_CTRL_44MHZ_CORE; | 1438 | newbits3 = CLOCK_CTRL_44MHZ_CORE; |
1439 | } | 1439 | } |
1440 | 1440 | ||
1441 | tw32_wait_f(TG3PCI_CLOCK_CTRL, | 1441 | tw32_wait_f(TG3PCI_CLOCK_CTRL, |
1442 | tp->pci_clock_ctrl | newbits3, 40); | 1442 | tp->pci_clock_ctrl | newbits3, 40); |
1443 | } | 1443 | } |
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && | 1446 | if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && |
1447 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 1447 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) |
1448 | tg3_power_down_phy(tp); | 1448 | tg3_power_down_phy(tp); |
1449 | 1449 | ||
1450 | tg3_frob_aux_power(tp); | 1450 | tg3_frob_aux_power(tp); |
1451 | 1451 | ||
1452 | /* Workaround for unstable PLL clock */ | 1452 | /* Workaround for unstable PLL clock */ |
1453 | if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || | 1453 | if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || |
1454 | (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { | 1454 | (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { |
1455 | u32 val = tr32(0x7d00); | 1455 | u32 val = tr32(0x7d00); |
1456 | 1456 | ||
1457 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); | 1457 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); |
1458 | tw32(0x7d00, val); | 1458 | tw32(0x7d00, val); |
1459 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 1459 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { |
1460 | int err; | 1460 | int err; |
1461 | 1461 | ||
1462 | err = tg3_nvram_lock(tp); | 1462 | err = tg3_nvram_lock(tp); |
1463 | tg3_halt_cpu(tp, RX_CPU_BASE); | 1463 | tg3_halt_cpu(tp, RX_CPU_BASE); |
1464 | if (!err) | 1464 | if (!err) |
1465 | tg3_nvram_unlock(tp); | 1465 | tg3_nvram_unlock(tp); |
1466 | } | 1466 | } |
1467 | } | 1467 | } |
1468 | 1468 | ||
1469 | tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); | 1469 | tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); |
1470 | 1470 | ||
1471 | /* Finally, set the new power state. */ | 1471 | /* Finally, set the new power state. */ |
1472 | pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); | 1472 | pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); |
1473 | udelay(100); /* Delay after power state change */ | 1473 | udelay(100); /* Delay after power state change */ |
1474 | 1474 | ||
1475 | return 0; | 1475 | return 0; |
1476 | } | 1476 | } |
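
tg3_set_power_state() drives the device's D-state through the standard PCI power-management capability: the low two bits of the PM control/status register select D0-D3hot, bit 8 enables PME generation, and bit 15 is the write-one-to-clear PME status. The sketch below shows how the value written above is assembled, assuming the PCI_PM_CTRL_* constants match <linux/pci_regs.h>; pmcsr_for_state() itself is made up for illustration.

#include <stdint.h>

#define PCI_PM_CTRL_STATE_MASK  0x0003  /* D0=0, D1=1, D2=2, D3hot=3 */
#define PCI_PM_CTRL_PME_ENABLE  0x0100
#define PCI_PM_CTRL_PME_STATUS  0x8000  /* write 1 to clear          */

/* Build the PMCSR value for a target D-state, clearing any pending PME. */
static uint16_t pmcsr_for_state(uint16_t old, unsigned int dstate, int enable_pme)
{
        uint16_t v = old;

        v |= PCI_PM_CTRL_PME_STATUS;            /* acknowledge stale PME    */
        v &= ~PCI_PM_CTRL_STATE_MASK;
        v |= dstate & PCI_PM_CTRL_STATE_MASK;   /* 0..3 selects the D-state */
        if (enable_pme)
                v |= PCI_PM_CTRL_PME_ENABLE;
        return v;
}
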
1477 | 1477 | ||
1478 | static void tg3_link_report(struct tg3 *tp) | 1478 | static void tg3_link_report(struct tg3 *tp) |
1479 | { | 1479 | { |
1480 | if (!netif_carrier_ok(tp->dev)) { | 1480 | if (!netif_carrier_ok(tp->dev)) { |
1481 | if (netif_msg_link(tp)) | 1481 | if (netif_msg_link(tp)) |
1482 | printk(KERN_INFO PFX "%s: Link is down.\n", | 1482 | printk(KERN_INFO PFX "%s: Link is down.\n", |
1483 | tp->dev->name); | 1483 | tp->dev->name); |
1484 | } else if (netif_msg_link(tp)) { | 1484 | } else if (netif_msg_link(tp)) { |
1485 | printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", | 1485 | printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", |
1486 | tp->dev->name, | 1486 | tp->dev->name, |
1487 | (tp->link_config.active_speed == SPEED_1000 ? | 1487 | (tp->link_config.active_speed == SPEED_1000 ? |
1488 | 1000 : | 1488 | 1000 : |
1489 | (tp->link_config.active_speed == SPEED_100 ? | 1489 | (tp->link_config.active_speed == SPEED_100 ? |
1490 | 100 : 10)), | 1490 | 100 : 10)), |
1491 | (tp->link_config.active_duplex == DUPLEX_FULL ? | 1491 | (tp->link_config.active_duplex == DUPLEX_FULL ? |
1492 | "full" : "half")); | 1492 | "full" : "half")); |
1493 | 1493 | ||
1494 | printk(KERN_INFO PFX "%s: Flow control is %s for TX and " | 1494 | printk(KERN_INFO PFX "%s: Flow control is %s for TX and " |
1495 | "%s for RX.\n", | 1495 | "%s for RX.\n", |
1496 | tp->dev->name, | 1496 | tp->dev->name, |
1497 | (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off", | 1497 | (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off", |
1498 | (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off"); | 1498 | (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off"); |
1499 | } | 1499 | } |
1500 | } | 1500 | } |
1501 | 1501 | ||
1502 | static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv) | 1502 | static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv) |
1503 | { | 1503 | { |
1504 | u32 new_tg3_flags = 0; | 1504 | u32 new_tg3_flags = 0; |
1505 | u32 old_rx_mode = tp->rx_mode; | 1505 | u32 old_rx_mode = tp->rx_mode; |
1506 | u32 old_tx_mode = tp->tx_mode; | 1506 | u32 old_tx_mode = tp->tx_mode; |
1507 | 1507 | ||
1508 | if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) { | 1508 | if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) { |
1509 | 1509 | ||
1510 | /* Convert 1000BaseX flow control bits to 1000BaseT | 1510 | /* Convert 1000BaseX flow control bits to 1000BaseT |
1511 | * bits before resolving flow control. | 1511 | * bits before resolving flow control. |
1512 | */ | 1512 | */ |
1513 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 1513 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
1514 | local_adv &= ~(ADVERTISE_PAUSE_CAP | | 1514 | local_adv &= ~(ADVERTISE_PAUSE_CAP | |
1515 | ADVERTISE_PAUSE_ASYM); | 1515 | ADVERTISE_PAUSE_ASYM); |
1516 | remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM); | 1516 | remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM); |
1517 | 1517 | ||
1518 | if (local_adv & ADVERTISE_1000XPAUSE) | 1518 | if (local_adv & ADVERTISE_1000XPAUSE) |
1519 | local_adv |= ADVERTISE_PAUSE_CAP; | 1519 | local_adv |= ADVERTISE_PAUSE_CAP; |
1520 | if (local_adv & ADVERTISE_1000XPSE_ASYM) | 1520 | if (local_adv & ADVERTISE_1000XPSE_ASYM) |
1521 | local_adv |= ADVERTISE_PAUSE_ASYM; | 1521 | local_adv |= ADVERTISE_PAUSE_ASYM; |
1522 | if (remote_adv & LPA_1000XPAUSE) | 1522 | if (remote_adv & LPA_1000XPAUSE) |
1523 | remote_adv |= LPA_PAUSE_CAP; | 1523 | remote_adv |= LPA_PAUSE_CAP; |
1524 | if (remote_adv & LPA_1000XPAUSE_ASYM) | 1524 | if (remote_adv & LPA_1000XPAUSE_ASYM) |
1525 | remote_adv |= LPA_PAUSE_ASYM; | 1525 | remote_adv |= LPA_PAUSE_ASYM; |
1526 | } | 1526 | } |
1527 | 1527 | ||
1528 | if (local_adv & ADVERTISE_PAUSE_CAP) { | 1528 | if (local_adv & ADVERTISE_PAUSE_CAP) { |
1529 | if (local_adv & ADVERTISE_PAUSE_ASYM) { | 1529 | if (local_adv & ADVERTISE_PAUSE_ASYM) { |
1530 | if (remote_adv & LPA_PAUSE_CAP) | 1530 | if (remote_adv & LPA_PAUSE_CAP) |
1531 | new_tg3_flags |= | 1531 | new_tg3_flags |= |
1532 | (TG3_FLAG_RX_PAUSE | | 1532 | (TG3_FLAG_RX_PAUSE | |
1533 | TG3_FLAG_TX_PAUSE); | 1533 | TG3_FLAG_TX_PAUSE); |
1534 | else if (remote_adv & LPA_PAUSE_ASYM) | 1534 | else if (remote_adv & LPA_PAUSE_ASYM) |
1535 | new_tg3_flags |= | 1535 | new_tg3_flags |= |
1536 | (TG3_FLAG_RX_PAUSE); | 1536 | (TG3_FLAG_RX_PAUSE); |
1537 | } else { | 1537 | } else { |
1538 | if (remote_adv & LPA_PAUSE_CAP) | 1538 | if (remote_adv & LPA_PAUSE_CAP) |
1539 | new_tg3_flags |= | 1539 | new_tg3_flags |= |
1540 | (TG3_FLAG_RX_PAUSE | | 1540 | (TG3_FLAG_RX_PAUSE | |
1541 | TG3_FLAG_TX_PAUSE); | 1541 | TG3_FLAG_TX_PAUSE); |
1542 | } | 1542 | } |
1543 | } else if (local_adv & ADVERTISE_PAUSE_ASYM) { | 1543 | } else if (local_adv & ADVERTISE_PAUSE_ASYM) { |
1544 | if ((remote_adv & LPA_PAUSE_CAP) && | 1544 | if ((remote_adv & LPA_PAUSE_CAP) && |
1545 | (remote_adv & LPA_PAUSE_ASYM)) | 1545 | (remote_adv & LPA_PAUSE_ASYM)) |
1546 | new_tg3_flags |= TG3_FLAG_TX_PAUSE; | 1546 | new_tg3_flags |= TG3_FLAG_TX_PAUSE; |
1547 | } | 1547 | } |
1548 | 1548 | ||
1549 | tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE); | 1549 | tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE); |
1550 | tp->tg3_flags |= new_tg3_flags; | 1550 | tp->tg3_flags |= new_tg3_flags; |
1551 | } else { | 1551 | } else { |
1552 | new_tg3_flags = tp->tg3_flags; | 1552 | new_tg3_flags = tp->tg3_flags; |
1553 | } | 1553 | } |
1554 | 1554 | ||
1555 | if (new_tg3_flags & TG3_FLAG_RX_PAUSE) | 1555 | if (new_tg3_flags & TG3_FLAG_RX_PAUSE) |
1556 | tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; | 1556 | tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; |
1557 | else | 1557 | else |
1558 | tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; | 1558 | tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; |
1559 | 1559 | ||
1560 | if (old_rx_mode != tp->rx_mode) { | 1560 | if (old_rx_mode != tp->rx_mode) { |
1561 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 1561 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
1562 | } | 1562 | } |
1563 | 1563 | ||
1564 | if (new_tg3_flags & TG3_FLAG_TX_PAUSE) | 1564 | if (new_tg3_flags & TG3_FLAG_TX_PAUSE) |
1565 | tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; | 1565 | tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; |
1566 | else | 1566 | else |
1567 | tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; | 1567 | tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; |
1568 | 1568 | ||
1569 | if (old_tx_mode != tp->tx_mode) { | 1569 | if (old_tx_mode != tp->tx_mode) { |
1570 | tw32_f(MAC_TX_MODE, tp->tx_mode); | 1570 | tw32_f(MAC_TX_MODE, tp->tx_mode); |
1571 | } | 1571 | } |
1572 | } | 1572 | } |
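
When pause autonegotiation is enabled, tg3_setup_flow_control() resolves RX and TX pause from the local and link-partner advertisements using the standard symmetric/asymmetric pause rules. The standalone sketch below reproduces that decision tree with the TG3_FLAG_RX_PAUSE/TG3_FLAG_TX_PAUSE bookkeeping replaced by a plain struct; the advertisement constants are the usual <linux/mii.h> values, and resolve_pause() is an illustrative helper, not a tg3 function.

#include <stdbool.h>
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP   0x0400
#define ADVERTISE_PAUSE_ASYM  0x0800
#define LPA_PAUSE_CAP         0x0400
#define LPA_PAUSE_ASYM        0x0800

struct pause_result { bool rx; bool tx; };

/* Same decision tree as the TG3_FLAG_PAUSE_AUTONEG branch above. */
static struct pause_result resolve_pause(unsigned int local_adv, unsigned int remote_adv)
{
        struct pause_result r = { false, false };

        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & LPA_PAUSE_CAP)
                                r.rx = r.tx = true;     /* pause in both directions */
                        else if (remote_adv & LPA_PAUSE_ASYM)
                                r.rx = true;            /* RX-direction pause only  */
                } else if (remote_adv & LPA_PAUSE_CAP) {
                        r.rx = r.tx = true;
                }
        } else if ((local_adv & ADVERTISE_PAUSE_ASYM) &&
                   (remote_adv & LPA_PAUSE_CAP) &&
                   (remote_adv & LPA_PAUSE_ASYM)) {
                r.tx = true;                            /* TX-direction pause only  */
        }
        return r;
}

int main(void)
{
        /* Both ends advertise symmetric pause: flow control both ways. */
        struct pause_result r = resolve_pause(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP);
        printf("rx=%d tx=%d\n", r.rx, r.tx);    /* prints rx=1 tx=1 */
        return 0;
}
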
1573 | 1573 | ||
1574 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) | 1574 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) |
1575 | { | 1575 | { |
1576 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { | 1576 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { |
1577 | case MII_TG3_AUX_STAT_10HALF: | 1577 | case MII_TG3_AUX_STAT_10HALF: |
1578 | *speed = SPEED_10; | 1578 | *speed = SPEED_10; |
1579 | *duplex = DUPLEX_HALF; | 1579 | *duplex = DUPLEX_HALF; |
1580 | break; | 1580 | break; |
1581 | 1581 | ||
1582 | case MII_TG3_AUX_STAT_10FULL: | 1582 | case MII_TG3_AUX_STAT_10FULL: |
1583 | *speed = SPEED_10; | 1583 | *speed = SPEED_10; |
1584 | *duplex = DUPLEX_FULL; | 1584 | *duplex = DUPLEX_FULL; |
1585 | break; | 1585 | break; |
1586 | 1586 | ||
1587 | case MII_TG3_AUX_STAT_100HALF: | 1587 | case MII_TG3_AUX_STAT_100HALF: |
1588 | *speed = SPEED_100; | 1588 | *speed = SPEED_100; |
1589 | *duplex = DUPLEX_HALF; | 1589 | *duplex = DUPLEX_HALF; |
1590 | break; | 1590 | break; |
1591 | 1591 | ||
1592 | case MII_TG3_AUX_STAT_100FULL: | 1592 | case MII_TG3_AUX_STAT_100FULL: |
1593 | *speed = SPEED_100; | 1593 | *speed = SPEED_100; |
1594 | *duplex = DUPLEX_FULL; | 1594 | *duplex = DUPLEX_FULL; |
1595 | break; | 1595 | break; |
1596 | 1596 | ||
1597 | case MII_TG3_AUX_STAT_1000HALF: | 1597 | case MII_TG3_AUX_STAT_1000HALF: |
1598 | *speed = SPEED_1000; | 1598 | *speed = SPEED_1000; |
1599 | *duplex = DUPLEX_HALF; | 1599 | *duplex = DUPLEX_HALF; |
1600 | break; | 1600 | break; |
1601 | 1601 | ||
1602 | case MII_TG3_AUX_STAT_1000FULL: | 1602 | case MII_TG3_AUX_STAT_1000FULL: |
1603 | *speed = SPEED_1000; | 1603 | *speed = SPEED_1000; |
1604 | *duplex = DUPLEX_FULL; | 1604 | *duplex = DUPLEX_FULL; |
1605 | break; | 1605 | break; |
1606 | 1606 | ||
1607 | default: | 1607 | default: |
1608 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 1608 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
1609 | *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : | 1609 | *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : |
1610 | SPEED_10; | 1610 | SPEED_10; |
1611 | *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : | 1611 | *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : |
1612 | DUPLEX_HALF; | 1612 | DUPLEX_HALF; |
1613 | break; | 1613 | break; |
1614 | } | 1614 | } |
1615 | *speed = SPEED_INVALID; | 1615 | *speed = SPEED_INVALID; |
1616 | *duplex = DUPLEX_INVALID; | 1616 | *duplex = DUPLEX_INVALID; |
1617 | break; | 1617 | break; |
1618 | } | 1618 | } |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | static void tg3_phy_copper_begin(struct tg3 *tp) | 1621 | static void tg3_phy_copper_begin(struct tg3 *tp) |
1622 | { | 1622 | { |
1623 | u32 new_adv; | 1623 | u32 new_adv; |
1624 | int i; | 1624 | int i; |
1625 | 1625 | ||
1626 | if (tp->link_config.phy_is_low_power) { | 1626 | if (tp->link_config.phy_is_low_power) { |
1627 | /* Entering low power mode. Disable gigabit and | 1627 | /* Entering low power mode. Disable gigabit and |
1628 | * 100baseT advertisements. | 1628 | * 100baseT advertisements. |
1629 | */ | 1629 | */ |
1630 | tg3_writephy(tp, MII_TG3_CTRL, 0); | 1630 | tg3_writephy(tp, MII_TG3_CTRL, 0); |
1631 | 1631 | ||
1632 | new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 1632 | new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | |
1633 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 1633 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); |
1634 | if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) | 1634 | if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) |
1635 | new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); | 1635 | new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); |
1636 | 1636 | ||
1637 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | 1637 | tg3_writephy(tp, MII_ADVERTISE, new_adv); |
1638 | } else if (tp->link_config.speed == SPEED_INVALID) { | 1638 | } else if (tp->link_config.speed == SPEED_INVALID) { |
1639 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 1639 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) |
1640 | tp->link_config.advertising &= | 1640 | tp->link_config.advertising &= |
1641 | ~(ADVERTISED_1000baseT_Half | | 1641 | ~(ADVERTISED_1000baseT_Half | |
1642 | ADVERTISED_1000baseT_Full); | 1642 | ADVERTISED_1000baseT_Full); |
1643 | 1643 | ||
1644 | new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 1644 | new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); |
1645 | if (tp->link_config.advertising & ADVERTISED_10baseT_Half) | 1645 | if (tp->link_config.advertising & ADVERTISED_10baseT_Half) |
1646 | new_adv |= ADVERTISE_10HALF; | 1646 | new_adv |= ADVERTISE_10HALF; |
1647 | if (tp->link_config.advertising & ADVERTISED_10baseT_Full) | 1647 | if (tp->link_config.advertising & ADVERTISED_10baseT_Full) |
1648 | new_adv |= ADVERTISE_10FULL; | 1648 | new_adv |= ADVERTISE_10FULL; |
1649 | if (tp->link_config.advertising & ADVERTISED_100baseT_Half) | 1649 | if (tp->link_config.advertising & ADVERTISED_100baseT_Half) |
1650 | new_adv |= ADVERTISE_100HALF; | 1650 | new_adv |= ADVERTISE_100HALF; |
1651 | if (tp->link_config.advertising & ADVERTISED_100baseT_Full) | 1651 | if (tp->link_config.advertising & ADVERTISED_100baseT_Full) |
1652 | new_adv |= ADVERTISE_100FULL; | 1652 | new_adv |= ADVERTISE_100FULL; |
1653 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | 1653 | tg3_writephy(tp, MII_ADVERTISE, new_adv); |
1654 | 1654 | ||
1655 | if (tp->link_config.advertising & | 1655 | if (tp->link_config.advertising & |
1656 | (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { | 1656 | (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { |
1657 | new_adv = 0; | 1657 | new_adv = 0; |
1658 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) | 1658 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) |
1659 | new_adv |= MII_TG3_CTRL_ADV_1000_HALF; | 1659 | new_adv |= MII_TG3_CTRL_ADV_1000_HALF; |
1660 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) | 1660 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) |
1661 | new_adv |= MII_TG3_CTRL_ADV_1000_FULL; | 1661 | new_adv |= MII_TG3_CTRL_ADV_1000_FULL; |
1662 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && | 1662 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && |
1663 | (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 1663 | (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
1664 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) | 1664 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) |
1665 | new_adv |= (MII_TG3_CTRL_AS_MASTER | | 1665 | new_adv |= (MII_TG3_CTRL_AS_MASTER | |
1666 | MII_TG3_CTRL_ENABLE_AS_MASTER); | 1666 | MII_TG3_CTRL_ENABLE_AS_MASTER); |
1667 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); | 1667 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); |
1668 | } else { | 1668 | } else { |
1669 | tg3_writephy(tp, MII_TG3_CTRL, 0); | 1669 | tg3_writephy(tp, MII_TG3_CTRL, 0); |
1670 | } | 1670 | } |
1671 | } else { | 1671 | } else { |
1672 | /* Asking for a specific link mode. */ | 1672 | /* Asking for a specific link mode. */ |
1673 | if (tp->link_config.speed == SPEED_1000) { | 1673 | if (tp->link_config.speed == SPEED_1000) { |
1674 | new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; | 1674 | new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; |
1675 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | 1675 | tg3_writephy(tp, MII_ADVERTISE, new_adv); |
1676 | 1676 | ||
1677 | if (tp->link_config.duplex == DUPLEX_FULL) | 1677 | if (tp->link_config.duplex == DUPLEX_FULL) |
1678 | new_adv = MII_TG3_CTRL_ADV_1000_FULL; | 1678 | new_adv = MII_TG3_CTRL_ADV_1000_FULL; |
1679 | else | 1679 | else |
1680 | new_adv = MII_TG3_CTRL_ADV_1000_HALF; | 1680 | new_adv = MII_TG3_CTRL_ADV_1000_HALF; |
1681 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 1681 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
1682 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | 1682 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) |
1683 | new_adv |= (MII_TG3_CTRL_AS_MASTER | | 1683 | new_adv |= (MII_TG3_CTRL_AS_MASTER | |
1684 | MII_TG3_CTRL_ENABLE_AS_MASTER); | 1684 | MII_TG3_CTRL_ENABLE_AS_MASTER); |
1685 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); | 1685 | tg3_writephy(tp, MII_TG3_CTRL, new_adv); |
1686 | } else { | 1686 | } else { |
1687 | tg3_writephy(tp, MII_TG3_CTRL, 0); | 1687 | tg3_writephy(tp, MII_TG3_CTRL, 0); |
1688 | 1688 | ||
1689 | new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; | 1689 | new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; |
1690 | if (tp->link_config.speed == SPEED_100) { | 1690 | if (tp->link_config.speed == SPEED_100) { |
1691 | if (tp->link_config.duplex == DUPLEX_FULL) | 1691 | if (tp->link_config.duplex == DUPLEX_FULL) |
1692 | new_adv |= ADVERTISE_100FULL; | 1692 | new_adv |= ADVERTISE_100FULL; |
1693 | else | 1693 | else |
1694 | new_adv |= ADVERTISE_100HALF; | 1694 | new_adv |= ADVERTISE_100HALF; |
1695 | } else { | 1695 | } else { |
1696 | if (tp->link_config.duplex == DUPLEX_FULL) | 1696 | if (tp->link_config.duplex == DUPLEX_FULL) |
1697 | new_adv |= ADVERTISE_10FULL; | 1697 | new_adv |= ADVERTISE_10FULL; |
1698 | else | 1698 | else |
1699 | new_adv |= ADVERTISE_10HALF; | 1699 | new_adv |= ADVERTISE_10HALF; |
1700 | } | 1700 | } |
1701 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | 1701 | tg3_writephy(tp, MII_ADVERTISE, new_adv); |
1702 | } | 1702 | } |
1703 | } | 1703 | } |
1704 | 1704 | ||
1705 | if (tp->link_config.autoneg == AUTONEG_DISABLE && | 1705 | if (tp->link_config.autoneg == AUTONEG_DISABLE && |
1706 | tp->link_config.speed != SPEED_INVALID) { | 1706 | tp->link_config.speed != SPEED_INVALID) { |
1707 | u32 bmcr, orig_bmcr; | 1707 | u32 bmcr, orig_bmcr; |
1708 | 1708 | ||
1709 | tp->link_config.active_speed = tp->link_config.speed; | 1709 | tp->link_config.active_speed = tp->link_config.speed; |
1710 | tp->link_config.active_duplex = tp->link_config.duplex; | 1710 | tp->link_config.active_duplex = tp->link_config.duplex; |
1711 | 1711 | ||
1712 | bmcr = 0; | 1712 | bmcr = 0; |
1713 | switch (tp->link_config.speed) { | 1713 | switch (tp->link_config.speed) { |
1714 | default: | 1714 | default: |
1715 | case SPEED_10: | 1715 | case SPEED_10: |
1716 | break; | 1716 | break; |
1717 | 1717 | ||
1718 | case SPEED_100: | 1718 | case SPEED_100: |
1719 | bmcr |= BMCR_SPEED100; | 1719 | bmcr |= BMCR_SPEED100; |
1720 | break; | 1720 | break; |
1721 | 1721 | ||
1722 | case SPEED_1000: | 1722 | case SPEED_1000: |
1723 | bmcr |= TG3_BMCR_SPEED1000; | 1723 | bmcr |= TG3_BMCR_SPEED1000; |
1724 | break; | 1724 | break; |
1725 | } | 1725 | } |
1726 | 1726 | ||
1727 | if (tp->link_config.duplex == DUPLEX_FULL) | 1727 | if (tp->link_config.duplex == DUPLEX_FULL) |
1728 | bmcr |= BMCR_FULLDPLX; | 1728 | bmcr |= BMCR_FULLDPLX; |
1729 | 1729 | ||
1730 | if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && | 1730 | if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && |
1731 | (bmcr != orig_bmcr)) { | 1731 | (bmcr != orig_bmcr)) { |
1732 | tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); | 1732 | tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); |
1733 | for (i = 0; i < 1500; i++) { | 1733 | for (i = 0; i < 1500; i++) { |
1734 | u32 tmp; | 1734 | u32 tmp; |
1735 | 1735 | ||
1736 | udelay(10); | 1736 | udelay(10); |
1737 | if (tg3_readphy(tp, MII_BMSR, &tmp) || | 1737 | if (tg3_readphy(tp, MII_BMSR, &tmp) || |
1738 | tg3_readphy(tp, MII_BMSR, &tmp)) | 1738 | tg3_readphy(tp, MII_BMSR, &tmp)) |
1739 | continue; | 1739 | continue; |
1740 | if (!(tmp & BMSR_LSTATUS)) { | 1740 | if (!(tmp & BMSR_LSTATUS)) { |
1741 | udelay(40); | 1741 | udelay(40); |
1742 | break; | 1742 | break; |
1743 | } | 1743 | } |
1744 | } | 1744 | } |
1745 | tg3_writephy(tp, MII_BMCR, bmcr); | 1745 | tg3_writephy(tp, MII_BMCR, bmcr); |
1746 | udelay(40); | 1746 | udelay(40); |
1747 | } | 1747 | } |
1748 | } else { | 1748 | } else { |
1749 | tg3_writephy(tp, MII_BMCR, | 1749 | tg3_writephy(tp, MII_BMCR, |
1750 | BMCR_ANENABLE | BMCR_ANRESTART); | 1750 | BMCR_ANENABLE | BMCR_ANRESTART); |
1751 | } | 1751 | } |
1752 | } | 1752 | } |
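
In the autonegotiation paths above, tg3_phy_copper_begin() converts the ethtool-style ADVERTISED_* mask in tp->link_config.advertising into MII_ADVERTISE register bits before writing the PHY. A minimal standalone sketch of that translation for the 10/100 capabilities follows; the constants are as in <linux/mii.h> and <linux/ethtool.h>, and build_mii_advertise() is a hypothetical helper, not part of the driver.

#include <stdint.h>

/* ethtool advertisement mask bits (<linux/ethtool.h>) */
#define ADVERTISED_10baseT_Half    (1 << 0)
#define ADVERTISED_10baseT_Full    (1 << 1)
#define ADVERTISED_100baseT_Half   (1 << 2)
#define ADVERTISED_100baseT_Full   (1 << 3)

/* MII_ADVERTISE register bits (<linux/mii.h>) */
#define ADVERTISE_CSMA        0x0001
#define ADVERTISE_10HALF      0x0020
#define ADVERTISE_10FULL      0x0040
#define ADVERTISE_100HALF     0x0080
#define ADVERTISE_100FULL     0x0100
#define ADVERTISE_PAUSE_CAP   0x0400

/* Hypothetical helper: build the MII_ADVERTISE value the driver writes. */
static uint16_t build_mii_advertise(uint32_t ethtool_adv)
{
        uint16_t adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;

        if (ethtool_adv & ADVERTISED_10baseT_Half)
                adv |= ADVERTISE_10HALF;
        if (ethtool_adv & ADVERTISED_10baseT_Full)
                adv |= ADVERTISE_10FULL;
        if (ethtool_adv & ADVERTISED_100baseT_Half)
                adv |= ADVERTISE_100HALF;
        if (ethtool_adv & ADVERTISED_100baseT_Full)
                adv |= ADVERTISE_100FULL;
        return adv;
}
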
1753 | 1753 | ||
1754 | static int tg3_init_5401phy_dsp(struct tg3 *tp) | 1754 | static int tg3_init_5401phy_dsp(struct tg3 *tp) |
1755 | { | 1755 | { |
1756 | int err; | 1756 | int err; |
1757 | 1757 | ||
1758 | /* Turn off tap power management. */ | 1758 | /* Turn off tap power management. */ |
1759 | /* Set Extended packet length bit */ | 1759 | /* Set Extended packet length bit */ |
1760 | err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 1760 | err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); |
1761 | 1761 | ||
1762 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); | 1762 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); |
1763 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); | 1763 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); |
1764 | 1764 | ||
1765 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); | 1765 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); |
1766 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); | 1766 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); |
1767 | 1767 | ||
1768 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); | 1768 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); |
1769 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); | 1769 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); |
1770 | 1770 | ||
1771 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); | 1771 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); |
1772 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); | 1772 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); |
1773 | 1773 | ||
1774 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 1774 | err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); |
1775 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); | 1775 | err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); |
1776 | 1776 | ||
1777 | udelay(40); | 1777 | udelay(40); |
1778 | 1778 | ||
1779 | return err; | 1779 | return err; |
1780 | } | 1780 | } |
1781 | 1781 | ||
1782 | static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) | 1782 | static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) |
1783 | { | 1783 | { |
1784 | u32 adv_reg, all_mask = 0; | 1784 | u32 adv_reg, all_mask = 0; |
1785 | 1785 | ||
1786 | if (mask & ADVERTISED_10baseT_Half) | 1786 | if (mask & ADVERTISED_10baseT_Half) |
1787 | all_mask |= ADVERTISE_10HALF; | 1787 | all_mask |= ADVERTISE_10HALF; |
1788 | if (mask & ADVERTISED_10baseT_Full) | 1788 | if (mask & ADVERTISED_10baseT_Full) |
1789 | all_mask |= ADVERTISE_10FULL; | 1789 | all_mask |= ADVERTISE_10FULL; |
1790 | if (mask & ADVERTISED_100baseT_Half) | 1790 | if (mask & ADVERTISED_100baseT_Half) |
1791 | all_mask |= ADVERTISE_100HALF; | 1791 | all_mask |= ADVERTISE_100HALF; |
1792 | if (mask & ADVERTISED_100baseT_Full) | 1792 | if (mask & ADVERTISED_100baseT_Full) |
1793 | all_mask |= ADVERTISE_100FULL; | 1793 | all_mask |= ADVERTISE_100FULL; |
1794 | 1794 | ||
1795 | if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) | 1795 | if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) |
1796 | return 0; | 1796 | return 0; |
1797 | 1797 | ||
1798 | if ((adv_reg & all_mask) != all_mask) | 1798 | if ((adv_reg & all_mask) != all_mask) |
1799 | return 0; | 1799 | return 0; |
1800 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { | 1800 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { |
1801 | u32 tg3_ctrl; | 1801 | u32 tg3_ctrl; |
1802 | 1802 | ||
1803 | all_mask = 0; | 1803 | all_mask = 0; |
1804 | if (mask & ADVERTISED_1000baseT_Half) | 1804 | if (mask & ADVERTISED_1000baseT_Half) |
1805 | all_mask |= ADVERTISE_1000HALF; | 1805 | all_mask |= ADVERTISE_1000HALF; |
1806 | if (mask & ADVERTISED_1000baseT_Full) | 1806 | if (mask & ADVERTISED_1000baseT_Full) |
1807 | all_mask |= ADVERTISE_1000FULL; | 1807 | all_mask |= ADVERTISE_1000FULL; |
1808 | 1808 | ||
1809 | if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) | 1809 | if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) |
1810 | return 0; | 1810 | return 0; |
1811 | 1811 | ||
1812 | if ((tg3_ctrl & all_mask) != all_mask) | 1812 | if ((tg3_ctrl & all_mask) != all_mask) |
1813 | return 0; | 1813 | return 0; |
1814 | } | 1814 | } |
1815 | return 1; | 1815 | return 1; |
1816 | } | 1816 | } |
1817 | 1817 | ||
1818 | static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | 1818 | static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) |
1819 | { | 1819 | { |
1820 | int current_link_up; | 1820 | int current_link_up; |
1821 | u32 bmsr, dummy; | 1821 | u32 bmsr, dummy; |
1822 | u16 current_speed; | 1822 | u16 current_speed; |
1823 | u8 current_duplex; | 1823 | u8 current_duplex; |
1824 | int i, err; | 1824 | int i, err; |
1825 | 1825 | ||
1826 | tw32(MAC_EVENT, 0); | 1826 | tw32(MAC_EVENT, 0); |
1827 | 1827 | ||
1828 | tw32_f(MAC_STATUS, | 1828 | tw32_f(MAC_STATUS, |
1829 | (MAC_STATUS_SYNC_CHANGED | | 1829 | (MAC_STATUS_SYNC_CHANGED | |
1830 | MAC_STATUS_CFG_CHANGED | | 1830 | MAC_STATUS_CFG_CHANGED | |
1831 | MAC_STATUS_MI_COMPLETION | | 1831 | MAC_STATUS_MI_COMPLETION | |
1832 | MAC_STATUS_LNKSTATE_CHANGED)); | 1832 | MAC_STATUS_LNKSTATE_CHANGED)); |
1833 | udelay(40); | 1833 | udelay(40); |
1834 | 1834 | ||
1835 | tp->mi_mode = MAC_MI_MODE_BASE; | 1835 | tp->mi_mode = MAC_MI_MODE_BASE; |
1836 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 1836 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
1837 | udelay(80); | 1837 | udelay(80); |
1838 | 1838 | ||
1839 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); | 1839 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); |
1840 | 1840 | ||
1841 | /* Some third-party PHYs need to be reset on link going | 1841 | /* Some third-party PHYs need to be reset on link going |
1842 | * down. | 1842 | * down. |
1843 | */ | 1843 | */ |
1844 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 1844 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
1845 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 1845 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
1846 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && | 1846 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && |
1847 | netif_carrier_ok(tp->dev)) { | 1847 | netif_carrier_ok(tp->dev)) { |
1848 | tg3_readphy(tp, MII_BMSR, &bmsr); | 1848 | tg3_readphy(tp, MII_BMSR, &bmsr); |
1849 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 1849 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && |
1850 | !(bmsr & BMSR_LSTATUS)) | 1850 | !(bmsr & BMSR_LSTATUS)) |
1851 | force_reset = 1; | 1851 | force_reset = 1; |
1852 | } | 1852 | } |
1853 | if (force_reset) | 1853 | if (force_reset) |
1854 | tg3_phy_reset(tp); | 1854 | tg3_phy_reset(tp); |
1855 | 1855 | ||
1856 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 1856 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { |
1857 | tg3_readphy(tp, MII_BMSR, &bmsr); | 1857 | tg3_readphy(tp, MII_BMSR, &bmsr); |
1858 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || | 1858 | if (tg3_readphy(tp, MII_BMSR, &bmsr) || |
1859 | !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) | 1859 | !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) |
1860 | bmsr = 0; | 1860 | bmsr = 0; |
1861 | 1861 | ||
1862 | if (!(bmsr & BMSR_LSTATUS)) { | 1862 | if (!(bmsr & BMSR_LSTATUS)) { |
1863 | err = tg3_init_5401phy_dsp(tp); | 1863 | err = tg3_init_5401phy_dsp(tp); |
1864 | if (err) | 1864 | if (err) |
1865 | return err; | 1865 | return err; |
1866 | 1866 | ||
1867 | tg3_readphy(tp, MII_BMSR, &bmsr); | 1867 | tg3_readphy(tp, MII_BMSR, &bmsr); |
1868 | for (i = 0; i < 1000; i++) { | 1868 | for (i = 0; i < 1000; i++) { |
1869 | udelay(10); | 1869 | udelay(10); |
1870 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 1870 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && |
1871 | (bmsr & BMSR_LSTATUS)) { | 1871 | (bmsr & BMSR_LSTATUS)) { |
1872 | udelay(40); | 1872 | udelay(40); |
1873 | break; | 1873 | break; |
1874 | } | 1874 | } |
1875 | } | 1875 | } |
1876 | 1876 | ||
1877 | if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && | 1877 | if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && |
1878 | !(bmsr & BMSR_LSTATUS) && | 1878 | !(bmsr & BMSR_LSTATUS) && |
1879 | tp->link_config.active_speed == SPEED_1000) { | 1879 | tp->link_config.active_speed == SPEED_1000) { |
1880 | err = tg3_phy_reset(tp); | 1880 | err = tg3_phy_reset(tp); |
1881 | if (!err) | 1881 | if (!err) |
1882 | err = tg3_init_5401phy_dsp(tp); | 1882 | err = tg3_init_5401phy_dsp(tp); |
1883 | if (err) | 1883 | if (err) |
1884 | return err; | 1884 | return err; |
1885 | } | 1885 | } |
1886 | } | 1886 | } |
1887 | } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 1887 | } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
1888 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { | 1888 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { |
1889 | /* 5701 {A0,B0} CRC bug workaround */ | 1889 | /* 5701 {A0,B0} CRC bug workaround */ |
1890 | tg3_writephy(tp, 0x15, 0x0a75); | 1890 | tg3_writephy(tp, 0x15, 0x0a75); |
1891 | tg3_writephy(tp, 0x1c, 0x8c68); | 1891 | tg3_writephy(tp, 0x1c, 0x8c68); |
1892 | tg3_writephy(tp, 0x1c, 0x8d68); | 1892 | tg3_writephy(tp, 0x1c, 0x8d68); |
1893 | tg3_writephy(tp, 0x1c, 0x8c68); | 1893 | tg3_writephy(tp, 0x1c, 0x8c68); |
1894 | } | 1894 | } |
1895 | 1895 | ||
1896 | /* Clear pending interrupts... */ | 1896 | /* Clear pending interrupts... */ |
1897 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 1897 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); |
1898 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 1898 | tg3_readphy(tp, MII_TG3_ISTAT, &dummy); |
1899 | 1899 | ||
1900 | if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) | 1900 | if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) |
1901 | tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); | 1901 | tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); |
1902 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) | 1902 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) |
1903 | tg3_writephy(tp, MII_TG3_IMASK, ~0); | 1903 | tg3_writephy(tp, MII_TG3_IMASK, ~0); |
1904 | 1904 | ||
1905 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 1905 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
1906 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 1906 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
1907 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) | 1907 | if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) |
1908 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 1908 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
1909 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 1909 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); |
1910 | else | 1910 | else |
1911 | tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); | 1911 | tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); |
1912 | } | 1912 | } |
1913 | 1913 | ||
1914 | current_link_up = 0; | 1914 | current_link_up = 0; |
1915 | current_speed = SPEED_INVALID; | 1915 | current_speed = SPEED_INVALID; |
1916 | current_duplex = DUPLEX_INVALID; | 1916 | current_duplex = DUPLEX_INVALID; |
1917 | 1917 | ||
1918 | if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { | 1918 | if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { |
1919 | u32 val; | 1919 | u32 val; |
1920 | 1920 | ||
1921 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); | 1921 | tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); |
1922 | tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); | 1922 | tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); |
1923 | if (!(val & (1 << 10))) { | 1923 | if (!(val & (1 << 10))) { |
1924 | val |= (1 << 10); | 1924 | val |= (1 << 10); |
1925 | tg3_writephy(tp, MII_TG3_AUX_CTRL, val); | 1925 | tg3_writephy(tp, MII_TG3_AUX_CTRL, val); |
1926 | goto relink; | 1926 | goto relink; |
1927 | } | 1927 | } |
1928 | } | 1928 | } |
1929 | 1929 | ||
1930 | bmsr = 0; | 1930 | bmsr = 0; |
1931 | for (i = 0; i < 100; i++) { | 1931 | for (i = 0; i < 100; i++) { |
1932 | tg3_readphy(tp, MII_BMSR, &bmsr); | 1932 | tg3_readphy(tp, MII_BMSR, &bmsr); |
1933 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 1933 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && |
1934 | (bmsr & BMSR_LSTATUS)) | 1934 | (bmsr & BMSR_LSTATUS)) |
1935 | break; | 1935 | break; |
1936 | udelay(40); | 1936 | udelay(40); |
1937 | } | 1937 | } |
1938 | 1938 | ||
1939 | if (bmsr & BMSR_LSTATUS) { | 1939 | if (bmsr & BMSR_LSTATUS) { |
1940 | u32 aux_stat, bmcr; | 1940 | u32 aux_stat, bmcr; |
1941 | 1941 | ||
1942 | tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); | 1942 | tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); |
1943 | for (i = 0; i < 2000; i++) { | 1943 | for (i = 0; i < 2000; i++) { |
1944 | udelay(10); | 1944 | udelay(10); |
1945 | if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && | 1945 | if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && |
1946 | aux_stat) | 1946 | aux_stat) |
1947 | break; | 1947 | break; |
1948 | } | 1948 | } |
1949 | 1949 | ||
1950 | tg3_aux_stat_to_speed_duplex(tp, aux_stat, | 1950 | tg3_aux_stat_to_speed_duplex(tp, aux_stat, |
1951 | &current_speed, | 1951 | &current_speed, |
1952 | &current_duplex); | 1952 | &current_duplex); |
1953 | 1953 | ||
1954 | bmcr = 0; | 1954 | bmcr = 0; |
1955 | for (i = 0; i < 200; i++) { | 1955 | for (i = 0; i < 200; i++) { |
1956 | tg3_readphy(tp, MII_BMCR, &bmcr); | 1956 | tg3_readphy(tp, MII_BMCR, &bmcr); |
1957 | if (tg3_readphy(tp, MII_BMCR, &bmcr)) | 1957 | if (tg3_readphy(tp, MII_BMCR, &bmcr)) |
1958 | continue; | 1958 | continue; |
1959 | if (bmcr && bmcr != 0x7fff) | 1959 | if (bmcr && bmcr != 0x7fff) |
1960 | break; | 1960 | break; |
1961 | udelay(10); | 1961 | udelay(10); |
1962 | } | 1962 | } |
1963 | 1963 | ||
1964 | if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 1964 | if (tp->link_config.autoneg == AUTONEG_ENABLE) { |
1965 | if (bmcr & BMCR_ANENABLE) { | 1965 | if (bmcr & BMCR_ANENABLE) { |
1966 | current_link_up = 1; | 1966 | current_link_up = 1; |
1967 | 1967 | ||
1968 | /* Force autoneg restart if we are exiting | 1968 | /* Force autoneg restart if we are exiting |
1969 | * low power mode. | 1969 | * low power mode. |
1970 | */ | 1970 | */ |
1971 | if (!tg3_copper_is_advertising_all(tp, | 1971 | if (!tg3_copper_is_advertising_all(tp, |
1972 | tp->link_config.advertising)) | 1972 | tp->link_config.advertising)) |
1973 | current_link_up = 0; | 1973 | current_link_up = 0; |
1974 | } else { | 1974 | } else { |
1975 | current_link_up = 0; | 1975 | current_link_up = 0; |
1976 | } | 1976 | } |
1977 | } else { | 1977 | } else { |
1978 | if (!(bmcr & BMCR_ANENABLE) && | 1978 | if (!(bmcr & BMCR_ANENABLE) && |
1979 | tp->link_config.speed == current_speed && | 1979 | tp->link_config.speed == current_speed && |
1980 | tp->link_config.duplex == current_duplex) { | 1980 | tp->link_config.duplex == current_duplex) { |
1981 | current_link_up = 1; | 1981 | current_link_up = 1; |
1982 | } else { | 1982 | } else { |
1983 | current_link_up = 0; | 1983 | current_link_up = 0; |
1984 | } | 1984 | } |
1985 | } | 1985 | } |
1986 | 1986 | ||
1987 | tp->link_config.active_speed = current_speed; | 1987 | tp->link_config.active_speed = current_speed; |
1988 | tp->link_config.active_duplex = current_duplex; | 1988 | tp->link_config.active_duplex = current_duplex; |
1989 | } | 1989 | } |
1990 | 1990 | ||
1991 | if (current_link_up == 1 && | 1991 | if (current_link_up == 1 && |
1992 | (tp->link_config.active_duplex == DUPLEX_FULL) && | 1992 | (tp->link_config.active_duplex == DUPLEX_FULL) && |
1993 | (tp->link_config.autoneg == AUTONEG_ENABLE)) { | 1993 | (tp->link_config.autoneg == AUTONEG_ENABLE)) { |
1994 | u32 local_adv, remote_adv; | 1994 | u32 local_adv, remote_adv; |
1995 | 1995 | ||
1996 | if (tg3_readphy(tp, MII_ADVERTISE, &local_adv)) | 1996 | if (tg3_readphy(tp, MII_ADVERTISE, &local_adv)) |
1997 | local_adv = 0; | 1997 | local_adv = 0; |
1998 | local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | 1998 | local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1999 | 1999 | ||
2000 | if (tg3_readphy(tp, MII_LPA, &remote_adv)) | 2000 | if (tg3_readphy(tp, MII_LPA, &remote_adv)) |
2001 | remote_adv = 0; | 2001 | remote_adv = 0; |
2002 | 2002 | ||
2003 | remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); | 2003 | remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); |
2004 | 2004 | ||
2005 | /* If we are not advertising full pause capability, | 2005 | /* If we are not advertising full pause capability, |
2006 | * something is wrong. Bring the link down and reconfigure. | 2006 | * something is wrong. Bring the link down and reconfigure. |
2007 | */ | 2007 | */ |
2008 | if (local_adv != ADVERTISE_PAUSE_CAP) { | 2008 | if (local_adv != ADVERTISE_PAUSE_CAP) { |
2009 | current_link_up = 0; | 2009 | current_link_up = 0; |
2010 | } else { | 2010 | } else { |
2011 | tg3_setup_flow_control(tp, local_adv, remote_adv); | 2011 | tg3_setup_flow_control(tp, local_adv, remote_adv); |
2012 | } | 2012 | } |
2013 | } | 2013 | } |
2014 | relink: | 2014 | relink: |
2015 | if (current_link_up == 0 || tp->link_config.phy_is_low_power) { | 2015 | if (current_link_up == 0 || tp->link_config.phy_is_low_power) { |
2016 | u32 tmp; | 2016 | u32 tmp; |
2017 | 2017 | ||
2018 | tg3_phy_copper_begin(tp); | 2018 | tg3_phy_copper_begin(tp); |
2019 | 2019 | ||
2020 | tg3_readphy(tp, MII_BMSR, &tmp); | 2020 | tg3_readphy(tp, MII_BMSR, &tmp); |
2021 | if (!tg3_readphy(tp, MII_BMSR, &tmp) && | 2021 | if (!tg3_readphy(tp, MII_BMSR, &tmp) && |
2022 | (tmp & BMSR_LSTATUS)) | 2022 | (tmp & BMSR_LSTATUS)) |
2023 | current_link_up = 1; | 2023 | current_link_up = 1; |
2024 | } | 2024 | } |
2025 | 2025 | ||
2026 | tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; | 2026 | tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; |
2027 | if (current_link_up == 1) { | 2027 | if (current_link_up == 1) { |
2028 | if (tp->link_config.active_speed == SPEED_100 || | 2028 | if (tp->link_config.active_speed == SPEED_100 || |
2029 | tp->link_config.active_speed == SPEED_10) | 2029 | tp->link_config.active_speed == SPEED_10) |
2030 | tp->mac_mode |= MAC_MODE_PORT_MODE_MII; | 2030 | tp->mac_mode |= MAC_MODE_PORT_MODE_MII; |
2031 | else | 2031 | else |
2032 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 2032 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; |
2033 | } else | 2033 | } else |
2034 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 2034 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; |
2035 | 2035 | ||
2036 | tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; | 2036 | tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; |
2037 | if (tp->link_config.active_duplex == DUPLEX_HALF) | 2037 | if (tp->link_config.active_duplex == DUPLEX_HALF) |
2038 | tp->mac_mode |= MAC_MODE_HALF_DUPLEX; | 2038 | tp->mac_mode |= MAC_MODE_HALF_DUPLEX; |
2039 | 2039 | ||
2040 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { | 2040 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { |
2041 | if (current_link_up == 1 && | 2041 | if (current_link_up == 1 && |
2042 | tg3_5700_link_polarity(tp, tp->link_config.active_speed)) | 2042 | tg3_5700_link_polarity(tp, tp->link_config.active_speed)) |
2043 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; | 2043 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; |
2044 | else | 2044 | else |
2045 | tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; | 2045 | tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; |
2046 | } | 2046 | } |
2047 | 2047 | ||
2048 | /* ??? Without this setting Netgear GA302T PHY does not | 2048 | /* ??? Without this setting Netgear GA302T PHY does not |
2049 | * ??? send/receive packets... | 2049 | * ??? send/receive packets... |
2050 | */ | 2050 | */ |
2051 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && | 2051 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && |
2052 | tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { | 2052 | tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { |
2053 | tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; | 2053 | tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; |
2054 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 2054 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
2055 | udelay(80); | 2055 | udelay(80); |
2056 | } | 2056 | } |
2057 | 2057 | ||
2058 | tw32_f(MAC_MODE, tp->mac_mode); | 2058 | tw32_f(MAC_MODE, tp->mac_mode); |
2059 | udelay(40); | 2059 | udelay(40); |
2060 | 2060 | ||
2061 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 2061 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { |
2062 | /* Polled via timer. */ | 2062 | /* Polled via timer. */ |
2063 | tw32_f(MAC_EVENT, 0); | 2063 | tw32_f(MAC_EVENT, 0); |
2064 | } else { | 2064 | } else { |
2065 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 2065 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); |
2066 | } | 2066 | } |
2067 | udelay(40); | 2067 | udelay(40); |
2068 | 2068 | ||
2069 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && | 2069 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && |
2070 | current_link_up == 1 && | 2070 | current_link_up == 1 && |
2071 | tp->link_config.active_speed == SPEED_1000 && | 2071 | tp->link_config.active_speed == SPEED_1000 && |
2072 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || | 2072 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || |
2073 | (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { | 2073 | (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { |
2074 | udelay(120); | 2074 | udelay(120); |
2075 | tw32_f(MAC_STATUS, | 2075 | tw32_f(MAC_STATUS, |
2076 | (MAC_STATUS_SYNC_CHANGED | | 2076 | (MAC_STATUS_SYNC_CHANGED | |
2077 | MAC_STATUS_CFG_CHANGED)); | 2077 | MAC_STATUS_CFG_CHANGED)); |
2078 | udelay(40); | 2078 | udelay(40); |
2079 | tg3_write_mem(tp, | 2079 | tg3_write_mem(tp, |
2080 | NIC_SRAM_FIRMWARE_MBOX, | 2080 | NIC_SRAM_FIRMWARE_MBOX, |
2081 | NIC_SRAM_FIRMWARE_MBOX_MAGIC2); | 2081 | NIC_SRAM_FIRMWARE_MBOX_MAGIC2); |
2082 | } | 2082 | } |
2083 | 2083 | ||
2084 | if (current_link_up != netif_carrier_ok(tp->dev)) { | 2084 | if (current_link_up != netif_carrier_ok(tp->dev)) { |
2085 | if (current_link_up) | 2085 | if (current_link_up) |
2086 | netif_carrier_on(tp->dev); | 2086 | netif_carrier_on(tp->dev); |
2087 | else | 2087 | else |
2088 | netif_carrier_off(tp->dev); | 2088 | netif_carrier_off(tp->dev); |
2089 | tg3_link_report(tp); | 2089 | tg3_link_report(tp); |
2090 | } | 2090 | } |
2091 | 2091 | ||
2092 | return 0; | 2092 | return 0; |
2093 | } | 2093 | } |
2094 | 2094 | ||
2095 | struct tg3_fiber_aneginfo { | 2095 | struct tg3_fiber_aneginfo { |
2096 | int state; | 2096 | int state; |
2097 | #define ANEG_STATE_UNKNOWN 0 | 2097 | #define ANEG_STATE_UNKNOWN 0 |
2098 | #define ANEG_STATE_AN_ENABLE 1 | 2098 | #define ANEG_STATE_AN_ENABLE 1 |
2099 | #define ANEG_STATE_RESTART_INIT 2 | 2099 | #define ANEG_STATE_RESTART_INIT 2 |
2100 | #define ANEG_STATE_RESTART 3 | 2100 | #define ANEG_STATE_RESTART 3 |
2101 | #define ANEG_STATE_DISABLE_LINK_OK 4 | 2101 | #define ANEG_STATE_DISABLE_LINK_OK 4 |
2102 | #define ANEG_STATE_ABILITY_DETECT_INIT 5 | 2102 | #define ANEG_STATE_ABILITY_DETECT_INIT 5 |
2103 | #define ANEG_STATE_ABILITY_DETECT 6 | 2103 | #define ANEG_STATE_ABILITY_DETECT 6 |
2104 | #define ANEG_STATE_ACK_DETECT_INIT 7 | 2104 | #define ANEG_STATE_ACK_DETECT_INIT 7 |
2105 | #define ANEG_STATE_ACK_DETECT 8 | 2105 | #define ANEG_STATE_ACK_DETECT 8 |
2106 | #define ANEG_STATE_COMPLETE_ACK_INIT 9 | 2106 | #define ANEG_STATE_COMPLETE_ACK_INIT 9 |
2107 | #define ANEG_STATE_COMPLETE_ACK 10 | 2107 | #define ANEG_STATE_COMPLETE_ACK 10 |
2108 | #define ANEG_STATE_IDLE_DETECT_INIT 11 | 2108 | #define ANEG_STATE_IDLE_DETECT_INIT 11 |
2109 | #define ANEG_STATE_IDLE_DETECT 12 | 2109 | #define ANEG_STATE_IDLE_DETECT 12 |
2110 | #define ANEG_STATE_LINK_OK 13 | 2110 | #define ANEG_STATE_LINK_OK 13 |
2111 | #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 | 2111 | #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 |
2112 | #define ANEG_STATE_NEXT_PAGE_WAIT 15 | 2112 | #define ANEG_STATE_NEXT_PAGE_WAIT 15 |
2113 | 2113 | ||
2114 | u32 flags; | 2114 | u32 flags; |
2115 | #define MR_AN_ENABLE 0x00000001 | 2115 | #define MR_AN_ENABLE 0x00000001 |
2116 | #define MR_RESTART_AN 0x00000002 | 2116 | #define MR_RESTART_AN 0x00000002 |
2117 | #define MR_AN_COMPLETE 0x00000004 | 2117 | #define MR_AN_COMPLETE 0x00000004 |
2118 | #define MR_PAGE_RX 0x00000008 | 2118 | #define MR_PAGE_RX 0x00000008 |
2119 | #define MR_NP_LOADED 0x00000010 | 2119 | #define MR_NP_LOADED 0x00000010 |
2120 | #define MR_TOGGLE_TX 0x00000020 | 2120 | #define MR_TOGGLE_TX 0x00000020 |
2121 | #define MR_LP_ADV_FULL_DUPLEX 0x00000040 | 2121 | #define MR_LP_ADV_FULL_DUPLEX 0x00000040 |
2122 | #define MR_LP_ADV_HALF_DUPLEX 0x00000080 | 2122 | #define MR_LP_ADV_HALF_DUPLEX 0x00000080 |
2123 | #define MR_LP_ADV_SYM_PAUSE 0x00000100 | 2123 | #define MR_LP_ADV_SYM_PAUSE 0x00000100 |
2124 | #define MR_LP_ADV_ASYM_PAUSE 0x00000200 | 2124 | #define MR_LP_ADV_ASYM_PAUSE 0x00000200 |
2125 | #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 | 2125 | #define MR_LP_ADV_REMOTE_FAULT1 0x00000400 |
2126 | #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 | 2126 | #define MR_LP_ADV_REMOTE_FAULT2 0x00000800 |
2127 | #define MR_LP_ADV_NEXT_PAGE 0x00001000 | 2127 | #define MR_LP_ADV_NEXT_PAGE 0x00001000 |
2128 | #define MR_TOGGLE_RX 0x00002000 | 2128 | #define MR_TOGGLE_RX 0x00002000 |
2129 | #define MR_NP_RX 0x00004000 | 2129 | #define MR_NP_RX 0x00004000 |
2130 | 2130 | ||
2131 | #define MR_LINK_OK 0x80000000 | 2131 | #define MR_LINK_OK 0x80000000 |
2132 | 2132 | ||
2133 | unsigned long link_time, cur_time; | 2133 | unsigned long link_time, cur_time; |
2134 | 2134 | ||
2135 | u32 ability_match_cfg; | 2135 | u32 ability_match_cfg; |
2136 | int ability_match_count; | 2136 | int ability_match_count; |
2137 | 2137 | ||
2138 | char ability_match, idle_match, ack_match; | 2138 | char ability_match, idle_match, ack_match; |
2139 | 2139 | ||
2140 | u32 txconfig, rxconfig; | 2140 | u32 txconfig, rxconfig; |
2141 | #define ANEG_CFG_NP 0x00000080 | 2141 | #define ANEG_CFG_NP 0x00000080 |
2142 | #define ANEG_CFG_ACK 0x00000040 | 2142 | #define ANEG_CFG_ACK 0x00000040 |
2143 | #define ANEG_CFG_RF2 0x00000020 | 2143 | #define ANEG_CFG_RF2 0x00000020 |
2144 | #define ANEG_CFG_RF1 0x00000010 | 2144 | #define ANEG_CFG_RF1 0x00000010 |
2145 | #define ANEG_CFG_PS2 0x00000001 | 2145 | #define ANEG_CFG_PS2 0x00000001 |
2146 | #define ANEG_CFG_PS1 0x00008000 | 2146 | #define ANEG_CFG_PS1 0x00008000 |
2147 | #define ANEG_CFG_HD 0x00004000 | 2147 | #define ANEG_CFG_HD 0x00004000 |
2148 | #define ANEG_CFG_FD 0x00002000 | 2148 | #define ANEG_CFG_FD 0x00002000 |
2149 | #define ANEG_CFG_INVAL 0x00001f06 | 2149 | #define ANEG_CFG_INVAL 0x00001f06 |
2150 | 2150 | ||
2151 | }; | 2151 | }; |
2152 | #define ANEG_OK 0 | 2152 | #define ANEG_OK 0 |
2153 | #define ANEG_DONE 1 | 2153 | #define ANEG_DONE 1 |
2154 | #define ANEG_TIMER_ENAB 2 | 2154 | #define ANEG_TIMER_ENAB 2 |
2155 | #define ANEG_FAILED -1 | 2155 | #define ANEG_FAILED -1 |
2156 | 2156 | ||
2157 | #define ANEG_STATE_SETTLE_TIME 10000 | 2157 | #define ANEG_STATE_SETTLE_TIME 10000 |
2158 | 2158 | ||
2159 | static int tg3_fiber_aneg_smachine(struct tg3 *tp, | 2159 | static int tg3_fiber_aneg_smachine(struct tg3 *tp, |
2160 | struct tg3_fiber_aneginfo *ap) | 2160 | struct tg3_fiber_aneginfo *ap) |
2161 | { | 2161 | { |
2162 | unsigned long delta; | 2162 | unsigned long delta; |
2163 | u32 rx_cfg_reg; | 2163 | u32 rx_cfg_reg; |
2164 | int ret; | 2164 | int ret; |
2165 | 2165 | ||
2166 | if (ap->state == ANEG_STATE_UNKNOWN) { | 2166 | if (ap->state == ANEG_STATE_UNKNOWN) { |
2167 | ap->rxconfig = 0; | 2167 | ap->rxconfig = 0; |
2168 | ap->link_time = 0; | 2168 | ap->link_time = 0; |
2169 | ap->cur_time = 0; | 2169 | ap->cur_time = 0; |
2170 | ap->ability_match_cfg = 0; | 2170 | ap->ability_match_cfg = 0; |
2171 | ap->ability_match_count = 0; | 2171 | ap->ability_match_count = 0; |
2172 | ap->ability_match = 0; | 2172 | ap->ability_match = 0; |
2173 | ap->idle_match = 0; | 2173 | ap->idle_match = 0; |
2174 | ap->ack_match = 0; | 2174 | ap->ack_match = 0; |
2175 | } | 2175 | } |
2176 | ap->cur_time++; | 2176 | ap->cur_time++; |
2177 | 2177 | ||
2178 | if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { | 2178 | if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { |
2179 | rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); | 2179 | rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); |
2180 | 2180 | ||
2181 | if (rx_cfg_reg != ap->ability_match_cfg) { | 2181 | if (rx_cfg_reg != ap->ability_match_cfg) { |
2182 | ap->ability_match_cfg = rx_cfg_reg; | 2182 | ap->ability_match_cfg = rx_cfg_reg; |
2183 | ap->ability_match = 0; | 2183 | ap->ability_match = 0; |
2184 | ap->ability_match_count = 0; | 2184 | ap->ability_match_count = 0; |
2185 | } else { | 2185 | } else { |
2186 | if (++ap->ability_match_count > 1) { | 2186 | if (++ap->ability_match_count > 1) { |
2187 | ap->ability_match = 1; | 2187 | ap->ability_match = 1; |
2188 | ap->ability_match_cfg = rx_cfg_reg; | 2188 | ap->ability_match_cfg = rx_cfg_reg; |
2189 | } | 2189 | } |
2190 | } | 2190 | } |
2191 | if (rx_cfg_reg & ANEG_CFG_ACK) | 2191 | if (rx_cfg_reg & ANEG_CFG_ACK) |
2192 | ap->ack_match = 1; | 2192 | ap->ack_match = 1; |
2193 | else | 2193 | else |
2194 | ap->ack_match = 0; | 2194 | ap->ack_match = 0; |
2195 | 2195 | ||
2196 | ap->idle_match = 0; | 2196 | ap->idle_match = 0; |
2197 | } else { | 2197 | } else { |
2198 | ap->idle_match = 1; | 2198 | ap->idle_match = 1; |
2199 | ap->ability_match_cfg = 0; | 2199 | ap->ability_match_cfg = 0; |
2200 | ap->ability_match_count = 0; | 2200 | ap->ability_match_count = 0; |
2201 | ap->ability_match = 0; | 2201 | ap->ability_match = 0; |
2202 | ap->ack_match = 0; | 2202 | ap->ack_match = 0; |
2203 | 2203 | ||
2204 | rx_cfg_reg = 0; | 2204 | rx_cfg_reg = 0; |
2205 | } | 2205 | } |
2206 | 2206 | ||
2207 | ap->rxconfig = rx_cfg_reg; | 2207 | ap->rxconfig = rx_cfg_reg; |
2208 | ret = ANEG_OK; | 2208 | ret = ANEG_OK; |
2209 | 2209 | ||
2210 | switch(ap->state) { | 2210 | switch(ap->state) { |
2211 | case ANEG_STATE_UNKNOWN: | 2211 | case ANEG_STATE_UNKNOWN: |
2212 | if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) | 2212 | if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) |
2213 | ap->state = ANEG_STATE_AN_ENABLE; | 2213 | ap->state = ANEG_STATE_AN_ENABLE; |
2214 | 2214 | ||
2215 | /* fallthru */ | 2215 | /* fallthru */ |
2216 | case ANEG_STATE_AN_ENABLE: | 2216 | case ANEG_STATE_AN_ENABLE: |
2217 | ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); | 2217 | ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); |
2218 | if (ap->flags & MR_AN_ENABLE) { | 2218 | if (ap->flags & MR_AN_ENABLE) { |
2219 | ap->link_time = 0; | 2219 | ap->link_time = 0; |
2220 | ap->cur_time = 0; | 2220 | ap->cur_time = 0; |
2221 | ap->ability_match_cfg = 0; | 2221 | ap->ability_match_cfg = 0; |
2222 | ap->ability_match_count = 0; | 2222 | ap->ability_match_count = 0; |
2223 | ap->ability_match = 0; | 2223 | ap->ability_match = 0; |
2224 | ap->idle_match = 0; | 2224 | ap->idle_match = 0; |
2225 | ap->ack_match = 0; | 2225 | ap->ack_match = 0; |
2226 | 2226 | ||
2227 | ap->state = ANEG_STATE_RESTART_INIT; | 2227 | ap->state = ANEG_STATE_RESTART_INIT; |
2228 | } else { | 2228 | } else { |
2229 | ap->state = ANEG_STATE_DISABLE_LINK_OK; | 2229 | ap->state = ANEG_STATE_DISABLE_LINK_OK; |
2230 | } | 2230 | } |
2231 | break; | 2231 | break; |
2232 | 2232 | ||
2233 | case ANEG_STATE_RESTART_INIT: | 2233 | case ANEG_STATE_RESTART_INIT: |
2234 | ap->link_time = ap->cur_time; | 2234 | ap->link_time = ap->cur_time; |
2235 | ap->flags &= ~(MR_NP_LOADED); | 2235 | ap->flags &= ~(MR_NP_LOADED); |
2236 | ap->txconfig = 0; | 2236 | ap->txconfig = 0; |
2237 | tw32(MAC_TX_AUTO_NEG, 0); | 2237 | tw32(MAC_TX_AUTO_NEG, 0); |
2238 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 2238 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; |
2239 | tw32_f(MAC_MODE, tp->mac_mode); | 2239 | tw32_f(MAC_MODE, tp->mac_mode); |
2240 | udelay(40); | 2240 | udelay(40); |
2241 | 2241 | ||
2242 | ret = ANEG_TIMER_ENAB; | 2242 | ret = ANEG_TIMER_ENAB; |
2243 | ap->state = ANEG_STATE_RESTART; | 2243 | ap->state = ANEG_STATE_RESTART; |
2244 | 2244 | ||
2245 | /* fallthru */ | 2245 | /* fallthru */ |
2246 | case ANEG_STATE_RESTART: | 2246 | case ANEG_STATE_RESTART: |
2247 | delta = ap->cur_time - ap->link_time; | 2247 | delta = ap->cur_time - ap->link_time; |
2248 | if (delta > ANEG_STATE_SETTLE_TIME) { | 2248 | if (delta > ANEG_STATE_SETTLE_TIME) { |
2249 | ap->state = ANEG_STATE_ABILITY_DETECT_INIT; | 2249 | ap->state = ANEG_STATE_ABILITY_DETECT_INIT; |
2250 | } else { | 2250 | } else { |
2251 | ret = ANEG_TIMER_ENAB; | 2251 | ret = ANEG_TIMER_ENAB; |
2252 | } | 2252 | } |
2253 | break; | 2253 | break; |
2254 | 2254 | ||
2255 | case ANEG_STATE_DISABLE_LINK_OK: | 2255 | case ANEG_STATE_DISABLE_LINK_OK: |
2256 | ret = ANEG_DONE; | 2256 | ret = ANEG_DONE; |
2257 | break; | 2257 | break; |
2258 | 2258 | ||
2259 | case ANEG_STATE_ABILITY_DETECT_INIT: | 2259 | case ANEG_STATE_ABILITY_DETECT_INIT: |
2260 | ap->flags &= ~(MR_TOGGLE_TX); | 2260 | ap->flags &= ~(MR_TOGGLE_TX); |
2261 | ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1); | 2261 | ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1); |
2262 | tw32(MAC_TX_AUTO_NEG, ap->txconfig); | 2262 | tw32(MAC_TX_AUTO_NEG, ap->txconfig); |
2263 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 2263 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; |
2264 | tw32_f(MAC_MODE, tp->mac_mode); | 2264 | tw32_f(MAC_MODE, tp->mac_mode); |
2265 | udelay(40); | 2265 | udelay(40); |
2266 | 2266 | ||
2267 | ap->state = ANEG_STATE_ABILITY_DETECT; | 2267 | ap->state = ANEG_STATE_ABILITY_DETECT; |
2268 | break; | 2268 | break; |
2269 | 2269 | ||
2270 | case ANEG_STATE_ABILITY_DETECT: | 2270 | case ANEG_STATE_ABILITY_DETECT: |
2271 | if (ap->ability_match != 0 && ap->rxconfig != 0) { | 2271 | if (ap->ability_match != 0 && ap->rxconfig != 0) { |
2272 | ap->state = ANEG_STATE_ACK_DETECT_INIT; | 2272 | ap->state = ANEG_STATE_ACK_DETECT_INIT; |
2273 | } | 2273 | } |
2274 | break; | 2274 | break; |
2275 | 2275 | ||
2276 | case ANEG_STATE_ACK_DETECT_INIT: | 2276 | case ANEG_STATE_ACK_DETECT_INIT: |
2277 | ap->txconfig |= ANEG_CFG_ACK; | 2277 | ap->txconfig |= ANEG_CFG_ACK; |
2278 | tw32(MAC_TX_AUTO_NEG, ap->txconfig); | 2278 | tw32(MAC_TX_AUTO_NEG, ap->txconfig); |
2279 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 2279 | tp->mac_mode |= MAC_MODE_SEND_CONFIGS; |
2280 | tw32_f(MAC_MODE, tp->mac_mode); | 2280 | tw32_f(MAC_MODE, tp->mac_mode); |
2281 | udelay(40); | 2281 | udelay(40); |
2282 | 2282 | ||
2283 | ap->state = ANEG_STATE_ACK_DETECT; | 2283 | ap->state = ANEG_STATE_ACK_DETECT; |
2284 | 2284 | ||
2285 | /* fallthru */ | 2285 | /* fallthru */ |
2286 | case ANEG_STATE_ACK_DETECT: | 2286 | case ANEG_STATE_ACK_DETECT: |
2287 | if (ap->ack_match != 0) { | 2287 | if (ap->ack_match != 0) { |
2288 | if ((ap->rxconfig & ~ANEG_CFG_ACK) == | 2288 | if ((ap->rxconfig & ~ANEG_CFG_ACK) == |
2289 | (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { | 2289 | (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { |
2290 | ap->state = ANEG_STATE_COMPLETE_ACK_INIT; | 2290 | ap->state = ANEG_STATE_COMPLETE_ACK_INIT; |
2291 | } else { | 2291 | } else { |
2292 | ap->state = ANEG_STATE_AN_ENABLE; | 2292 | ap->state = ANEG_STATE_AN_ENABLE; |
2293 | } | 2293 | } |
2294 | } else if (ap->ability_match != 0 && | 2294 | } else if (ap->ability_match != 0 && |
2295 | ap->rxconfig == 0) { | 2295 | ap->rxconfig == 0) { |
2296 | ap->state = ANEG_STATE_AN_ENABLE; | 2296 | ap->state = ANEG_STATE_AN_ENABLE; |
2297 | } | 2297 | } |
2298 | break; | 2298 | break; |
2299 | 2299 | ||
2300 | case ANEG_STATE_COMPLETE_ACK_INIT: | 2300 | case ANEG_STATE_COMPLETE_ACK_INIT: |
2301 | if (ap->rxconfig & ANEG_CFG_INVAL) { | 2301 | if (ap->rxconfig & ANEG_CFG_INVAL) { |
2302 | ret = ANEG_FAILED; | 2302 | ret = ANEG_FAILED; |
2303 | break; | 2303 | break; |
2304 | } | 2304 | } |
2305 | ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | | 2305 | ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | |
2306 | MR_LP_ADV_HALF_DUPLEX | | 2306 | MR_LP_ADV_HALF_DUPLEX | |
2307 | MR_LP_ADV_SYM_PAUSE | | 2307 | MR_LP_ADV_SYM_PAUSE | |
2308 | MR_LP_ADV_ASYM_PAUSE | | 2308 | MR_LP_ADV_ASYM_PAUSE | |
2309 | MR_LP_ADV_REMOTE_FAULT1 | | 2309 | MR_LP_ADV_REMOTE_FAULT1 | |
2310 | MR_LP_ADV_REMOTE_FAULT2 | | 2310 | MR_LP_ADV_REMOTE_FAULT2 | |
2311 | MR_LP_ADV_NEXT_PAGE | | 2311 | MR_LP_ADV_NEXT_PAGE | |
2312 | MR_TOGGLE_RX | | 2312 | MR_TOGGLE_RX | |
2313 | MR_NP_RX); | 2313 | MR_NP_RX); |
2314 | if (ap->rxconfig & ANEG_CFG_FD) | 2314 | if (ap->rxconfig & ANEG_CFG_FD) |
2315 | ap->flags |= MR_LP_ADV_FULL_DUPLEX; | 2315 | ap->flags |= MR_LP_ADV_FULL_DUPLEX; |
2316 | if (ap->rxconfig & ANEG_CFG_HD) | 2316 | if (ap->rxconfig & ANEG_CFG_HD) |
2317 | ap->flags |= MR_LP_ADV_HALF_DUPLEX; | 2317 | ap->flags |= MR_LP_ADV_HALF_DUPLEX; |
2318 | if (ap->rxconfig & ANEG_CFG_PS1) | 2318 | if (ap->rxconfig & ANEG_CFG_PS1) |
2319 | ap->flags |= MR_LP_ADV_SYM_PAUSE; | 2319 | ap->flags |= MR_LP_ADV_SYM_PAUSE; |
2320 | if (ap->rxconfig & ANEG_CFG_PS2) | 2320 | if (ap->rxconfig & ANEG_CFG_PS2) |
2321 | ap->flags |= MR_LP_ADV_ASYM_PAUSE; | 2321 | ap->flags |= MR_LP_ADV_ASYM_PAUSE; |
2322 | if (ap->rxconfig & ANEG_CFG_RF1) | 2322 | if (ap->rxconfig & ANEG_CFG_RF1) |
2323 | ap->flags |= MR_LP_ADV_REMOTE_FAULT1; | 2323 | ap->flags |= MR_LP_ADV_REMOTE_FAULT1; |
2324 | if (ap->rxconfig & ANEG_CFG_RF2) | 2324 | if (ap->rxconfig & ANEG_CFG_RF2) |
2325 | ap->flags |= MR_LP_ADV_REMOTE_FAULT2; | 2325 | ap->flags |= MR_LP_ADV_REMOTE_FAULT2; |
2326 | if (ap->rxconfig & ANEG_CFG_NP) | 2326 | if (ap->rxconfig & ANEG_CFG_NP) |
2327 | ap->flags |= MR_LP_ADV_NEXT_PAGE; | 2327 | ap->flags |= MR_LP_ADV_NEXT_PAGE; |
2328 | 2328 | ||
2329 | ap->link_time = ap->cur_time; | 2329 | ap->link_time = ap->cur_time; |
2330 | 2330 | ||
2331 | ap->flags ^= (MR_TOGGLE_TX); | 2331 | ap->flags ^= (MR_TOGGLE_TX); |
2332 | if (ap->rxconfig & 0x0008) | 2332 | if (ap->rxconfig & 0x0008) |
2333 | ap->flags |= MR_TOGGLE_RX; | 2333 | ap->flags |= MR_TOGGLE_RX; |
2334 | if (ap->rxconfig & ANEG_CFG_NP) | 2334 | if (ap->rxconfig & ANEG_CFG_NP) |
2335 | ap->flags |= MR_NP_RX; | 2335 | ap->flags |= MR_NP_RX; |
2336 | ap->flags |= MR_PAGE_RX; | 2336 | ap->flags |= MR_PAGE_RX; |
2337 | 2337 | ||
2338 | ap->state = ANEG_STATE_COMPLETE_ACK; | 2338 | ap->state = ANEG_STATE_COMPLETE_ACK; |
2339 | ret = ANEG_TIMER_ENAB; | 2339 | ret = ANEG_TIMER_ENAB; |
2340 | break; | 2340 | break; |
2341 | 2341 | ||
2342 | case ANEG_STATE_COMPLETE_ACK: | 2342 | case ANEG_STATE_COMPLETE_ACK: |
2343 | if (ap->ability_match != 0 && | 2343 | if (ap->ability_match != 0 && |
2344 | ap->rxconfig == 0) { | 2344 | ap->rxconfig == 0) { |
2345 | ap->state = ANEG_STATE_AN_ENABLE; | 2345 | ap->state = ANEG_STATE_AN_ENABLE; |
2346 | break; | 2346 | break; |
2347 | } | 2347 | } |
2348 | delta = ap->cur_time - ap->link_time; | 2348 | delta = ap->cur_time - ap->link_time; |
2349 | if (delta > ANEG_STATE_SETTLE_TIME) { | 2349 | if (delta > ANEG_STATE_SETTLE_TIME) { |
2350 | if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { | 2350 | if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { |
2351 | ap->state = ANEG_STATE_IDLE_DETECT_INIT; | 2351 | ap->state = ANEG_STATE_IDLE_DETECT_INIT; |
2352 | } else { | 2352 | } else { |
2353 | if ((ap->txconfig & ANEG_CFG_NP) == 0 && | 2353 | if ((ap->txconfig & ANEG_CFG_NP) == 0 && |
2354 | !(ap->flags & MR_NP_RX)) { | 2354 | !(ap->flags & MR_NP_RX)) { |
2355 | ap->state = ANEG_STATE_IDLE_DETECT_INIT; | 2355 | ap->state = ANEG_STATE_IDLE_DETECT_INIT; |
2356 | } else { | 2356 | } else { |
2357 | ret = ANEG_FAILED; | 2357 | ret = ANEG_FAILED; |
2358 | } | 2358 | } |
2359 | } | 2359 | } |
2360 | } | 2360 | } |
2361 | break; | 2361 | break; |
2362 | 2362 | ||
2363 | case ANEG_STATE_IDLE_DETECT_INIT: | 2363 | case ANEG_STATE_IDLE_DETECT_INIT: |
2364 | ap->link_time = ap->cur_time; | 2364 | ap->link_time = ap->cur_time; |
2365 | tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; | 2365 | tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; |
2366 | tw32_f(MAC_MODE, tp->mac_mode); | 2366 | tw32_f(MAC_MODE, tp->mac_mode); |
2367 | udelay(40); | 2367 | udelay(40); |
2368 | 2368 | ||
2369 | ap->state = ANEG_STATE_IDLE_DETECT; | 2369 | ap->state = ANEG_STATE_IDLE_DETECT; |
2370 | ret = ANEG_TIMER_ENAB; | 2370 | ret = ANEG_TIMER_ENAB; |
2371 | break; | 2371 | break; |
2372 | 2372 | ||
2373 | case ANEG_STATE_IDLE_DETECT: | 2373 | case ANEG_STATE_IDLE_DETECT: |
2374 | if (ap->ability_match != 0 && | 2374 | if (ap->ability_match != 0 && |
2375 | ap->rxconfig == 0) { | 2375 | ap->rxconfig == 0) { |
2376 | ap->state = ANEG_STATE_AN_ENABLE; | 2376 | ap->state = ANEG_STATE_AN_ENABLE; |
2377 | break; | 2377 | break; |
2378 | } | 2378 | } |
2379 | delta = ap->cur_time - ap->link_time; | 2379 | delta = ap->cur_time - ap->link_time; |
2380 | if (delta > ANEG_STATE_SETTLE_TIME) { | 2380 | if (delta > ANEG_STATE_SETTLE_TIME) { |
2381 | /* XXX another gem from the Broadcom driver :( */ | 2381 | /* XXX another gem from the Broadcom driver :( */ |
2382 | ap->state = ANEG_STATE_LINK_OK; | 2382 | ap->state = ANEG_STATE_LINK_OK; |
2383 | } | 2383 | } |
2384 | break; | 2384 | break; |
2385 | 2385 | ||
2386 | case ANEG_STATE_LINK_OK: | 2386 | case ANEG_STATE_LINK_OK: |
2387 | ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); | 2387 | ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); |
2388 | ret = ANEG_DONE; | 2388 | ret = ANEG_DONE; |
2389 | break; | 2389 | break; |
2390 | 2390 | ||
2391 | case ANEG_STATE_NEXT_PAGE_WAIT_INIT: | 2391 | case ANEG_STATE_NEXT_PAGE_WAIT_INIT: |
2392 | /* ??? unimplemented */ | 2392 | /* ??? unimplemented */ |
2393 | break; | 2393 | break; |
2394 | 2394 | ||
2395 | case ANEG_STATE_NEXT_PAGE_WAIT: | 2395 | case ANEG_STATE_NEXT_PAGE_WAIT: |
2396 | /* ??? unimplemented */ | 2396 | /* ??? unimplemented */ |
2397 | break; | 2397 | break; |
2398 | 2398 | ||
2399 | default: | 2399 | default: |
2400 | ret = ANEG_FAILED; | 2400 | ret = ANEG_FAILED; |
2401 | break; | 2401 | break; |
2402 | }; | 2402 | }; |
2403 | 2403 | ||
2404 | return ret; | 2404 | return ret; |
2405 | } | 2405 | } |
2406 | 2406 | ||
2407 | static int fiber_autoneg(struct tg3 *tp, u32 *flags) | 2407 | static int fiber_autoneg(struct tg3 *tp, u32 *flags) |
2408 | { | 2408 | { |
2409 | int res = 0; | 2409 | int res = 0; |
2410 | struct tg3_fiber_aneginfo aninfo; | 2410 | struct tg3_fiber_aneginfo aninfo; |
2411 | int status = ANEG_FAILED; | 2411 | int status = ANEG_FAILED; |
2412 | unsigned int tick; | 2412 | unsigned int tick; |
2413 | u32 tmp; | 2413 | u32 tmp; |
2414 | 2414 | ||
2415 | tw32_f(MAC_TX_AUTO_NEG, 0); | 2415 | tw32_f(MAC_TX_AUTO_NEG, 0); |
2416 | 2416 | ||
2417 | tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 2417 | tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; |
2418 | tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); | 2418 | tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); |
2419 | udelay(40); | 2419 | udelay(40); |
2420 | 2420 | ||
2421 | tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); | 2421 | tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); |
2422 | udelay(40); | 2422 | udelay(40); |
2423 | 2423 | ||
2424 | memset(&aninfo, 0, sizeof(aninfo)); | 2424 | memset(&aninfo, 0, sizeof(aninfo)); |
2425 | aninfo.flags |= MR_AN_ENABLE; | 2425 | aninfo.flags |= MR_AN_ENABLE; |
2426 | aninfo.state = ANEG_STATE_UNKNOWN; | 2426 | aninfo.state = ANEG_STATE_UNKNOWN; |
2427 | aninfo.cur_time = 0; | 2427 | aninfo.cur_time = 0; |
2428 | tick = 0; | 2428 | tick = 0; |
2429 | while (++tick < 195000) { | 2429 | while (++tick < 195000) { |
2430 | status = tg3_fiber_aneg_smachine(tp, &aninfo); | 2430 | status = tg3_fiber_aneg_smachine(tp, &aninfo); |
2431 | if (status == ANEG_DONE || status == ANEG_FAILED) | 2431 | if (status == ANEG_DONE || status == ANEG_FAILED) |
2432 | break; | 2432 | break; |
2433 | 2433 | ||
2434 | udelay(1); | 2434 | udelay(1); |
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; | 2437 | tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; |
2438 | tw32_f(MAC_MODE, tp->mac_mode); | 2438 | tw32_f(MAC_MODE, tp->mac_mode); |
2439 | udelay(40); | 2439 | udelay(40); |
2440 | 2440 | ||
2441 | *flags = aninfo.flags; | 2441 | *flags = aninfo.flags; |
2442 | 2442 | ||
2443 | if (status == ANEG_DONE && | 2443 | if (status == ANEG_DONE && |
2444 | (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | | 2444 | (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | |
2445 | MR_LP_ADV_FULL_DUPLEX))) | 2445 | MR_LP_ADV_FULL_DUPLEX))) |
2446 | res = 1; | 2446 | res = 1; |
2447 | 2447 | ||
2448 | return res; | 2448 | return res; |
2449 | } | 2449 | } |
2450 | 2450 | ||
2451 | static void tg3_init_bcm8002(struct tg3 *tp) | 2451 | static void tg3_init_bcm8002(struct tg3 *tp) |
2452 | { | 2452 | { |
2453 | u32 mac_status = tr32(MAC_STATUS); | 2453 | u32 mac_status = tr32(MAC_STATUS); |
2454 | int i; | 2454 | int i; |
2455 | 2455 | ||
2456 | /* Reset when initting first time or we have a link. */ | 2456 | /* Reset when initting first time or we have a link. */ |
2457 | if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && | 2457 | if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && |
2458 | !(mac_status & MAC_STATUS_PCS_SYNCED)) | 2458 | !(mac_status & MAC_STATUS_PCS_SYNCED)) |
2459 | return; | 2459 | return; |
2460 | 2460 | ||
2461 | /* Set PLL lock range. */ | 2461 | /* Set PLL lock range. */ |
2462 | tg3_writephy(tp, 0x16, 0x8007); | 2462 | tg3_writephy(tp, 0x16, 0x8007); |
2463 | 2463 | ||
2464 | /* SW reset */ | 2464 | /* SW reset */ |
2465 | tg3_writephy(tp, MII_BMCR, BMCR_RESET); | 2465 | tg3_writephy(tp, MII_BMCR, BMCR_RESET); |
2466 | 2466 | ||
2467 | /* Wait for reset to complete. */ | 2467 | /* Wait for reset to complete. */ |
2468 | /* XXX schedule_timeout() ... */ | 2468 | /* XXX schedule_timeout() ... */ |
2469 | for (i = 0; i < 500; i++) | 2469 | for (i = 0; i < 500; i++) |
2470 | udelay(10); | 2470 | udelay(10); |
2471 | 2471 | ||
2472 | /* Config mode; select PMA/Ch 1 regs. */ | 2472 | /* Config mode; select PMA/Ch 1 regs. */ |
2473 | tg3_writephy(tp, 0x10, 0x8411); | 2473 | tg3_writephy(tp, 0x10, 0x8411); |
2474 | 2474 | ||
2475 | /* Enable auto-lock and comdet, select txclk for tx. */ | 2475 | /* Enable auto-lock and comdet, select txclk for tx. */ |
2476 | tg3_writephy(tp, 0x11, 0x0a10); | 2476 | tg3_writephy(tp, 0x11, 0x0a10); |
2477 | 2477 | ||
2478 | tg3_writephy(tp, 0x18, 0x00a0); | 2478 | tg3_writephy(tp, 0x18, 0x00a0); |
2479 | tg3_writephy(tp, 0x16, 0x41ff); | 2479 | tg3_writephy(tp, 0x16, 0x41ff); |
2480 | 2480 | ||
2481 | /* Assert and deassert POR. */ | 2481 | /* Assert and deassert POR. */ |
2482 | tg3_writephy(tp, 0x13, 0x0400); | 2482 | tg3_writephy(tp, 0x13, 0x0400); |
2483 | udelay(40); | 2483 | udelay(40); |
2484 | tg3_writephy(tp, 0x13, 0x0000); | 2484 | tg3_writephy(tp, 0x13, 0x0000); |
2485 | 2485 | ||
2486 | tg3_writephy(tp, 0x11, 0x0a50); | 2486 | tg3_writephy(tp, 0x11, 0x0a50); |
2487 | udelay(40); | 2487 | udelay(40); |
2488 | tg3_writephy(tp, 0x11, 0x0a10); | 2488 | tg3_writephy(tp, 0x11, 0x0a10); |
2489 | 2489 | ||
2490 | /* Wait for signal to stabilize */ | 2490 | /* Wait for signal to stabilize */ |
2491 | /* XXX schedule_timeout() ... */ | 2491 | /* XXX schedule_timeout() ... */ |
2492 | for (i = 0; i < 15000; i++) | 2492 | for (i = 0; i < 15000; i++) |
2493 | udelay(10); | 2493 | udelay(10); |
2494 | 2494 | ||
2495 | /* Deselect the channel register so we can read the PHYID | 2495 | /* Deselect the channel register so we can read the PHYID |
2496 | * later. | 2496 | * later. |
2497 | */ | 2497 | */ |
2498 | tg3_writephy(tp, 0x10, 0x8011); | 2498 | tg3_writephy(tp, 0x10, 0x8011); |
2499 | } | 2499 | } |
2500 | 2500 | ||
2501 | static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) | 2501 | static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) |
2502 | { | 2502 | { |
2503 | u32 sg_dig_ctrl, sg_dig_status; | 2503 | u32 sg_dig_ctrl, sg_dig_status; |
2504 | u32 serdes_cfg, expected_sg_dig_ctrl; | 2504 | u32 serdes_cfg, expected_sg_dig_ctrl; |
2505 | int workaround, port_a; | 2505 | int workaround, port_a; |
2506 | int current_link_up; | 2506 | int current_link_up; |
2507 | 2507 | ||
2508 | serdes_cfg = 0; | 2508 | serdes_cfg = 0; |
2509 | expected_sg_dig_ctrl = 0; | 2509 | expected_sg_dig_ctrl = 0; |
2510 | workaround = 0; | 2510 | workaround = 0; |
2511 | port_a = 1; | 2511 | port_a = 1; |
2512 | current_link_up = 0; | 2512 | current_link_up = 0; |
2513 | 2513 | ||
2514 | if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && | 2514 | if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && |
2515 | tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { | 2515 | tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { |
2516 | workaround = 1; | 2516 | workaround = 1; |
2517 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 2517 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) |
2518 | port_a = 0; | 2518 | port_a = 0; |
2519 | 2519 | ||
2520 | /* preserve bits 0-11,13,14 for signal pre-emphasis */ | 2520 | /* preserve bits 0-11,13,14 for signal pre-emphasis */ |
2521 | /* preserve bits 20-23 for voltage regulator */ | 2521 | /* preserve bits 20-23 for voltage regulator */ |
2522 | serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; | 2522 | serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; |
2523 | } | 2523 | } |
2524 | 2524 | ||
2525 | sg_dig_ctrl = tr32(SG_DIG_CTRL); | 2525 | sg_dig_ctrl = tr32(SG_DIG_CTRL); |
2526 | 2526 | ||
2527 | if (tp->link_config.autoneg != AUTONEG_ENABLE) { | 2527 | if (tp->link_config.autoneg != AUTONEG_ENABLE) { |
2528 | if (sg_dig_ctrl & (1 << 31)) { | 2528 | if (sg_dig_ctrl & (1 << 31)) { |
2529 | if (workaround) { | 2529 | if (workaround) { |
2530 | u32 val = serdes_cfg; | 2530 | u32 val = serdes_cfg; |
2531 | 2531 | ||
2532 | if (port_a) | 2532 | if (port_a) |
2533 | val |= 0xc010000; | 2533 | val |= 0xc010000; |
2534 | else | 2534 | else |
2535 | val |= 0x4010000; | 2535 | val |= 0x4010000; |
2536 | tw32_f(MAC_SERDES_CFG, val); | 2536 | tw32_f(MAC_SERDES_CFG, val); |
2537 | } | 2537 | } |
2538 | tw32_f(SG_DIG_CTRL, 0x01388400); | 2538 | tw32_f(SG_DIG_CTRL, 0x01388400); |
2539 | } | 2539 | } |
2540 | if (mac_status & MAC_STATUS_PCS_SYNCED) { | 2540 | if (mac_status & MAC_STATUS_PCS_SYNCED) { |
2541 | tg3_setup_flow_control(tp, 0, 0); | 2541 | tg3_setup_flow_control(tp, 0, 0); |
2542 | current_link_up = 1; | 2542 | current_link_up = 1; |
2543 | } | 2543 | } |
2544 | goto out; | 2544 | goto out; |
2545 | } | 2545 | } |
2546 | 2546 | ||
2547 | /* Want auto-negotiation. */ | 2547 | /* Want auto-negotiation. */ |
2548 | expected_sg_dig_ctrl = 0x81388400; | 2548 | expected_sg_dig_ctrl = 0x81388400; |
2549 | 2549 | ||
2550 | /* Pause capability */ | 2550 | /* Pause capability */ |
2551 | expected_sg_dig_ctrl |= (1 << 11); | 2551 | expected_sg_dig_ctrl |= (1 << 11); |
2552 | 2552 | ||
2553 | /* Asymettric pause */ | 2553 | /* Asymettric pause */ |
2554 | expected_sg_dig_ctrl |= (1 << 12); | 2554 | expected_sg_dig_ctrl |= (1 << 12); |
2555 | 2555 | ||
2556 | if (sg_dig_ctrl != expected_sg_dig_ctrl) { | 2556 | if (sg_dig_ctrl != expected_sg_dig_ctrl) { |
2557 | if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && | 2557 | if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && |
2558 | tp->serdes_counter && | 2558 | tp->serdes_counter && |
2559 | ((mac_status & (MAC_STATUS_PCS_SYNCED | | 2559 | ((mac_status & (MAC_STATUS_PCS_SYNCED | |
2560 | MAC_STATUS_RCVD_CFG)) == | 2560 | MAC_STATUS_RCVD_CFG)) == |
2561 | MAC_STATUS_PCS_SYNCED)) { | 2561 | MAC_STATUS_PCS_SYNCED)) { |
2562 | tp->serdes_counter--; | 2562 | tp->serdes_counter--; |
2563 | current_link_up = 1; | 2563 | current_link_up = 1; |
2564 | goto out; | 2564 | goto out; |
2565 | } | 2565 | } |
2566 | restart_autoneg: | 2566 | restart_autoneg: |
2567 | if (workaround) | 2567 | if (workaround) |
2568 | tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); | 2568 | tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); |
2569 | tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30)); | 2569 | tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30)); |
2570 | udelay(5); | 2570 | udelay(5); |
2571 | tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); | 2571 | tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); |
2572 | 2572 | ||
2573 | tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; | 2573 | tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; |
2574 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2574 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2575 | } else if (mac_status & (MAC_STATUS_PCS_SYNCED | | 2575 | } else if (mac_status & (MAC_STATUS_PCS_SYNCED | |
2576 | MAC_STATUS_SIGNAL_DET)) { | 2576 | MAC_STATUS_SIGNAL_DET)) { |
2577 | sg_dig_status = tr32(SG_DIG_STATUS); | 2577 | sg_dig_status = tr32(SG_DIG_STATUS); |
2578 | mac_status = tr32(MAC_STATUS); | 2578 | mac_status = tr32(MAC_STATUS); |
2579 | 2579 | ||
2580 | if ((sg_dig_status & (1 << 1)) && | 2580 | if ((sg_dig_status & (1 << 1)) && |
2581 | (mac_status & MAC_STATUS_PCS_SYNCED)) { | 2581 | (mac_status & MAC_STATUS_PCS_SYNCED)) { |
2582 | u32 local_adv, remote_adv; | 2582 | u32 local_adv, remote_adv; |
2583 | 2583 | ||
2584 | local_adv = ADVERTISE_PAUSE_CAP; | 2584 | local_adv = ADVERTISE_PAUSE_CAP; |
2585 | remote_adv = 0; | 2585 | remote_adv = 0; |
2586 | if (sg_dig_status & (1 << 19)) | 2586 | if (sg_dig_status & (1 << 19)) |
2587 | remote_adv |= LPA_PAUSE_CAP; | 2587 | remote_adv |= LPA_PAUSE_CAP; |
2588 | if (sg_dig_status & (1 << 20)) | 2588 | if (sg_dig_status & (1 << 20)) |
2589 | remote_adv |= LPA_PAUSE_ASYM; | 2589 | remote_adv |= LPA_PAUSE_ASYM; |
2590 | 2590 | ||
2591 | tg3_setup_flow_control(tp, local_adv, remote_adv); | 2591 | tg3_setup_flow_control(tp, local_adv, remote_adv); |
2592 | current_link_up = 1; | 2592 | current_link_up = 1; |
2593 | tp->serdes_counter = 0; | 2593 | tp->serdes_counter = 0; |
2594 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2594 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2595 | } else if (!(sg_dig_status & (1 << 1))) { | 2595 | } else if (!(sg_dig_status & (1 << 1))) { |
2596 | if (tp->serdes_counter) | 2596 | if (tp->serdes_counter) |
2597 | tp->serdes_counter--; | 2597 | tp->serdes_counter--; |
2598 | else { | 2598 | else { |
2599 | if (workaround) { | 2599 | if (workaround) { |
2600 | u32 val = serdes_cfg; | 2600 | u32 val = serdes_cfg; |
2601 | 2601 | ||
2602 | if (port_a) | 2602 | if (port_a) |
2603 | val |= 0xc010000; | 2603 | val |= 0xc010000; |
2604 | else | 2604 | else |
2605 | val |= 0x4010000; | 2605 | val |= 0x4010000; |
2606 | 2606 | ||
2607 | tw32_f(MAC_SERDES_CFG, val); | 2607 | tw32_f(MAC_SERDES_CFG, val); |
2608 | } | 2608 | } |
2609 | 2609 | ||
2610 | tw32_f(SG_DIG_CTRL, 0x01388400); | 2610 | tw32_f(SG_DIG_CTRL, 0x01388400); |
2611 | udelay(40); | 2611 | udelay(40); |
2612 | 2612 | ||
2613 | /* Link parallel detection - link is up */ | 2613 | /* Link parallel detection - link is up */ |
2614 | /* only if we have PCS_SYNC and not */ | 2614 | /* only if we have PCS_SYNC and not */ |
2615 | /* receiving config code words */ | 2615 | /* receiving config code words */ |
2616 | mac_status = tr32(MAC_STATUS); | 2616 | mac_status = tr32(MAC_STATUS); |
2617 | if ((mac_status & MAC_STATUS_PCS_SYNCED) && | 2617 | if ((mac_status & MAC_STATUS_PCS_SYNCED) && |
2618 | !(mac_status & MAC_STATUS_RCVD_CFG)) { | 2618 | !(mac_status & MAC_STATUS_RCVD_CFG)) { |
2619 | tg3_setup_flow_control(tp, 0, 0); | 2619 | tg3_setup_flow_control(tp, 0, 0); |
2620 | current_link_up = 1; | 2620 | current_link_up = 1; |
2621 | tp->tg3_flags2 |= | 2621 | tp->tg3_flags2 |= |
2622 | TG3_FLG2_PARALLEL_DETECT; | 2622 | TG3_FLG2_PARALLEL_DETECT; |
2623 | tp->serdes_counter = | 2623 | tp->serdes_counter = |
2624 | SERDES_PARALLEL_DET_TIMEOUT; | 2624 | SERDES_PARALLEL_DET_TIMEOUT; |
2625 | } else | 2625 | } else |
2626 | goto restart_autoneg; | 2626 | goto restart_autoneg; |
2627 | } | 2627 | } |
2628 | } | 2628 | } |
2629 | } else { | 2629 | } else { |
2630 | tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; | 2630 | tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; |
2631 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2631 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | out: | 2634 | out: |
2635 | return current_link_up; | 2635 | return current_link_up; |
2636 | } | 2636 | } |
2637 | 2637 | ||
2638 | static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) | 2638 | static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) |
2639 | { | 2639 | { |
2640 | int current_link_up = 0; | 2640 | int current_link_up = 0; |
2641 | 2641 | ||
2642 | if (!(mac_status & MAC_STATUS_PCS_SYNCED)) | 2642 | if (!(mac_status & MAC_STATUS_PCS_SYNCED)) |
2643 | goto out; | 2643 | goto out; |
2644 | 2644 | ||
2645 | if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 2645 | if (tp->link_config.autoneg == AUTONEG_ENABLE) { |
2646 | u32 flags; | 2646 | u32 flags; |
2647 | int i; | 2647 | int i; |
2648 | 2648 | ||
2649 | if (fiber_autoneg(tp, &flags)) { | 2649 | if (fiber_autoneg(tp, &flags)) { |
2650 | u32 local_adv, remote_adv; | 2650 | u32 local_adv, remote_adv; |
2651 | 2651 | ||
2652 | local_adv = ADVERTISE_PAUSE_CAP; | 2652 | local_adv = ADVERTISE_PAUSE_CAP; |
2653 | remote_adv = 0; | 2653 | remote_adv = 0; |
2654 | if (flags & MR_LP_ADV_SYM_PAUSE) | 2654 | if (flags & MR_LP_ADV_SYM_PAUSE) |
2655 | remote_adv |= LPA_PAUSE_CAP; | 2655 | remote_adv |= LPA_PAUSE_CAP; |
2656 | if (flags & MR_LP_ADV_ASYM_PAUSE) | 2656 | if (flags & MR_LP_ADV_ASYM_PAUSE) |
2657 | remote_adv |= LPA_PAUSE_ASYM; | 2657 | remote_adv |= LPA_PAUSE_ASYM; |
2658 | 2658 | ||
2659 | tg3_setup_flow_control(tp, local_adv, remote_adv); | 2659 | tg3_setup_flow_control(tp, local_adv, remote_adv); |
2660 | 2660 | ||
2661 | current_link_up = 1; | 2661 | current_link_up = 1; |
2662 | } | 2662 | } |
2663 | for (i = 0; i < 30; i++) { | 2663 | for (i = 0; i < 30; i++) { |
2664 | udelay(20); | 2664 | udelay(20); |
2665 | tw32_f(MAC_STATUS, | 2665 | tw32_f(MAC_STATUS, |
2666 | (MAC_STATUS_SYNC_CHANGED | | 2666 | (MAC_STATUS_SYNC_CHANGED | |
2667 | MAC_STATUS_CFG_CHANGED)); | 2667 | MAC_STATUS_CFG_CHANGED)); |
2668 | udelay(40); | 2668 | udelay(40); |
2669 | if ((tr32(MAC_STATUS) & | 2669 | if ((tr32(MAC_STATUS) & |
2670 | (MAC_STATUS_SYNC_CHANGED | | 2670 | (MAC_STATUS_SYNC_CHANGED | |
2671 | MAC_STATUS_CFG_CHANGED)) == 0) | 2671 | MAC_STATUS_CFG_CHANGED)) == 0) |
2672 | break; | 2672 | break; |
2673 | } | 2673 | } |
2674 | 2674 | ||
2675 | mac_status = tr32(MAC_STATUS); | 2675 | mac_status = tr32(MAC_STATUS); |
2676 | if (current_link_up == 0 && | 2676 | if (current_link_up == 0 && |
2677 | (mac_status & MAC_STATUS_PCS_SYNCED) && | 2677 | (mac_status & MAC_STATUS_PCS_SYNCED) && |
2678 | !(mac_status & MAC_STATUS_RCVD_CFG)) | 2678 | !(mac_status & MAC_STATUS_RCVD_CFG)) |
2679 | current_link_up = 1; | 2679 | current_link_up = 1; |
2680 | } else { | 2680 | } else { |
2681 | /* Forcing 1000FD link up. */ | 2681 | /* Forcing 1000FD link up. */ |
2682 | current_link_up = 1; | 2682 | current_link_up = 1; |
2683 | 2683 | ||
2684 | tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); | 2684 | tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); |
2685 | udelay(40); | 2685 | udelay(40); |
2686 | 2686 | ||
2687 | tw32_f(MAC_MODE, tp->mac_mode); | 2687 | tw32_f(MAC_MODE, tp->mac_mode); |
2688 | udelay(40); | 2688 | udelay(40); |
2689 | } | 2689 | } |
2690 | 2690 | ||
2691 | out: | 2691 | out: |
2692 | return current_link_up; | 2692 | return current_link_up; |
2693 | } | 2693 | } |
2694 | 2694 | ||
2695 | static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) | 2695 | static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) |
2696 | { | 2696 | { |
2697 | u32 orig_pause_cfg; | 2697 | u32 orig_pause_cfg; |
2698 | u16 orig_active_speed; | 2698 | u16 orig_active_speed; |
2699 | u8 orig_active_duplex; | 2699 | u8 orig_active_duplex; |
2700 | u32 mac_status; | 2700 | u32 mac_status; |
2701 | int current_link_up; | 2701 | int current_link_up; |
2702 | int i; | 2702 | int i; |
2703 | 2703 | ||
2704 | orig_pause_cfg = | 2704 | orig_pause_cfg = |
2705 | (tp->tg3_flags & (TG3_FLAG_RX_PAUSE | | 2705 | (tp->tg3_flags & (TG3_FLAG_RX_PAUSE | |
2706 | TG3_FLAG_TX_PAUSE)); | 2706 | TG3_FLAG_TX_PAUSE)); |
2707 | orig_active_speed = tp->link_config.active_speed; | 2707 | orig_active_speed = tp->link_config.active_speed; |
2708 | orig_active_duplex = tp->link_config.active_duplex; | 2708 | orig_active_duplex = tp->link_config.active_duplex; |
2709 | 2709 | ||
2710 | if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && | 2710 | if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && |
2711 | netif_carrier_ok(tp->dev) && | 2711 | netif_carrier_ok(tp->dev) && |
2712 | (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { | 2712 | (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { |
2713 | mac_status = tr32(MAC_STATUS); | 2713 | mac_status = tr32(MAC_STATUS); |
2714 | mac_status &= (MAC_STATUS_PCS_SYNCED | | 2714 | mac_status &= (MAC_STATUS_PCS_SYNCED | |
2715 | MAC_STATUS_SIGNAL_DET | | 2715 | MAC_STATUS_SIGNAL_DET | |
2716 | MAC_STATUS_CFG_CHANGED | | 2716 | MAC_STATUS_CFG_CHANGED | |
2717 | MAC_STATUS_RCVD_CFG); | 2717 | MAC_STATUS_RCVD_CFG); |
2718 | if (mac_status == (MAC_STATUS_PCS_SYNCED | | 2718 | if (mac_status == (MAC_STATUS_PCS_SYNCED | |
2719 | MAC_STATUS_SIGNAL_DET)) { | 2719 | MAC_STATUS_SIGNAL_DET)) { |
2720 | tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | | 2720 | tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | |
2721 | MAC_STATUS_CFG_CHANGED)); | 2721 | MAC_STATUS_CFG_CHANGED)); |
2722 | return 0; | 2722 | return 0; |
2723 | } | 2723 | } |
2724 | } | 2724 | } |
2725 | 2725 | ||
2726 | tw32_f(MAC_TX_AUTO_NEG, 0); | 2726 | tw32_f(MAC_TX_AUTO_NEG, 0); |
2727 | 2727 | ||
2728 | tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); | 2728 | tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); |
2729 | tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; | 2729 | tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; |
2730 | tw32_f(MAC_MODE, tp->mac_mode); | 2730 | tw32_f(MAC_MODE, tp->mac_mode); |
2731 | udelay(40); | 2731 | udelay(40); |
2732 | 2732 | ||
2733 | if (tp->phy_id == PHY_ID_BCM8002) | 2733 | if (tp->phy_id == PHY_ID_BCM8002) |
2734 | tg3_init_bcm8002(tp); | 2734 | tg3_init_bcm8002(tp); |
2735 | 2735 | ||
2736 | /* Enable link change event even when serdes polling. */ | 2736 | /* Enable link change event even when serdes polling. */ |
2737 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 2737 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); |
2738 | udelay(40); | 2738 | udelay(40); |
2739 | 2739 | ||
2740 | current_link_up = 0; | 2740 | current_link_up = 0; |
2741 | mac_status = tr32(MAC_STATUS); | 2741 | mac_status = tr32(MAC_STATUS); |
2742 | 2742 | ||
2743 | if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) | 2743 | if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) |
2744 | current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); | 2744 | current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); |
2745 | else | 2745 | else |
2746 | current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); | 2746 | current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); |
2747 | 2747 | ||
2748 | tp->hw_status->status = | 2748 | tp->hw_status->status = |
2749 | (SD_STATUS_UPDATED | | 2749 | (SD_STATUS_UPDATED | |
2750 | (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); | 2750 | (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); |
2751 | 2751 | ||
2752 | for (i = 0; i < 100; i++) { | 2752 | for (i = 0; i < 100; i++) { |
2753 | tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | | 2753 | tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | |
2754 | MAC_STATUS_CFG_CHANGED)); | 2754 | MAC_STATUS_CFG_CHANGED)); |
2755 | udelay(5); | 2755 | udelay(5); |
2756 | if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | | 2756 | if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | |
2757 | MAC_STATUS_CFG_CHANGED | | 2757 | MAC_STATUS_CFG_CHANGED | |
2758 | MAC_STATUS_LNKSTATE_CHANGED)) == 0) | 2758 | MAC_STATUS_LNKSTATE_CHANGED)) == 0) |
2759 | break; | 2759 | break; |
2760 | } | 2760 | } |
2761 | 2761 | ||
2762 | mac_status = tr32(MAC_STATUS); | 2762 | mac_status = tr32(MAC_STATUS); |
2763 | if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { | 2763 | if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { |
2764 | current_link_up = 0; | 2764 | current_link_up = 0; |
2765 | if (tp->link_config.autoneg == AUTONEG_ENABLE && | 2765 | if (tp->link_config.autoneg == AUTONEG_ENABLE && |
2766 | tp->serdes_counter == 0) { | 2766 | tp->serdes_counter == 0) { |
2767 | tw32_f(MAC_MODE, (tp->mac_mode | | 2767 | tw32_f(MAC_MODE, (tp->mac_mode | |
2768 | MAC_MODE_SEND_CONFIGS)); | 2768 | MAC_MODE_SEND_CONFIGS)); |
2769 | udelay(1); | 2769 | udelay(1); |
2770 | tw32_f(MAC_MODE, tp->mac_mode); | 2770 | tw32_f(MAC_MODE, tp->mac_mode); |
2771 | } | 2771 | } |
2772 | } | 2772 | } |
2773 | 2773 | ||
2774 | if (current_link_up == 1) { | 2774 | if (current_link_up == 1) { |
2775 | tp->link_config.active_speed = SPEED_1000; | 2775 | tp->link_config.active_speed = SPEED_1000; |
2776 | tp->link_config.active_duplex = DUPLEX_FULL; | 2776 | tp->link_config.active_duplex = DUPLEX_FULL; |
2777 | tw32(MAC_LED_CTRL, (tp->led_ctrl | | 2777 | tw32(MAC_LED_CTRL, (tp->led_ctrl | |
2778 | LED_CTRL_LNKLED_OVERRIDE | | 2778 | LED_CTRL_LNKLED_OVERRIDE | |
2779 | LED_CTRL_1000MBPS_ON)); | 2779 | LED_CTRL_1000MBPS_ON)); |
2780 | } else { | 2780 | } else { |
2781 | tp->link_config.active_speed = SPEED_INVALID; | 2781 | tp->link_config.active_speed = SPEED_INVALID; |
2782 | tp->link_config.active_duplex = DUPLEX_INVALID; | 2782 | tp->link_config.active_duplex = DUPLEX_INVALID; |
2783 | tw32(MAC_LED_CTRL, (tp->led_ctrl | | 2783 | tw32(MAC_LED_CTRL, (tp->led_ctrl | |
2784 | LED_CTRL_LNKLED_OVERRIDE | | 2784 | LED_CTRL_LNKLED_OVERRIDE | |
2785 | LED_CTRL_TRAFFIC_OVERRIDE)); | 2785 | LED_CTRL_TRAFFIC_OVERRIDE)); |
2786 | } | 2786 | } |
2787 | 2787 | ||
2788 | if (current_link_up != netif_carrier_ok(tp->dev)) { | 2788 | if (current_link_up != netif_carrier_ok(tp->dev)) { |
2789 | if (current_link_up) | 2789 | if (current_link_up) |
2790 | netif_carrier_on(tp->dev); | 2790 | netif_carrier_on(tp->dev); |
2791 | else | 2791 | else |
2792 | netif_carrier_off(tp->dev); | 2792 | netif_carrier_off(tp->dev); |
2793 | tg3_link_report(tp); | 2793 | tg3_link_report(tp); |
2794 | } else { | 2794 | } else { |
2795 | u32 now_pause_cfg = | 2795 | u32 now_pause_cfg = |
2796 | tp->tg3_flags & (TG3_FLAG_RX_PAUSE | | 2796 | tp->tg3_flags & (TG3_FLAG_RX_PAUSE | |
2797 | TG3_FLAG_TX_PAUSE); | 2797 | TG3_FLAG_TX_PAUSE); |
2798 | if (orig_pause_cfg != now_pause_cfg || | 2798 | if (orig_pause_cfg != now_pause_cfg || |
2799 | orig_active_speed != tp->link_config.active_speed || | 2799 | orig_active_speed != tp->link_config.active_speed || |
2800 | orig_active_duplex != tp->link_config.active_duplex) | 2800 | orig_active_duplex != tp->link_config.active_duplex) |
2801 | tg3_link_report(tp); | 2801 | tg3_link_report(tp); |
2802 | } | 2802 | } |
2803 | 2803 | ||
2804 | return 0; | 2804 | return 0; |
2805 | } | 2805 | } |
2806 | 2806 | ||
2807 | static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | 2807 | static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) |
2808 | { | 2808 | { |
2809 | int current_link_up, err = 0; | 2809 | int current_link_up, err = 0; |
2810 | u32 bmsr, bmcr; | 2810 | u32 bmsr, bmcr; |
2811 | u16 current_speed; | 2811 | u16 current_speed; |
2812 | u8 current_duplex; | 2812 | u8 current_duplex; |
2813 | 2813 | ||
2814 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 2814 | tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; |
2815 | tw32_f(MAC_MODE, tp->mac_mode); | 2815 | tw32_f(MAC_MODE, tp->mac_mode); |
2816 | udelay(40); | 2816 | udelay(40); |
2817 | 2817 | ||
2818 | tw32(MAC_EVENT, 0); | 2818 | tw32(MAC_EVENT, 0); |
2819 | 2819 | ||
2820 | tw32_f(MAC_STATUS, | 2820 | tw32_f(MAC_STATUS, |
2821 | (MAC_STATUS_SYNC_CHANGED | | 2821 | (MAC_STATUS_SYNC_CHANGED | |
2822 | MAC_STATUS_CFG_CHANGED | | 2822 | MAC_STATUS_CFG_CHANGED | |
2823 | MAC_STATUS_MI_COMPLETION | | 2823 | MAC_STATUS_MI_COMPLETION | |
2824 | MAC_STATUS_LNKSTATE_CHANGED)); | 2824 | MAC_STATUS_LNKSTATE_CHANGED)); |
2825 | udelay(40); | 2825 | udelay(40); |
2826 | 2826 | ||
2827 | if (force_reset) | 2827 | if (force_reset) |
2828 | tg3_phy_reset(tp); | 2828 | tg3_phy_reset(tp); |
2829 | 2829 | ||
2830 | current_link_up = 0; | 2830 | current_link_up = 0; |
2831 | current_speed = SPEED_INVALID; | 2831 | current_speed = SPEED_INVALID; |
2832 | current_duplex = DUPLEX_INVALID; | 2832 | current_duplex = DUPLEX_INVALID; |
2833 | 2833 | ||
2834 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2834 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2835 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2835 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2836 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 2836 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { |
2837 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | 2837 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) |
2838 | bmsr |= BMSR_LSTATUS; | 2838 | bmsr |= BMSR_LSTATUS; |
2839 | else | 2839 | else |
2840 | bmsr &= ~BMSR_LSTATUS; | 2840 | bmsr &= ~BMSR_LSTATUS; |
2841 | } | 2841 | } |
2842 | 2842 | ||
2843 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); | 2843 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); |
2844 | 2844 | ||
2845 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && | 2845 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && |
2846 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { | 2846 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { |
2847 | /* do nothing, just check for link up at the end */ | 2847 | /* do nothing, just check for link up at the end */ |
2848 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 2848 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { |
2849 | u32 adv, new_adv; | 2849 | u32 adv, new_adv; |
2850 | 2850 | ||
2851 | err |= tg3_readphy(tp, MII_ADVERTISE, &adv); | 2851 | err |= tg3_readphy(tp, MII_ADVERTISE, &adv); |
2852 | new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | | 2852 | new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | |
2853 | ADVERTISE_1000XPAUSE | | 2853 | ADVERTISE_1000XPAUSE | |
2854 | ADVERTISE_1000XPSE_ASYM | | 2854 | ADVERTISE_1000XPSE_ASYM | |
2855 | ADVERTISE_SLCT); | 2855 | ADVERTISE_SLCT); |
2856 | 2856 | ||
2857 | /* Always advertise symmetric PAUSE just like copper */ | 2857 | /* Always advertise symmetric PAUSE just like copper */ |
2858 | new_adv |= ADVERTISE_1000XPAUSE; | 2858 | new_adv |= ADVERTISE_1000XPAUSE; |
2859 | 2859 | ||
2860 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) | 2860 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) |
2861 | new_adv |= ADVERTISE_1000XHALF; | 2861 | new_adv |= ADVERTISE_1000XHALF; |
2862 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) | 2862 | if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) |
2863 | new_adv |= ADVERTISE_1000XFULL; | 2863 | new_adv |= ADVERTISE_1000XFULL; |
2864 | 2864 | ||
2865 | if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { | 2865 | if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { |
2866 | tg3_writephy(tp, MII_ADVERTISE, new_adv); | 2866 | tg3_writephy(tp, MII_ADVERTISE, new_adv); |
2867 | bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; | 2867 | bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; |
2868 | tg3_writephy(tp, MII_BMCR, bmcr); | 2868 | tg3_writephy(tp, MII_BMCR, bmcr); |
2869 | 2869 | ||
2870 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 2870 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); |
2871 | tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; | 2871 | tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; |
2872 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2872 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2873 | 2873 | ||
2874 | return err; | 2874 | return err; |
2875 | } | 2875 | } |
2876 | } else { | 2876 | } else { |
2877 | u32 new_bmcr; | 2877 | u32 new_bmcr; |
2878 | 2878 | ||
2879 | bmcr &= ~BMCR_SPEED1000; | 2879 | bmcr &= ~BMCR_SPEED1000; |
2880 | new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); | 2880 | new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); |
2881 | 2881 | ||
2882 | if (tp->link_config.duplex == DUPLEX_FULL) | 2882 | if (tp->link_config.duplex == DUPLEX_FULL) |
2883 | new_bmcr |= BMCR_FULLDPLX; | 2883 | new_bmcr |= BMCR_FULLDPLX; |
2884 | 2884 | ||
2885 | if (new_bmcr != bmcr) { | 2885 | if (new_bmcr != bmcr) { |
2886 | /* BMCR_SPEED1000 is a reserved bit that needs | 2886 | /* BMCR_SPEED1000 is a reserved bit that needs |
2887 | * to be set on write. | 2887 | * to be set on write. |
2888 | */ | 2888 | */ |
2889 | new_bmcr |= BMCR_SPEED1000; | 2889 | new_bmcr |= BMCR_SPEED1000; |
2890 | 2890 | ||
2891 | /* Force a linkdown */ | 2891 | /* Force a linkdown */ |
2892 | if (netif_carrier_ok(tp->dev)) { | 2892 | if (netif_carrier_ok(tp->dev)) { |
2893 | u32 adv; | 2893 | u32 adv; |
2894 | 2894 | ||
2895 | err |= tg3_readphy(tp, MII_ADVERTISE, &adv); | 2895 | err |= tg3_readphy(tp, MII_ADVERTISE, &adv); |
2896 | adv &= ~(ADVERTISE_1000XFULL | | 2896 | adv &= ~(ADVERTISE_1000XFULL | |
2897 | ADVERTISE_1000XHALF | | 2897 | ADVERTISE_1000XHALF | |
2898 | ADVERTISE_SLCT); | 2898 | ADVERTISE_SLCT); |
2899 | tg3_writephy(tp, MII_ADVERTISE, adv); | 2899 | tg3_writephy(tp, MII_ADVERTISE, adv); |
2900 | tg3_writephy(tp, MII_BMCR, bmcr | | 2900 | tg3_writephy(tp, MII_BMCR, bmcr | |
2901 | BMCR_ANRESTART | | 2901 | BMCR_ANRESTART | |
2902 | BMCR_ANENABLE); | 2902 | BMCR_ANENABLE); |
2903 | udelay(10); | 2903 | udelay(10); |
2904 | netif_carrier_off(tp->dev); | 2904 | netif_carrier_off(tp->dev); |
2905 | } | 2905 | } |
2906 | tg3_writephy(tp, MII_BMCR, new_bmcr); | 2906 | tg3_writephy(tp, MII_BMCR, new_bmcr); |
2907 | bmcr = new_bmcr; | 2907 | bmcr = new_bmcr; |
2908 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2908 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2909 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2909 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2910 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 2910 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
2911 | ASIC_REV_5714) { | 2911 | ASIC_REV_5714) { |
2912 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | 2912 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) |
2913 | bmsr |= BMSR_LSTATUS; | 2913 | bmsr |= BMSR_LSTATUS; |
2914 | else | 2914 | else |
2915 | bmsr &= ~BMSR_LSTATUS; | 2915 | bmsr &= ~BMSR_LSTATUS; |
2916 | } | 2916 | } |
2917 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2917 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2918 | } | 2918 | } |
2919 | } | 2919 | } |
2920 | 2920 | ||
2921 | if (bmsr & BMSR_LSTATUS) { | 2921 | if (bmsr & BMSR_LSTATUS) { |
2922 | current_speed = SPEED_1000; | 2922 | current_speed = SPEED_1000; |
2923 | current_link_up = 1; | 2923 | current_link_up = 1; |
2924 | if (bmcr & BMCR_FULLDPLX) | 2924 | if (bmcr & BMCR_FULLDPLX) |
2925 | current_duplex = DUPLEX_FULL; | 2925 | current_duplex = DUPLEX_FULL; |
2926 | else | 2926 | else |
2927 | current_duplex = DUPLEX_HALF; | 2927 | current_duplex = DUPLEX_HALF; |
2928 | 2928 | ||
2929 | if (bmcr & BMCR_ANENABLE) { | 2929 | if (bmcr & BMCR_ANENABLE) { |
2930 | u32 local_adv, remote_adv, common; | 2930 | u32 local_adv, remote_adv, common; |
2931 | 2931 | ||
2932 | err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); | 2932 | err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); |
2933 | err |= tg3_readphy(tp, MII_LPA, &remote_adv); | 2933 | err |= tg3_readphy(tp, MII_LPA, &remote_adv); |
2934 | common = local_adv & remote_adv; | 2934 | common = local_adv & remote_adv; |
2935 | if (common & (ADVERTISE_1000XHALF | | 2935 | if (common & (ADVERTISE_1000XHALF | |
2936 | ADVERTISE_1000XFULL)) { | 2936 | ADVERTISE_1000XFULL)) { |
2937 | if (common & ADVERTISE_1000XFULL) | 2937 | if (common & ADVERTISE_1000XFULL) |
2938 | current_duplex = DUPLEX_FULL; | 2938 | current_duplex = DUPLEX_FULL; |
2939 | else | 2939 | else |
2940 | current_duplex = DUPLEX_HALF; | 2940 | current_duplex = DUPLEX_HALF; |
2941 | 2941 | ||
2942 | tg3_setup_flow_control(tp, local_adv, | 2942 | tg3_setup_flow_control(tp, local_adv, |
2943 | remote_adv); | 2943 | remote_adv); |
2944 | } | 2944 | } |
2945 | else | 2945 | else |
2946 | current_link_up = 0; | 2946 | current_link_up = 0; |
2947 | } | 2947 | } |
2948 | } | 2948 | } |
2949 | 2949 | ||
2950 | tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; | 2950 | tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; |
2951 | if (tp->link_config.active_duplex == DUPLEX_HALF) | 2951 | if (tp->link_config.active_duplex == DUPLEX_HALF) |
2952 | tp->mac_mode |= MAC_MODE_HALF_DUPLEX; | 2952 | tp->mac_mode |= MAC_MODE_HALF_DUPLEX; |
2953 | 2953 | ||
2954 | tw32_f(MAC_MODE, tp->mac_mode); | 2954 | tw32_f(MAC_MODE, tp->mac_mode); |
2955 | udelay(40); | 2955 | udelay(40); |
2956 | 2956 | ||
2957 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 2957 | tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); |
2958 | 2958 | ||
2959 | tp->link_config.active_speed = current_speed; | 2959 | tp->link_config.active_speed = current_speed; |
2960 | tp->link_config.active_duplex = current_duplex; | 2960 | tp->link_config.active_duplex = current_duplex; |
2961 | 2961 | ||
2962 | if (current_link_up != netif_carrier_ok(tp->dev)) { | 2962 | if (current_link_up != netif_carrier_ok(tp->dev)) { |
2963 | if (current_link_up) | 2963 | if (current_link_up) |
2964 | netif_carrier_on(tp->dev); | 2964 | netif_carrier_on(tp->dev); |
2965 | else { | 2965 | else { |
2966 | netif_carrier_off(tp->dev); | 2966 | netif_carrier_off(tp->dev); |
2967 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2967 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2968 | } | 2968 | } |
2969 | tg3_link_report(tp); | 2969 | tg3_link_report(tp); |
2970 | } | 2970 | } |
2971 | return err; | 2971 | return err; |
2972 | } | 2972 | } |
2973 | 2973 | ||
2974 | static void tg3_serdes_parallel_detect(struct tg3 *tp) | 2974 | static void tg3_serdes_parallel_detect(struct tg3 *tp) |
2975 | { | 2975 | { |
2976 | if (tp->serdes_counter) { | 2976 | if (tp->serdes_counter) { |
2977 | /* Give autoneg time to complete. */ | 2977 | /* Give autoneg time to complete. */ |
2978 | tp->serdes_counter--; | 2978 | tp->serdes_counter--; |
2979 | return; | 2979 | return; |
2980 | } | 2980 | } |
2981 | if (!netif_carrier_ok(tp->dev) && | 2981 | if (!netif_carrier_ok(tp->dev) && |
2982 | (tp->link_config.autoneg == AUTONEG_ENABLE)) { | 2982 | (tp->link_config.autoneg == AUTONEG_ENABLE)) { |
2983 | u32 bmcr; | 2983 | u32 bmcr; |
2984 | 2984 | ||
2985 | tg3_readphy(tp, MII_BMCR, &bmcr); | 2985 | tg3_readphy(tp, MII_BMCR, &bmcr); |
2986 | if (bmcr & BMCR_ANENABLE) { | 2986 | if (bmcr & BMCR_ANENABLE) { |
2987 | u32 phy1, phy2; | 2987 | u32 phy1, phy2; |
2988 | 2988 | ||
2989 | /* Select shadow register 0x1f */ | 2989 | /* Select shadow register 0x1f */ |
2990 | tg3_writephy(tp, 0x1c, 0x7c00); | 2990 | tg3_writephy(tp, 0x1c, 0x7c00); |
2991 | tg3_readphy(tp, 0x1c, &phy1); | 2991 | tg3_readphy(tp, 0x1c, &phy1); |
2992 | 2992 | ||
2993 | /* Select expansion interrupt status register */ | 2993 | /* Select expansion interrupt status register */ |
2994 | tg3_writephy(tp, 0x17, 0x0f01); | 2994 | tg3_writephy(tp, 0x17, 0x0f01); |
2995 | tg3_readphy(tp, 0x15, &phy2); | 2995 | tg3_readphy(tp, 0x15, &phy2); |
2996 | tg3_readphy(tp, 0x15, &phy2); | 2996 | tg3_readphy(tp, 0x15, &phy2); |
2997 | 2997 | ||
2998 | if ((phy1 & 0x10) && !(phy2 & 0x20)) { | 2998 | if ((phy1 & 0x10) && !(phy2 & 0x20)) { |
2999 | /* We have signal detect and not receiving | 2999 | /* We have signal detect and not receiving |
3000 | * config code words, link is up by parallel | 3000 | * config code words, link is up by parallel |
3001 | * detection. | 3001 | * detection. |
3002 | */ | 3002 | */ |
3003 | 3003 | ||
3004 | bmcr &= ~BMCR_ANENABLE; | 3004 | bmcr &= ~BMCR_ANENABLE; |
3005 | bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; | 3005 | bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; |
3006 | tg3_writephy(tp, MII_BMCR, bmcr); | 3006 | tg3_writephy(tp, MII_BMCR, bmcr); |
3007 | tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; | 3007 | tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; |
3008 | } | 3008 | } |
3009 | } | 3009 | } |
3010 | } | 3010 | } |
3011 | else if (netif_carrier_ok(tp->dev) && | 3011 | else if (netif_carrier_ok(tp->dev) && |
3012 | (tp->link_config.autoneg == AUTONEG_ENABLE) && | 3012 | (tp->link_config.autoneg == AUTONEG_ENABLE) && |
3013 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { | 3013 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { |
3014 | u32 phy2; | 3014 | u32 phy2; |
3015 | 3015 | ||
3016 | /* Select expansion interrupt status register */ | 3016 | /* Select expansion interrupt status register */ |
3017 | tg3_writephy(tp, 0x17, 0x0f01); | 3017 | tg3_writephy(tp, 0x17, 0x0f01); |
3018 | tg3_readphy(tp, 0x15, &phy2); | 3018 | tg3_readphy(tp, 0x15, &phy2); |
3019 | if (phy2 & 0x20) { | 3019 | if (phy2 & 0x20) { |
3020 | u32 bmcr; | 3020 | u32 bmcr; |
3021 | 3021 | ||
3022 | /* Config code words received, turn on autoneg. */ | 3022 | /* Config code words received, turn on autoneg. */ |
3023 | tg3_readphy(tp, MII_BMCR, &bmcr); | 3023 | tg3_readphy(tp, MII_BMCR, &bmcr); |
3024 | tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); | 3024 | tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); |
3025 | 3025 | ||
3026 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 3026 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
3027 | 3027 | ||
3028 | } | 3028 | } |
3029 | } | 3029 | } |
3030 | } | 3030 | } |
3031 | 3031 | ||
3032 | static int tg3_setup_phy(struct tg3 *tp, int force_reset) | 3032 | static int tg3_setup_phy(struct tg3 *tp, int force_reset) |
3033 | { | 3033 | { |
3034 | int err; | 3034 | int err; |
3035 | 3035 | ||
3036 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 3036 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
3037 | err = tg3_setup_fiber_phy(tp, force_reset); | 3037 | err = tg3_setup_fiber_phy(tp, force_reset); |
3038 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 3038 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
3039 | err = tg3_setup_fiber_mii_phy(tp, force_reset); | 3039 | err = tg3_setup_fiber_mii_phy(tp, force_reset); |
3040 | } else { | 3040 | } else { |
3041 | err = tg3_setup_copper_phy(tp, force_reset); | 3041 | err = tg3_setup_copper_phy(tp, force_reset); |
3042 | } | 3042 | } |
3043 | 3043 | ||
3044 | if (tp->link_config.active_speed == SPEED_1000 && | 3044 | if (tp->link_config.active_speed == SPEED_1000 && |
3045 | tp->link_config.active_duplex == DUPLEX_HALF) | 3045 | tp->link_config.active_duplex == DUPLEX_HALF) |
3046 | tw32(MAC_TX_LENGTHS, | 3046 | tw32(MAC_TX_LENGTHS, |
3047 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 3047 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | |
3048 | (6 << TX_LENGTHS_IPG_SHIFT) | | 3048 | (6 << TX_LENGTHS_IPG_SHIFT) | |
3049 | (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); | 3049 | (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); |
3050 | else | 3050 | else |
3051 | tw32(MAC_TX_LENGTHS, | 3051 | tw32(MAC_TX_LENGTHS, |
3052 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 3052 | ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | |
3053 | (6 << TX_LENGTHS_IPG_SHIFT) | | 3053 | (6 << TX_LENGTHS_IPG_SHIFT) | |
3054 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); | 3054 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); |
3055 | 3055 | ||
3056 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 3056 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
3057 | if (netif_carrier_ok(tp->dev)) { | 3057 | if (netif_carrier_ok(tp->dev)) { |
3058 | tw32(HOSTCC_STAT_COAL_TICKS, | 3058 | tw32(HOSTCC_STAT_COAL_TICKS, |
3059 | tp->coal.stats_block_coalesce_usecs); | 3059 | tp->coal.stats_block_coalesce_usecs); |
3060 | } else { | 3060 | } else { |
3061 | tw32(HOSTCC_STAT_COAL_TICKS, 0); | 3061 | tw32(HOSTCC_STAT_COAL_TICKS, 0); |
3062 | } | 3062 | } |
3063 | } | 3063 | } |
3064 | 3064 | ||
3065 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { | 3065 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { |
3066 | u32 val = tr32(PCIE_PWR_MGMT_THRESH); | 3066 | u32 val = tr32(PCIE_PWR_MGMT_THRESH); |
3067 | if (!netif_carrier_ok(tp->dev)) | 3067 | if (!netif_carrier_ok(tp->dev)) |
3068 | val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | | 3068 | val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | |
3069 | tp->pwrmgmt_thresh; | 3069 | tp->pwrmgmt_thresh; |
3070 | else | 3070 | else |
3071 | val |= PCIE_PWR_MGMT_L1_THRESH_MSK; | 3071 | val |= PCIE_PWR_MGMT_L1_THRESH_MSK; |
3072 | tw32(PCIE_PWR_MGMT_THRESH, val); | 3072 | tw32(PCIE_PWR_MGMT_THRESH, val); |
3073 | } | 3073 | } |
3074 | 3074 | ||
3075 | return err; | 3075 | return err; |
3076 | } | 3076 | } |
3077 | 3077 | ||
3078 | /* This is called whenever we suspect that the system chipset is re- | 3078 | /* This is called whenever we suspect that the system chipset is re- |
3079 | * ordering the sequence of MMIO to the tx send mailbox. The symptom | 3079 | * ordering the sequence of MMIO to the tx send mailbox. The symptom |
3080 | * is bogus tx completions. We try to recover by setting the | 3080 | * is bogus tx completions. We try to recover by setting the |
3081 | * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later | 3081 | * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later |
3082 | * in the workqueue. | 3082 | * in the workqueue. |
3083 | */ | 3083 | */ |
3084 | static void tg3_tx_recover(struct tg3 *tp) | 3084 | static void tg3_tx_recover(struct tg3 *tp) |
3085 | { | 3085 | { |
3086 | BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || | 3086 | BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || |
3087 | tp->write32_tx_mbox == tg3_write_indirect_mbox); | 3087 | tp->write32_tx_mbox == tg3_write_indirect_mbox); |
3088 | 3088 | ||
3089 | printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" | 3089 | printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" |
3090 | "mapped I/O cycles to the network device, attempting to " | 3090 | "mapped I/O cycles to the network device, attempting to " |
3091 | "recover. Please report the problem to the driver maintainer " | 3091 | "recover. Please report the problem to the driver maintainer " |
3092 | "and include system chipset information.\n", tp->dev->name); | 3092 | "and include system chipset information.\n", tp->dev->name); |
3093 | 3093 | ||
3094 | spin_lock(&tp->lock); | 3094 | spin_lock(&tp->lock); |
3095 | tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; | 3095 | tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; |
3096 | spin_unlock(&tp->lock); | 3096 | spin_unlock(&tp->lock); |
3097 | } | 3097 | } |
3098 | 3098 | ||
3099 | static inline u32 tg3_tx_avail(struct tg3 *tp) | 3099 | static inline u32 tg3_tx_avail(struct tg3 *tp) |
3100 | { | 3100 | { |
3101 | smp_mb(); | 3101 | smp_mb(); |
3102 | return (tp->tx_pending - | 3102 | return (tp->tx_pending - |
3103 | ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); | 3103 | ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); |
3104 | } | 3104 | } |
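tg3_tx_avail() relies on the TX ring size being a power of two: the free-descriptor count is the pending limit minus the masked difference of producer and consumer indices, and the unsigned subtraction keeps that correct even when the 32-bit indices wrap. A minimal user-space sketch of the same arithmetic (hypothetical names; RING_SIZE only stands in for TG3_TX_RING_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 512u   /* must be a power of two; stand-in for TG3_TX_RING_SIZE */

    /* Free descriptors, given the producer/consumer indices and the number of
     * descriptors the caller allows to be outstanding (like tp->tx_pending). */
    static uint32_t ring_avail(uint32_t pending, uint32_t prod, uint32_t cons)
    {
        return pending - ((prod - cons) & (RING_SIZE - 1));
    }

    int main(void)
    {
        /* Producer a little ahead of the consumer: 10 in flight. */
        printf("%u\n", (unsigned)ring_avail(RING_SIZE, 100, 90));

        /* Producer index has wrapped past 2^32 while the consumer has not:
         * unsigned subtraction plus the mask still reports 10 in flight. */
        printf("%u\n", (unsigned)ring_avail(RING_SIZE, 5, 0xfffffffbu));
        return 0;
    }

Both calls print 502, which is why the indices never need to be reset at the wrap point.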
3105 | 3105 | ||
3106 | /* Tigon3 never reports partial packet sends. So we do not | 3106 | /* Tigon3 never reports partial packet sends. So we do not |
3107 | * need special logic to handle SKBs that have not had all | 3107 | * need special logic to handle SKBs that have not had all |
3108 | * of their frags sent yet, like SunGEM does. | 3108 | * of their frags sent yet, like SunGEM does. |
3109 | */ | 3109 | */ |
3110 | static void tg3_tx(struct tg3 *tp) | 3110 | static void tg3_tx(struct tg3 *tp) |
3111 | { | 3111 | { |
3112 | u32 hw_idx = tp->hw_status->idx[0].tx_consumer; | 3112 | u32 hw_idx = tp->hw_status->idx[0].tx_consumer; |
3113 | u32 sw_idx = tp->tx_cons; | 3113 | u32 sw_idx = tp->tx_cons; |
3114 | 3114 | ||
3115 | while (sw_idx != hw_idx) { | 3115 | while (sw_idx != hw_idx) { |
3116 | struct tx_ring_info *ri = &tp->tx_buffers[sw_idx]; | 3116 | struct tx_ring_info *ri = &tp->tx_buffers[sw_idx]; |
3117 | struct sk_buff *skb = ri->skb; | 3117 | struct sk_buff *skb = ri->skb; |
3118 | int i, tx_bug = 0; | 3118 | int i, tx_bug = 0; |
3119 | 3119 | ||
3120 | if (unlikely(skb == NULL)) { | 3120 | if (unlikely(skb == NULL)) { |
3121 | tg3_tx_recover(tp); | 3121 | tg3_tx_recover(tp); |
3122 | return; | 3122 | return; |
3123 | } | 3123 | } |
3124 | 3124 | ||
3125 | pci_unmap_single(tp->pdev, | 3125 | pci_unmap_single(tp->pdev, |
3126 | pci_unmap_addr(ri, mapping), | 3126 | pci_unmap_addr(ri, mapping), |
3127 | skb_headlen(skb), | 3127 | skb_headlen(skb), |
3128 | PCI_DMA_TODEVICE); | 3128 | PCI_DMA_TODEVICE); |
3129 | 3129 | ||
3130 | ri->skb = NULL; | 3130 | ri->skb = NULL; |
3131 | 3131 | ||
3132 | sw_idx = NEXT_TX(sw_idx); | 3132 | sw_idx = NEXT_TX(sw_idx); |
3133 | 3133 | ||
3134 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 3134 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
3135 | ri = &tp->tx_buffers[sw_idx]; | 3135 | ri = &tp->tx_buffers[sw_idx]; |
3136 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) | 3136 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) |
3137 | tx_bug = 1; | 3137 | tx_bug = 1; |
3138 | 3138 | ||
3139 | pci_unmap_page(tp->pdev, | 3139 | pci_unmap_page(tp->pdev, |
3140 | pci_unmap_addr(ri, mapping), | 3140 | pci_unmap_addr(ri, mapping), |
3141 | skb_shinfo(skb)->frags[i].size, | 3141 | skb_shinfo(skb)->frags[i].size, |
3142 | PCI_DMA_TODEVICE); | 3142 | PCI_DMA_TODEVICE); |
3143 | 3143 | ||
3144 | sw_idx = NEXT_TX(sw_idx); | 3144 | sw_idx = NEXT_TX(sw_idx); |
3145 | } | 3145 | } |
3146 | 3146 | ||
3147 | dev_kfree_skb(skb); | 3147 | dev_kfree_skb(skb); |
3148 | 3148 | ||
3149 | if (unlikely(tx_bug)) { | 3149 | if (unlikely(tx_bug)) { |
3150 | tg3_tx_recover(tp); | 3150 | tg3_tx_recover(tp); |
3151 | return; | 3151 | return; |
3152 | } | 3152 | } |
3153 | } | 3153 | } |
3154 | 3154 | ||
3155 | tp->tx_cons = sw_idx; | 3155 | tp->tx_cons = sw_idx; |
3156 | 3156 | ||
3157 | /* Need to make the tx_cons update visible to tg3_start_xmit() | 3157 | /* Need to make the tx_cons update visible to tg3_start_xmit() |
3158 | * before checking for netif_queue_stopped(). Without the | 3158 | * before checking for netif_queue_stopped(). Without the |
3159 | * memory barrier, there is a small possibility that tg3_start_xmit() | 3159 | * memory barrier, there is a small possibility that tg3_start_xmit() |
3160 | * will miss it and cause the queue to be stopped forever. | 3160 | * will miss it and cause the queue to be stopped forever. |
3161 | */ | 3161 | */ |
3162 | smp_mb(); | 3162 | smp_mb(); |
3163 | 3163 | ||
3164 | if (unlikely(netif_queue_stopped(tp->dev) && | 3164 | if (unlikely(netif_queue_stopped(tp->dev) && |
3165 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) { | 3165 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) { |
3166 | netif_tx_lock(tp->dev); | 3166 | netif_tx_lock(tp->dev); |
3167 | if (netif_queue_stopped(tp->dev) && | 3167 | if (netif_queue_stopped(tp->dev) && |
3168 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))) | 3168 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))) |
3169 | netif_wake_queue(tp->dev); | 3169 | netif_wake_queue(tp->dev); |
3170 | netif_tx_unlock(tp->dev); | 3170 | netif_tx_unlock(tp->dev); |
3171 | } | 3171 | } |
3172 | } | 3172 | } |
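The smp_mb() at the end of tg3_tx() pairs with a barrier on the transmit side: either the transmit path observes the advanced tx_cons, or the completion path observes the stopped queue, so the wake-up cannot be lost. The user-space model below is only a sketch of that handshake (C11 atomics and fences standing in for netif_stop_queue()/netif_wake_queue() and smp_mb(); the ring size, threshold, and packet count are arbitrary). In this model the seq_cst atomics already provide the ordering, so the explicit fences merely mark where the driver's barriers sit.

    /* Build: cc -O2 -pthread wake_model.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define RING        16u            /* power-of-two ring size                 */
    #define WAKE_THRESH (RING / 4)     /* plays the role of TG3_TX_WAKEUP_THRESH */
    #define PACKETS     200000u

    static atomic_uint prod, cons;
    static atomic_int  stopped;        /* stands in for netif_queue_stopped()    */

    static unsigned avail(void)
    {
        return RING - ((atomic_load(&prod) - atomic_load(&cons)) & (RING - 1));
    }

    static void *xmit_side(void *unused)            /* models tg3_start_xmit()   */
    {
        (void)unused;
        for (unsigned i = 0; i < PACKETS; i++) {
            atomic_fetch_add(&prod, 1);             /* post one descriptor       */
            if (avail() <= 1) {
                atomic_store(&stopped, 1);          /* netif_stop_queue()        */
                atomic_thread_fence(memory_order_seq_cst); /* xmit-side barrier  */
                if (avail() > WAKE_THRESH)          /* completion raced with us  */
                    atomic_store(&stopped, 0);
            }
            while (atomic_load(&stopped))
                ;                                   /* queue stopped: wait       */
        }
        return NULL;
    }

    static void *completion_side(void *unused)      /* models tg3_tx()           */
    {
        (void)unused;
        while (atomic_load(&cons) != PACKETS) {
            if (atomic_load(&cons) == atomic_load(&prod))
                continue;                           /* nothing completed yet     */
            atomic_fetch_add(&cons, 1);             /* advance tp->tx_cons       */
            atomic_thread_fence(memory_order_seq_cst); /* models the smp_mb()    */
            if (atomic_load(&stopped) && avail() > WAKE_THRESH)
                atomic_store(&stopped, 0);          /* netif_wake_queue()        */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, xmit_side, NULL);
        pthread_create(&t2, NULL, completion_side, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("reclaimed %u packets without a stall\n", atomic_load(&cons));
        return 0;
    }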
3173 | 3173 | ||
3174 | /* Returns size of skb allocated or < 0 on error. | 3174 | /* Returns size of skb allocated or < 0 on error. |
3175 | * | 3175 | * |
3176 | * We only need to fill in the address because the other members | 3176 | * We only need to fill in the address because the other members |
3177 | * of the RX descriptor are invariant, see tg3_init_rings. | 3177 | * of the RX descriptor are invariant, see tg3_init_rings. |
3178 | * | 3178 | * |
3179 | * Note the purposeful asymmetry of cpu vs. chip accesses. For | 3179 | * Note the purposeful asymmetry of cpu vs. chip accesses. For |
3180 | * posting buffers we only dirty the first cache line of the RX | 3180 | * posting buffers we only dirty the first cache line of the RX |
3181 | * descriptor (containing the address). Whereas for the RX status | 3181 | * descriptor (containing the address). Whereas for the RX status |
3182 | * buffers the cpu only reads the last cacheline of the RX descriptor | 3182 | * buffers the cpu only reads the last cacheline of the RX descriptor |
3183 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). | 3183 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). |
3184 | */ | 3184 | */ |
3185 | static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, | 3185 | static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, |
3186 | int src_idx, u32 dest_idx_unmasked) | 3186 | int src_idx, u32 dest_idx_unmasked) |
3187 | { | 3187 | { |
3188 | struct tg3_rx_buffer_desc *desc; | 3188 | struct tg3_rx_buffer_desc *desc; |
3189 | struct ring_info *map, *src_map; | 3189 | struct ring_info *map, *src_map; |
3190 | struct sk_buff *skb; | 3190 | struct sk_buff *skb; |
3191 | dma_addr_t mapping; | 3191 | dma_addr_t mapping; |
3192 | int skb_size, dest_idx; | 3192 | int skb_size, dest_idx; |
3193 | 3193 | ||
3194 | src_map = NULL; | 3194 | src_map = NULL; |
3195 | switch (opaque_key) { | 3195 | switch (opaque_key) { |
3196 | case RXD_OPAQUE_RING_STD: | 3196 | case RXD_OPAQUE_RING_STD: |
3197 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 3197 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
3198 | desc = &tp->rx_std[dest_idx]; | 3198 | desc = &tp->rx_std[dest_idx]; |
3199 | map = &tp->rx_std_buffers[dest_idx]; | 3199 | map = &tp->rx_std_buffers[dest_idx]; |
3200 | if (src_idx >= 0) | 3200 | if (src_idx >= 0) |
3201 | src_map = &tp->rx_std_buffers[src_idx]; | 3201 | src_map = &tp->rx_std_buffers[src_idx]; |
3202 | skb_size = tp->rx_pkt_buf_sz; | 3202 | skb_size = tp->rx_pkt_buf_sz; |
3203 | break; | 3203 | break; |
3204 | 3204 | ||
3205 | case RXD_OPAQUE_RING_JUMBO: | 3205 | case RXD_OPAQUE_RING_JUMBO: |
3206 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 3206 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
3207 | desc = &tp->rx_jumbo[dest_idx]; | 3207 | desc = &tp->rx_jumbo[dest_idx]; |
3208 | map = &tp->rx_jumbo_buffers[dest_idx]; | 3208 | map = &tp->rx_jumbo_buffers[dest_idx]; |
3209 | if (src_idx >= 0) | 3209 | if (src_idx >= 0) |
3210 | src_map = &tp->rx_jumbo_buffers[src_idx]; | 3210 | src_map = &tp->rx_jumbo_buffers[src_idx]; |
3211 | skb_size = RX_JUMBO_PKT_BUF_SZ; | 3211 | skb_size = RX_JUMBO_PKT_BUF_SZ; |
3212 | break; | 3212 | break; |
3213 | 3213 | ||
3214 | default: | 3214 | default: |
3215 | return -EINVAL; | 3215 | return -EINVAL; |
3216 | } | 3216 | } |
3217 | 3217 | ||
3218 | /* Do not overwrite any of the map or rp information | 3218 | /* Do not overwrite any of the map or rp information |
3219 | * until we are sure we can commit to a new buffer. | 3219 | * until we are sure we can commit to a new buffer. |
3220 | * | 3220 | * |
3221 | * Callers depend upon this behavior and assume that | 3221 | * Callers depend upon this behavior and assume that |
3222 | * we leave everything unchanged if we fail. | 3222 | * we leave everything unchanged if we fail. |
3223 | */ | 3223 | */ |
3224 | skb = netdev_alloc_skb(tp->dev, skb_size); | 3224 | skb = netdev_alloc_skb(tp->dev, skb_size); |
3225 | if (skb == NULL) | 3225 | if (skb == NULL) |
3226 | return -ENOMEM; | 3226 | return -ENOMEM; |
3227 | 3227 | ||
3228 | skb_reserve(skb, tp->rx_offset); | 3228 | skb_reserve(skb, tp->rx_offset); |
3229 | 3229 | ||
3230 | mapping = pci_map_single(tp->pdev, skb->data, | 3230 | mapping = pci_map_single(tp->pdev, skb->data, |
3231 | skb_size - tp->rx_offset, | 3231 | skb_size - tp->rx_offset, |
3232 | PCI_DMA_FROMDEVICE); | 3232 | PCI_DMA_FROMDEVICE); |
3233 | 3233 | ||
3234 | map->skb = skb; | 3234 | map->skb = skb; |
3235 | pci_unmap_addr_set(map, mapping, mapping); | 3235 | pci_unmap_addr_set(map, mapping, mapping); |
3236 | 3236 | ||
3237 | if (src_map != NULL) | 3237 | if (src_map != NULL) |
3238 | src_map->skb = NULL; | 3238 | src_map->skb = NULL; |
3239 | 3239 | ||
3240 | desc->addr_hi = ((u64)mapping >> 32); | 3240 | desc->addr_hi = ((u64)mapping >> 32); |
3241 | desc->addr_lo = ((u64)mapping & 0xffffffff); | 3241 | desc->addr_lo = ((u64)mapping & 0xffffffff); |
3242 | 3242 | ||
3243 | return skb_size; | 3243 | return skb_size; |
3244 | } | 3244 | } |
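The final two stores above split the 64-bit DMA mapping into the descriptor's 32-bit addr_hi/addr_lo words, which is all the posting path has to touch per the comment preceding this function. A tiny sketch of that split (hypothetical struct, not the real descriptor layout):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the two address words of an RX buffer descriptor. */
    struct rx_desc_addr {
        uint32_t addr_hi;
        uint32_t addr_lo;
    };

    static struct rx_desc_addr split_dma_addr(uint64_t mapping)
    {
        struct rx_desc_addr d;

        d.addr_hi = (uint32_t)(mapping >> 32);
        d.addr_lo = (uint32_t)(mapping & 0xffffffffu);
        return d;
    }

    int main(void)
    {
        struct rx_desc_addr d = split_dma_addr(0x0000000123456789ull);

        /* Prints hi=0x00000001 lo=0x23456789 */
        printf("hi=0x%08x lo=0x%08x\n", (unsigned)d.addr_hi, (unsigned)d.addr_lo);
        return 0;
    }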
3245 | 3245 | ||
3246 | /* We only need to move over in the address because the other | 3246 | /* We only need to move over in the address because the other |
3247 | * members of the RX descriptor are invariant. See notes above | 3247 | * members of the RX descriptor are invariant. See notes above |
3248 | * tg3_alloc_rx_skb for full details. | 3248 | * tg3_alloc_rx_skb for full details. |
3249 | */ | 3249 | */ |
3250 | static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, | 3250 | static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, |
3251 | int src_idx, u32 dest_idx_unmasked) | 3251 | int src_idx, u32 dest_idx_unmasked) |
3252 | { | 3252 | { |
3253 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 3253 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; |
3254 | struct ring_info *src_map, *dest_map; | 3254 | struct ring_info *src_map, *dest_map; |
3255 | int dest_idx; | 3255 | int dest_idx; |
3256 | 3256 | ||
3257 | switch (opaque_key) { | 3257 | switch (opaque_key) { |
3258 | case RXD_OPAQUE_RING_STD: | 3258 | case RXD_OPAQUE_RING_STD: |
3259 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 3259 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
3260 | dest_desc = &tp->rx_std[dest_idx]; | 3260 | dest_desc = &tp->rx_std[dest_idx]; |
3261 | dest_map = &tp->rx_std_buffers[dest_idx]; | 3261 | dest_map = &tp->rx_std_buffers[dest_idx]; |
3262 | src_desc = &tp->rx_std[src_idx]; | 3262 | src_desc = &tp->rx_std[src_idx]; |
3263 | src_map = &tp->rx_std_buffers[src_idx]; | 3263 | src_map = &tp->rx_std_buffers[src_idx]; |
3264 | break; | 3264 | break; |
3265 | 3265 | ||
3266 | case RXD_OPAQUE_RING_JUMBO: | 3266 | case RXD_OPAQUE_RING_JUMBO: |
3267 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 3267 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
3268 | dest_desc = &tp->rx_jumbo[dest_idx]; | 3268 | dest_desc = &tp->rx_jumbo[dest_idx]; |
3269 | dest_map = &tp->rx_jumbo_buffers[dest_idx]; | 3269 | dest_map = &tp->rx_jumbo_buffers[dest_idx]; |
3270 | src_desc = &tp->rx_jumbo[src_idx]; | 3270 | src_desc = &tp->rx_jumbo[src_idx]; |
3271 | src_map = &tp->rx_jumbo_buffers[src_idx]; | 3271 | src_map = &tp->rx_jumbo_buffers[src_idx]; |
3272 | break; | 3272 | break; |
3273 | 3273 | ||
3274 | default: | 3274 | default: |
3275 | return; | 3275 | return; |
3276 | } | 3276 | } |
3277 | 3277 | ||
3278 | dest_map->skb = src_map->skb; | 3278 | dest_map->skb = src_map->skb; |
3279 | pci_unmap_addr_set(dest_map, mapping, | 3279 | pci_unmap_addr_set(dest_map, mapping, |
3280 | pci_unmap_addr(src_map, mapping)); | 3280 | pci_unmap_addr(src_map, mapping)); |
3281 | dest_desc->addr_hi = src_desc->addr_hi; | 3281 | dest_desc->addr_hi = src_desc->addr_hi; |
3282 | dest_desc->addr_lo = src_desc->addr_lo; | 3282 | dest_desc->addr_lo = src_desc->addr_lo; |
3283 | 3283 | ||
3284 | src_map->skb = NULL; | 3284 | src_map->skb = NULL; |
3285 | } | 3285 | } |
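tg3_recycle_rx() moves an already-mapped buffer from one ring slot to another instead of allocating a replacement: only the skb pointer, the DMA cookie, and the two address words change hands, and the source slot is cleared so the buffer is not unmapped twice. A small model of that ownership transfer (hypothetical slot type; the real code also carries the pci_unmap bookkeeping):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct ring_slot {
        void     *skb;                /* current owner of the buffer */
        uint64_t  mapping;            /* DMA address of the buffer   */
        uint32_t  addr_hi, addr_lo;   /* descriptor address words    */
    };

    static void recycle(struct ring_slot *dest, struct ring_slot *src)
    {
        dest->skb     = src->skb;
        dest->mapping = src->mapping;
        dest->addr_hi = src->addr_hi;
        dest->addr_lo = src->addr_lo;
        src->skb      = NULL;         /* source no longer owns it */
    }

    int main(void)
    {
        static char buf[1536];
        struct ring_slot src = { buf, 0x123456789aull, 0x12, 0x3456789a };
        struct ring_slot dst = { 0 };

        recycle(&dst, &src);
        printf("dest owns buffer: %d, source cleared: %d\n",
               dst.skb == buf, src.skb == NULL);
        return 0;
    }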
3286 | 3286 | ||
3287 | #if TG3_VLAN_TAG_USED | 3287 | #if TG3_VLAN_TAG_USED |
3288 | static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) | 3288 | static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) |
3289 | { | 3289 | { |
3290 | return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag); | 3290 | return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag); |
3291 | } | 3291 | } |
3292 | #endif | 3292 | #endif |
3293 | 3293 | ||
3294 | /* The RX ring scheme is composed of multiple rings which post fresh | 3294 | /* The RX ring scheme is composed of multiple rings which post fresh |
3295 | * buffers to the chip, and one special ring the chip uses to report | 3295 | * buffers to the chip, and one special ring the chip uses to report |
3296 | * status back to the host. | 3296 | * status back to the host. |
3297 | * | 3297 | * |
3298 | * The special ring reports the status of received packets to the | 3298 | * The special ring reports the status of received packets to the |
3299 | * host. The chip does not write into the original descriptor the | 3299 | * host. The chip does not write into the original descriptor the |
3300 | * RX buffer was obtained from. The chip simply takes the original | 3300 | * RX buffer was obtained from. The chip simply takes the original |
3301 | * descriptor as provided by the host, updates the status and length | 3301 | * descriptor as provided by the host, updates the status and length |
3302 | * field, then writes this into the next status ring entry. | 3302 | * field, then writes this into the next status ring entry. |
3303 | * | 3303 | * |
3304 | * Each ring the host uses to post buffers to the chip is described | 3304 | * Each ring the host uses to post buffers to the chip is described |
3305 | * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, | 3305 | * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives, |
3306 | * it is first placed into the on-chip ram. When the packet's length | 3306 | * it is first placed into the on-chip ram. When the packet's length |
3307 | * is known, it walks down the TG3_BDINFO entries to select the ring. | 3307 | * is known, it walks down the TG3_BDINFO entries to select the ring. |
3308 | * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO | 3308 | * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO |
3309 | * which is within the range of the new packet's length is chosen. | 3309 | * which is within the range of the new packet's length is chosen. |
3310 | * | 3310 | * |
3311 | * The "separate ring for rx status" scheme may sound queer, but it makes | 3311 | * The "separate ring for rx status" scheme may sound queer, but it makes |
3312 | * sense from a cache coherency perspective. If only the host writes | 3312 | * sense from a cache coherency perspective. If only the host writes |
3313 | * to the buffer post rings, and only the chip writes to the rx status | 3313 | * to the buffer post rings, and only the chip writes to the rx status |
3314 | * rings, then cache lines never move beyond shared-modified state. | 3314 | * rings, then cache lines never move beyond shared-modified state. |
3315 | * If both the host and chip were to write into the same ring, cache line | 3315 | * If both the host and chip were to write into the same ring, cache line |
3316 | * eviction could occur since both entities want it in an exclusive state. | 3316 | * eviction could occur since both entities want it in an exclusive state. |
3317 | */ | 3317 | */ |
3318 | static int tg3_rx(struct tg3 *tp, int budget) | 3318 | static int tg3_rx(struct tg3 *tp, int budget) |
3319 | { | 3319 | { |
3320 | u32 work_mask, rx_std_posted = 0; | 3320 | u32 work_mask, rx_std_posted = 0; |
3321 | u32 sw_idx = tp->rx_rcb_ptr; | 3321 | u32 sw_idx = tp->rx_rcb_ptr; |
3322 | u16 hw_idx; | 3322 | u16 hw_idx; |
3323 | int received; | 3323 | int received; |
3324 | 3324 | ||
3325 | hw_idx = tp->hw_status->idx[0].rx_producer; | 3325 | hw_idx = tp->hw_status->idx[0].rx_producer; |
3326 | /* | 3326 | /* |
3327 | * We need to order the read of hw_idx and the read of | 3327 | * We need to order the read of hw_idx and the read of |
3328 | * the opaque cookie. | 3328 | * the opaque cookie. |
3329 | */ | 3329 | */ |
3330 | rmb(); | 3330 | rmb(); |
3331 | work_mask = 0; | 3331 | work_mask = 0; |
3332 | received = 0; | 3332 | received = 0; |
3333 | while (sw_idx != hw_idx && budget > 0) { | 3333 | while (sw_idx != hw_idx && budget > 0) { |
3334 | struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; | 3334 | struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; |
3335 | unsigned int len; | 3335 | unsigned int len; |
3336 | struct sk_buff *skb; | 3336 | struct sk_buff *skb; |
3337 | dma_addr_t dma_addr; | 3337 | dma_addr_t dma_addr; |
3338 | u32 opaque_key, desc_idx, *post_ptr; | 3338 | u32 opaque_key, desc_idx, *post_ptr; |
3339 | 3339 | ||
3340 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 3340 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
3341 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 3341 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
3342 | if (opaque_key == RXD_OPAQUE_RING_STD) { | 3342 | if (opaque_key == RXD_OPAQUE_RING_STD) { |
3343 | dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], | 3343 | dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], |
3344 | mapping); | 3344 | mapping); |
3345 | skb = tp->rx_std_buffers[desc_idx].skb; | 3345 | skb = tp->rx_std_buffers[desc_idx].skb; |
3346 | post_ptr = &tp->rx_std_ptr; | 3346 | post_ptr = &tp->rx_std_ptr; |
3347 | rx_std_posted++; | 3347 | rx_std_posted++; |
3348 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | 3348 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { |
3349 | dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], | 3349 | dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], |
3350 | mapping); | 3350 | mapping); |
3351 | skb = tp->rx_jumbo_buffers[desc_idx].skb; | 3351 | skb = tp->rx_jumbo_buffers[desc_idx].skb; |
3352 | post_ptr = &tp->rx_jumbo_ptr; | 3352 | post_ptr = &tp->rx_jumbo_ptr; |
3353 | } | 3353 | } |
3354 | else { | 3354 | else { |
3355 | goto next_pkt_nopost; | 3355 | goto next_pkt_nopost; |
3356 | } | 3356 | } |
3357 | 3357 | ||
3358 | work_mask |= opaque_key; | 3358 | work_mask |= opaque_key; |
3359 | 3359 | ||
3360 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 3360 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
3361 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | 3361 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { |
3362 | drop_it: | 3362 | drop_it: |
3363 | tg3_recycle_rx(tp, opaque_key, | 3363 | tg3_recycle_rx(tp, opaque_key, |
3364 | desc_idx, *post_ptr); | 3364 | desc_idx, *post_ptr); |
3365 | drop_it_no_recycle: | 3365 | drop_it_no_recycle: |
3366 | /* Other statistics kept track of by card. */ | 3366 | /* Other statistics kept track of by card. */ |
3367 | tp->net_stats.rx_dropped++; | 3367 | tp->net_stats.rx_dropped++; |
3368 | goto next_pkt; | 3368 | goto next_pkt; |
3369 | } | 3369 | } |
3370 | 3370 | ||
3371 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ | 3371 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ |
3372 | 3372 | ||
3373 | if (len > RX_COPY_THRESHOLD | 3373 | if (len > RX_COPY_THRESHOLD |
3374 | && tp->rx_offset == 2 | 3374 | && tp->rx_offset == 2 |
3375 | /* rx_offset != 2 iff this is a 5701 card running | 3375 | /* rx_offset != 2 iff this is a 5701 card running |
3376 | * in PCI-X mode [see tg3_get_invariants()] */ | 3376 | * in PCI-X mode [see tg3_get_invariants()] */ |
3377 | ) { | 3377 | ) { |
3378 | int skb_size; | 3378 | int skb_size; |
3379 | 3379 | ||
3380 | skb_size = tg3_alloc_rx_skb(tp, opaque_key, | 3380 | skb_size = tg3_alloc_rx_skb(tp, opaque_key, |
3381 | desc_idx, *post_ptr); | 3381 | desc_idx, *post_ptr); |
3382 | if (skb_size < 0) | 3382 | if (skb_size < 0) |
3383 | goto drop_it; | 3383 | goto drop_it; |
3384 | 3384 | ||
3385 | pci_unmap_single(tp->pdev, dma_addr, | 3385 | pci_unmap_single(tp->pdev, dma_addr, |
3386 | skb_size - tp->rx_offset, | 3386 | skb_size - tp->rx_offset, |
3387 | PCI_DMA_FROMDEVICE); | 3387 | PCI_DMA_FROMDEVICE); |
3388 | 3388 | ||
3389 | skb_put(skb, len); | 3389 | skb_put(skb, len); |
3390 | } else { | 3390 | } else { |
3391 | struct sk_buff *copy_skb; | 3391 | struct sk_buff *copy_skb; |
3392 | 3392 | ||
3393 | tg3_recycle_rx(tp, opaque_key, | 3393 | tg3_recycle_rx(tp, opaque_key, |
3394 | desc_idx, *post_ptr); | 3394 | desc_idx, *post_ptr); |
3395 | 3395 | ||
3396 | copy_skb = netdev_alloc_skb(tp->dev, len + 2); | 3396 | copy_skb = netdev_alloc_skb(tp->dev, len + 2); |
3397 | if (copy_skb == NULL) | 3397 | if (copy_skb == NULL) |
3398 | goto drop_it_no_recycle; | 3398 | goto drop_it_no_recycle; |
3399 | 3399 | ||
3400 | skb_reserve(copy_skb, 2); | 3400 | skb_reserve(copy_skb, 2); |
3401 | skb_put(copy_skb, len); | 3401 | skb_put(copy_skb, len); |
3402 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3402 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3403 | skb_copy_from_linear_data(skb, copy_skb->data, len); | 3403 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
3404 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3404 | pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
3405 | 3405 | ||
3406 | /* We'll reuse the original ring buffer. */ | 3406 | /* We'll reuse the original ring buffer. */ |
3407 | skb = copy_skb; | 3407 | skb = copy_skb; |
3408 | } | 3408 | } |
3409 | 3409 | ||
3410 | if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && | 3410 | if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && |
3411 | (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && | 3411 | (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && |
3412 | (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) | 3412 | (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) |
3413 | >> RXD_TCPCSUM_SHIFT) == 0xffff)) | 3413 | >> RXD_TCPCSUM_SHIFT) == 0xffff)) |
3414 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 3414 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3415 | else | 3415 | else |
3416 | skb->ip_summed = CHECKSUM_NONE; | 3416 | skb->ip_summed = CHECKSUM_NONE; |
3417 | 3417 | ||
3418 | skb->protocol = eth_type_trans(skb, tp->dev); | 3418 | skb->protocol = eth_type_trans(skb, tp->dev); |
3419 | #if TG3_VLAN_TAG_USED | 3419 | #if TG3_VLAN_TAG_USED |
3420 | if (tp->vlgrp != NULL && | 3420 | if (tp->vlgrp != NULL && |
3421 | desc->type_flags & RXD_FLAG_VLAN) { | 3421 | desc->type_flags & RXD_FLAG_VLAN) { |
3422 | tg3_vlan_rx(tp, skb, | 3422 | tg3_vlan_rx(tp, skb, |
3423 | desc->err_vlan & RXD_VLAN_MASK); | 3423 | desc->err_vlan & RXD_VLAN_MASK); |
3424 | } else | 3424 | } else |
3425 | #endif | 3425 | #endif |
3426 | netif_receive_skb(skb); | 3426 | netif_receive_skb(skb); |
3427 | 3427 | ||
3428 | tp->dev->last_rx = jiffies; | 3428 | tp->dev->last_rx = jiffies; |
3429 | received++; | 3429 | received++; |
3430 | budget--; | 3430 | budget--; |
3431 | 3431 | ||
3432 | next_pkt: | 3432 | next_pkt: |
3433 | (*post_ptr)++; | 3433 | (*post_ptr)++; |
3434 | 3434 | ||
3435 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { | 3435 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { |
3436 | u32 idx = *post_ptr % TG3_RX_RING_SIZE; | 3436 | u32 idx = *post_ptr % TG3_RX_RING_SIZE; |
3437 | 3437 | ||
3438 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + | 3438 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + |
3439 | TG3_64BIT_REG_LOW, idx); | 3439 | TG3_64BIT_REG_LOW, idx); |
3440 | work_mask &= ~RXD_OPAQUE_RING_STD; | 3440 | work_mask &= ~RXD_OPAQUE_RING_STD; |
3441 | rx_std_posted = 0; | 3441 | rx_std_posted = 0; |
3442 | } | 3442 | } |
3443 | next_pkt_nopost: | 3443 | next_pkt_nopost: |
3444 | sw_idx++; | 3444 | sw_idx++; |
3445 | sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); | 3445 | sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); |
3446 | 3446 | ||
3447 | /* Refresh hw_idx to see if there is new work */ | 3447 | /* Refresh hw_idx to see if there is new work */ |
3448 | if (sw_idx == hw_idx) { | 3448 | if (sw_idx == hw_idx) { |
3449 | hw_idx = tp->hw_status->idx[0].rx_producer; | 3449 | hw_idx = tp->hw_status->idx[0].rx_producer; |
3450 | rmb(); | 3450 | rmb(); |
3451 | } | 3451 | } |
3452 | } | 3452 | } |
3453 | 3453 | ||
3454 | /* ACK the status ring. */ | 3454 | /* ACK the status ring. */ |
3455 | tp->rx_rcb_ptr = sw_idx; | 3455 | tp->rx_rcb_ptr = sw_idx; |
3456 | tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx); | 3456 | tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx); |
3457 | 3457 | ||
3458 | /* Refill RX ring(s). */ | 3458 | /* Refill RX ring(s). */ |
3459 | if (work_mask & RXD_OPAQUE_RING_STD) { | 3459 | if (work_mask & RXD_OPAQUE_RING_STD) { |
3460 | sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; | 3460 | sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; |
3461 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 3461 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, |
3462 | sw_idx); | 3462 | sw_idx); |
3463 | } | 3463 | } |
3464 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { | 3464 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { |
3465 | sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; | 3465 | sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; |
3466 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 3466 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, |
3467 | sw_idx); | 3467 | sw_idx); |
3468 | } | 3468 | } |
3469 | mmiowb(); | 3469 | mmiowb(); |
3470 | 3470 | ||
3471 | return received; | 3471 | return received; |
3472 | } | 3472 | } |
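One detail worth calling out in the receive loop above is the copy-break decision: frames at or below RX_COPY_THRESHOLD (and any frame when rx_offset != 2) are copied into a small freshly allocated skb so the large ring buffer can be recycled in place, while bigger frames are handed up the stack directly and the ring slot gets a new buffer. A rough user-space model of that trade-off (the 256-byte threshold and 1536-byte buffer are assumptions for the sketch, not the driver's values):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define COPY_THRESHOLD 256      /* assumed copy-break point       */
    #define RING_BUF_SIZE  1536     /* assumed size of a ring buffer  */

    struct rx_result {
        void *buf_for_stack;        /* what gets passed up            */
        int   ring_buf_recycled;    /* 1 if the ring keeps its buffer */
    };

    static struct rx_result rx_one(unsigned char *ring_buf, size_t len)
    {
        struct rx_result r = { NULL, 0 };

        if (len <= COPY_THRESHOLD) {
            unsigned char *copy = malloc(len);

            if (copy)
                memcpy(copy, ring_buf, len);
            r.buf_for_stack     = copy;     /* small frame: copy it out    */
            r.ring_buf_recycled = 1;        /* ring buffer stays in place  */
        } else {
            r.buf_for_stack     = ring_buf; /* large frame: no copy        */
            r.ring_buf_recycled = 0;        /* slot needs a fresh buffer   */
        }
        return r;
    }

    int main(void)
    {
        unsigned char *slot = malloc(RING_BUF_SIZE);
        struct rx_result r;

        if (!slot)
            return 1;
        memset(slot, 0, RING_BUF_SIZE);

        r = rx_one(slot, 60);
        printf("60-byte frame:   recycled=%d\n", r.ring_buf_recycled);
        if (r.buf_for_stack != slot)
            free(r.buf_for_stack);

        r = rx_one(slot, 1400);
        printf("1400-byte frame: recycled=%d\n", r.ring_buf_recycled);
        free(slot);              /* consumed by the "stack" in the large-frame case */
        return 0;
    }

The copy costs CPU cycles on small frames, but it avoids tying up a full-size ring buffer and a fresh allocation for every tiny packet.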
3473 | 3473 | ||
3474 | static int tg3_poll(struct net_device *netdev, int *budget) | 3474 | static int tg3_poll(struct net_device *netdev, int *budget) |
3475 | { | 3475 | { |
3476 | struct tg3 *tp = netdev_priv(netdev); | 3476 | struct tg3 *tp = netdev_priv(netdev); |
3477 | struct tg3_hw_status *sblk = tp->hw_status; | 3477 | struct tg3_hw_status *sblk = tp->hw_status; |
3478 | int done; | 3478 | int done; |
3479 | 3479 | ||
3480 | /* handle link change and other phy events */ | 3480 | /* handle link change and other phy events */ |
3481 | if (!(tp->tg3_flags & | 3481 | if (!(tp->tg3_flags & |
3482 | (TG3_FLAG_USE_LINKCHG_REG | | 3482 | (TG3_FLAG_USE_LINKCHG_REG | |
3483 | TG3_FLAG_POLL_SERDES))) { | 3483 | TG3_FLAG_POLL_SERDES))) { |
3484 | if (sblk->status & SD_STATUS_LINK_CHG) { | 3484 | if (sblk->status & SD_STATUS_LINK_CHG) { |
3485 | sblk->status = SD_STATUS_UPDATED | | 3485 | sblk->status = SD_STATUS_UPDATED | |
3486 | (sblk->status & ~SD_STATUS_LINK_CHG); | 3486 | (sblk->status & ~SD_STATUS_LINK_CHG); |
3487 | spin_lock(&tp->lock); | 3487 | spin_lock(&tp->lock); |
3488 | tg3_setup_phy(tp, 0); | 3488 | tg3_setup_phy(tp, 0); |
3489 | spin_unlock(&tp->lock); | 3489 | spin_unlock(&tp->lock); |
3490 | } | 3490 | } |
3491 | } | 3491 | } |
3492 | 3492 | ||
3493 | /* run TX completion thread */ | 3493 | /* run TX completion thread */ |
3494 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { | 3494 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { |
3495 | tg3_tx(tp); | 3495 | tg3_tx(tp); |
3496 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { | 3496 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { |
3497 | netif_rx_complete(netdev); | 3497 | netif_rx_complete(netdev); |
3498 | schedule_work(&tp->reset_task); | 3498 | schedule_work(&tp->reset_task); |
3499 | return 0; | 3499 | return 0; |
3500 | } | 3500 | } |
3501 | } | 3501 | } |
3502 | 3502 | ||
3503 | /* run RX thread, within the bounds set by NAPI. | 3503 | /* run RX thread, within the bounds set by NAPI. |
3504 | * All RX "locking" is done by ensuring outside | 3504 | * All RX "locking" is done by ensuring outside |
3505 | * code synchronizes with dev->poll() | 3505 | * code synchronizes with dev->poll() |
3506 | */ | 3506 | */ |
3507 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { | 3507 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { |
3508 | int orig_budget = *budget; | 3508 | int orig_budget = *budget; |
3509 | int work_done; | 3509 | int work_done; |
3510 | 3510 | ||
3511 | if (orig_budget > netdev->quota) | 3511 | if (orig_budget > netdev->quota) |
3512 | orig_budget = netdev->quota; | 3512 | orig_budget = netdev->quota; |
3513 | 3513 | ||
3514 | work_done = tg3_rx(tp, orig_budget); | 3514 | work_done = tg3_rx(tp, orig_budget); |
3515 | 3515 | ||
3516 | *budget -= work_done; | 3516 | *budget -= work_done; |
3517 | netdev->quota -= work_done; | 3517 | netdev->quota -= work_done; |
3518 | } | 3518 | } |
3519 | 3519 | ||
3520 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 3520 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { |
3521 | tp->last_tag = sblk->status_tag; | 3521 | tp->last_tag = sblk->status_tag; |
3522 | rmb(); | 3522 | rmb(); |
3523 | } else | 3523 | } else |
3524 | sblk->status &= ~SD_STATUS_UPDATED; | 3524 | sblk->status &= ~SD_STATUS_UPDATED; |
3525 | 3525 | ||
3526 | /* if no more work, tell net stack and NIC we're done */ | 3526 | /* if no more work, tell net stack and NIC we're done */ |
3527 | done = !tg3_has_work(tp); | 3527 | done = !tg3_has_work(tp); |
3528 | if (done) { | 3528 | if (done) { |
3529 | netif_rx_complete(netdev); | 3529 | netif_rx_complete(netdev); |
3530 | tg3_restart_ints(tp); | 3530 | tg3_restart_ints(tp); |
3531 | } | 3531 | } |
3532 | 3532 | ||
3533 | return (done ? 0 : 1); | 3533 | return (done ? 0 : 1); |
3534 | } | 3534 | } |
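tg3_poll() implements the old dev->poll() interface: RX processing is capped by the smaller of *budget and dev->quota, both counters are charged for the work done, and the return value tells the core whether the device still has work and should stay on the poll list. A rough model of just that accounting (hypothetical fake_dev/fake_rx; in the real kernel the core, not the driver, refills the quota between calls):

    #include <stdio.h>

    struct fake_dev {
        int quota;           /* per-device allowance, refilled by the "core" */
        int pending_rx;      /* packets still sitting in the RX ring         */
    };

    static int fake_rx(struct fake_dev *dev, int limit)
    {
        int done = dev->pending_rx < limit ? dev->pending_rx : limit;

        dev->pending_rx -= done;
        return done;
    }

    static int fake_poll(struct fake_dev *dev, int *budget)
    {
        int limit = *budget < dev->quota ? *budget : dev->quota;
        int work_done = fake_rx(dev, limit);

        *budget    -= work_done;
        dev->quota -= work_done;

        return dev->pending_rx ? 1 : 0;   /* 1 means "poll me again" */
    }

    int main(void)
    {
        struct fake_dev dev = { 64, 100 };
        int budget = 300;

        while (fake_poll(&dev, &budget))
            dev.quota = 64;               /* the core's refill, done by hand here */

        printf("leftover budget %d, pending %d\n", budget, dev.pending_rx);
        return 0;
    }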
3535 | 3535 | ||
3536 | static void tg3_irq_quiesce(struct tg3 *tp) | 3536 | static void tg3_irq_quiesce(struct tg3 *tp) |
3537 | { | 3537 | { |
3538 | BUG_ON(tp->irq_sync); | 3538 | BUG_ON(tp->irq_sync); |
3539 | 3539 | ||
3540 | tp->irq_sync = 1; | 3540 | tp->irq_sync = 1; |
3541 | smp_mb(); | 3541 | smp_mb(); |
3542 | 3542 | ||
3543 | synchronize_irq(tp->pdev->irq); | 3543 | synchronize_irq(tp->pdev->irq); |
3544 | } | 3544 | } |
3545 | 3545 | ||
3546 | static inline int tg3_irq_sync(struct tg3 *tp) | 3546 | static inline int tg3_irq_sync(struct tg3 *tp) |
3547 | { | 3547 | { |
3548 | return tp->irq_sync; | 3548 | return tp->irq_sync; |
3549 | } | 3549 | } |
3550 | 3550 | ||
3551 | /* Fully shut down all tg3 driver activity elsewhere in the system. | 3551 | /* Fully shut down all tg3 driver activity elsewhere in the system. |
3552 | * If irq_sync is non-zero, then the IRQ handler must be synchronized | 3552 | * If irq_sync is non-zero, then the IRQ handler must be synchronized |
3553 | * with as well. Most of the time, this is not necessary except when | 3553 | * with as well. Most of the time, this is not necessary except when |
3554 | * shutting down the device. | 3554 | * shutting down the device. |
3555 | */ | 3555 | */ |
3556 | static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) | 3556 | static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) |
3557 | { | 3557 | { |
3558 | spin_lock_bh(&tp->lock); | 3558 | spin_lock_bh(&tp->lock); |
3559 | if (irq_sync) | 3559 | if (irq_sync) |
3560 | tg3_irq_quiesce(tp); | 3560 | tg3_irq_quiesce(tp); |
3561 | } | 3561 | } |
3562 | 3562 | ||
3563 | static inline void tg3_full_unlock(struct tg3 *tp) | 3563 | static inline void tg3_full_unlock(struct tg3 *tp) |
3564 | { | 3564 | { |
3565 | spin_unlock_bh(&tp->lock); | 3565 | spin_unlock_bh(&tp->lock); |
3566 | } | 3566 | } |
3567 | 3567 | ||
3568 | /* One-shot MSI handler - Chip automatically disables interrupt | 3568 | /* One-shot MSI handler - Chip automatically disables interrupt |
3569 | * after sending MSI so driver doesn't have to do it. | 3569 | * after sending MSI so driver doesn't have to do it. |
3570 | */ | 3570 | */ |
3571 | static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) | 3571 | static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) |
3572 | { | 3572 | { |
3573 | struct net_device *dev = dev_id; | 3573 | struct net_device *dev = dev_id; |
3574 | struct tg3 *tp = netdev_priv(dev); | 3574 | struct tg3 *tp = netdev_priv(dev); |
3575 | 3575 | ||
3576 | prefetch(tp->hw_status); | 3576 | prefetch(tp->hw_status); |
3577 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3577 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3578 | 3578 | ||
3579 | if (likely(!tg3_irq_sync(tp))) | 3579 | if (likely(!tg3_irq_sync(tp))) |
3580 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3580 | netif_rx_schedule(dev); /* schedule NAPI poll */ |
3581 | 3581 | ||
3582 | return IRQ_HANDLED; | 3582 | return IRQ_HANDLED; |
3583 | } | 3583 | } |
3584 | 3584 | ||
3585 | /* MSI ISR - No need to check for interrupt sharing and no need to | 3585 | /* MSI ISR - No need to check for interrupt sharing and no need to |
3586 | * flush status block and interrupt mailbox. PCI ordering rules | 3586 | * flush status block and interrupt mailbox. PCI ordering rules |
3587 | * guarantee that MSI will arrive after the status block. | 3587 | * guarantee that MSI will arrive after the status block. |
3588 | */ | 3588 | */ |
3589 | static irqreturn_t tg3_msi(int irq, void *dev_id) | 3589 | static irqreturn_t tg3_msi(int irq, void *dev_id) |
3590 | { | 3590 | { |
3591 | struct net_device *dev = dev_id; | 3591 | struct net_device *dev = dev_id; |
3592 | struct tg3 *tp = netdev_priv(dev); | 3592 | struct tg3 *tp = netdev_priv(dev); |
3593 | 3593 | ||
3594 | prefetch(tp->hw_status); | 3594 | prefetch(tp->hw_status); |
3595 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3595 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3596 | /* | 3596 | /* |
3597 | * Writing any value to intr-mbox-0 clears PCI INTA# and | 3597 | * Writing any value to intr-mbox-0 clears PCI INTA# and |
3598 | * chip-internal interrupt pending events. | 3598 | * chip-internal interrupt pending events. |
3599 | * Writing non-zero to intr-mbox-0 additionally tells the | 3599 | * Writing non-zero to intr-mbox-0 additionally tells the |
3600 | * NIC to stop sending us irqs, engaging "in-intr-handler" | 3600 | * NIC to stop sending us irqs, engaging "in-intr-handler" |
3601 | * event coalescing. | 3601 | * event coalescing. |
3602 | */ | 3602 | */ |
3603 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 3603 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
3604 | if (likely(!tg3_irq_sync(tp))) | 3604 | if (likely(!tg3_irq_sync(tp))) |
3605 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3605 | netif_rx_schedule(dev); /* schedule NAPI poll */ |
3606 | 3606 | ||
3607 | return IRQ_RETVAL(1); | 3607 | return IRQ_RETVAL(1); |
3608 | } | 3608 | } |
3609 | 3609 | ||
3610 | static irqreturn_t tg3_interrupt(int irq, void *dev_id) | 3610 | static irqreturn_t tg3_interrupt(int irq, void *dev_id) |
3611 | { | 3611 | { |
3612 | struct net_device *dev = dev_id; | 3612 | struct net_device *dev = dev_id; |
3613 | struct tg3 *tp = netdev_priv(dev); | 3613 | struct tg3 *tp = netdev_priv(dev); |
3614 | struct tg3_hw_status *sblk = tp->hw_status; | 3614 | struct tg3_hw_status *sblk = tp->hw_status; |
3615 | unsigned int handled = 1; | 3615 | unsigned int handled = 1; |
3616 | 3616 | ||
3617 | /* In INTx mode, it is possible for the interrupt to arrive at | 3617 | /* In INTx mode, it is possible for the interrupt to arrive at |
3618 | * the CPU before the status block posted prior to the interrupt arrives. | 3618 | * the CPU before the status block posted prior to the interrupt arrives. |
3619 | * Reading the PCI State register will confirm whether the | 3619 | * Reading the PCI State register will confirm whether the |
3620 | * interrupt is ours and will flush the status block. | 3620 | * interrupt is ours and will flush the status block. |
3621 | */ | 3621 | */ |
3622 | if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { | 3622 | if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { |
3623 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 3623 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || |
3624 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 3624 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { |
3625 | handled = 0; | 3625 | handled = 0; |
3626 | goto out; | 3626 | goto out; |
3627 | } | 3627 | } |
3628 | } | 3628 | } |
3629 | 3629 | ||
3630 | /* | 3630 | /* |
3631 | * Writing any value to intr-mbox-0 clears PCI INTA# and | 3631 | * Writing any value to intr-mbox-0 clears PCI INTA# and |
3632 | * chip-internal interrupt pending events. | 3632 | * chip-internal interrupt pending events. |
3633 | * Writing non-zero to intr-mbox-0 additionally tells the | 3633 | * Writing non-zero to intr-mbox-0 additionally tells the |
3634 | * NIC to stop sending us irqs, engaging "in-intr-handler" | 3634 | * NIC to stop sending us irqs, engaging "in-intr-handler" |
3635 | * event coalescing. | 3635 | * event coalescing. |
3636 | * | 3636 | * |
3637 | * Flush the mailbox to de-assert the IRQ immediately to prevent | 3637 | * Flush the mailbox to de-assert the IRQ immediately to prevent |
3638 | * spurious interrupts. The flush impacts performance but | 3638 | * spurious interrupts. The flush impacts performance but |
3639 | * excessive spurious interrupts can be worse in some cases. | 3639 | * excessive spurious interrupts can be worse in some cases. |
3640 | */ | 3640 | */ |
3641 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 3641 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
3642 | if (tg3_irq_sync(tp)) | 3642 | if (tg3_irq_sync(tp)) |
3643 | goto out; | 3643 | goto out; |
3644 | sblk->status &= ~SD_STATUS_UPDATED; | 3644 | sblk->status &= ~SD_STATUS_UPDATED; |
3645 | if (likely(tg3_has_work(tp))) { | 3645 | if (likely(tg3_has_work(tp))) { |
3646 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3646 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3647 | netif_rx_schedule(dev); /* schedule NAPI poll */ | 3647 | netif_rx_schedule(dev); /* schedule NAPI poll */ |
3648 | } else { | 3648 | } else { |
3649 | /* No work, shared interrupt perhaps? re-enable | 3649 | /* No work, shared interrupt perhaps? re-enable |
3650 | * interrupts, and flush that PCI write | 3650 | * interrupts, and flush that PCI write |
3651 | */ | 3651 | */ |
3652 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 3652 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
3653 | 0x00000000); | 3653 | 0x00000000); |
3654 | } | 3654 | } |
3655 | out: | 3655 | out: |
3656 | return IRQ_RETVAL(handled); | 3656 | return IRQ_RETVAL(handled); |
3657 | } | 3657 | } |
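
The INTx handlers above all follow the same ownership test: if the status block has not been updated (SD_STATUS_UPDATED clear, or the tag unchanged), read the PCI state register to decide whether the interrupt really belongs to this device before claiming it, and acknowledge by writing 1 to the interrupt mailbox, which also masks further irqs until the poll routine re-enables them. A minimal user-space sketch of that ownership check, with the hardware reads replaced by flags in a hypothetical fake_dev structure (none of these names come from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the hardware state a shared-IRQ handler
 * consults: a status-block "updated" bit and an "interrupt line
 * active" bit that a real driver would read from a device register.
 */
struct fake_dev {
        bool status_updated;    /* set by the device along with the status block */
        bool int_line_active;   /* inverse of a PCISTATE "int not active" bit    */
};

/* Return true if this device owns the interrupt and work was claimed. */
static bool intx_handler(struct fake_dev *dev)
{
        if (!dev->status_updated && !dev->int_line_active)
                return false;   /* not ours: another device shares the line */

        /* "Ack" the interrupt; a real driver would write the mailbox
         * here to de-assert INTA#, mask further irqs, and schedule
         * its poll routine.
         */
        dev->status_updated = false;
        return true;
}

int main(void)
{
        struct fake_dev other = { .status_updated = false, .int_line_active = false };
        struct fake_dev ours  = { .status_updated = true,  .int_line_active = true  };

        printf("shared-only irq handled: %d\n", intx_handler(&other)); /* 0 */
        printf("our irq handled:         %d\n", intx_handler(&ours));  /* 1 */
        return 0;
}
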
3658 | 3658 | ||
3659 | static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | 3659 | static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) |
3660 | { | 3660 | { |
3661 | struct net_device *dev = dev_id; | 3661 | struct net_device *dev = dev_id; |
3662 | struct tg3 *tp = netdev_priv(dev); | 3662 | struct tg3 *tp = netdev_priv(dev); |
3663 | struct tg3_hw_status *sblk = tp->hw_status; | 3663 | struct tg3_hw_status *sblk = tp->hw_status; |
3664 | unsigned int handled = 1; | 3664 | unsigned int handled = 1; |
3665 | 3665 | ||
3666 | /* In INTx mode, it is possible for the interrupt to arrive at | 3666 | /* In INTx mode, it is possible for the interrupt to arrive at |
3667 | * the CPU before the status block posted prior to the interrupt has arrived. | 3667 | * the CPU before the status block posted prior to the interrupt has arrived. |
3668 | * Reading the PCI State register will confirm whether the | 3668 | * Reading the PCI State register will confirm whether the |
3669 | * interrupt is ours and will flush the status block. | 3669 | * interrupt is ours and will flush the status block. |
3670 | */ | 3670 | */ |
3671 | if (unlikely(sblk->status_tag == tp->last_tag)) { | 3671 | if (unlikely(sblk->status_tag == tp->last_tag)) { |
3672 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 3672 | if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || |
3673 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 3673 | (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { |
3674 | handled = 0; | 3674 | handled = 0; |
3675 | goto out; | 3675 | goto out; |
3676 | } | 3676 | } |
3677 | } | 3677 | } |
3678 | 3678 | ||
3679 | /* | 3679 | /* |
3680 | * Writing any value to intr-mbox-0 clears PCI INTA# and | 3680 | * Writing any value to intr-mbox-0 clears PCI INTA# and |
3681 | * chip-internal interrupt pending events. | 3681 | * chip-internal interrupt pending events. |
3682 | * Writing non-zero to intr-mbox-0 additionally tells the | 3682 | * Writing non-zero to intr-mbox-0 additionally tells the |
3683 | * NIC to stop sending us irqs, engaging "in-intr-handler" | 3683 | * NIC to stop sending us irqs, engaging "in-intr-handler" |
3684 | * event coalescing. | 3684 | * event coalescing. |
3685 | * | 3685 | * |
3686 | * Flush the mailbox to de-assert the IRQ immediately to prevent | 3686 | * Flush the mailbox to de-assert the IRQ immediately to prevent |
3687 | * spurious interrupts. The flush impacts performance but | 3687 | * spurious interrupts. The flush impacts performance but |
3688 | * excessive spurious interrupts can be worse in some cases. | 3688 | * excessive spurious interrupts can be worse in some cases. |
3689 | */ | 3689 | */ |
3690 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 3690 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); |
3691 | if (tg3_irq_sync(tp)) | 3691 | if (tg3_irq_sync(tp)) |
3692 | goto out; | 3692 | goto out; |
3693 | if (netif_rx_schedule_prep(dev)) { | 3693 | if (netif_rx_schedule_prep(dev)) { |
3694 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 3694 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
3695 | /* Update last_tag to mark that this status has been | 3695 | /* Update last_tag to mark that this status has been |
3696 | * seen. Because the interrupt may be shared, we may be | 3696 | * seen. Because the interrupt may be shared, we may be |
3697 | * racing with tg3_poll(), so only update last_tag | 3697 | * racing with tg3_poll(), so only update last_tag |
3698 | * if tg3_poll() is not scheduled. | 3698 | * if tg3_poll() is not scheduled. |
3699 | */ | 3699 | */ |
3700 | tp->last_tag = sblk->status_tag; | 3700 | tp->last_tag = sblk->status_tag; |
3701 | __netif_rx_schedule(dev); | 3701 | __netif_rx_schedule(dev); |
3702 | } | 3702 | } |
3703 | out: | 3703 | out: |
3704 | return IRQ_RETVAL(handled); | 3704 | return IRQ_RETVAL(handled); |
3705 | } | 3705 | } |
3706 | 3706 | ||
3707 | /* ISR for interrupt test */ | 3707 | /* ISR for interrupt test */ |
3708 | static irqreturn_t tg3_test_isr(int irq, void *dev_id) | 3708 | static irqreturn_t tg3_test_isr(int irq, void *dev_id) |
3709 | { | 3709 | { |
3710 | struct net_device *dev = dev_id; | 3710 | struct net_device *dev = dev_id; |
3711 | struct tg3 *tp = netdev_priv(dev); | 3711 | struct tg3 *tp = netdev_priv(dev); |
3712 | struct tg3_hw_status *sblk = tp->hw_status; | 3712 | struct tg3_hw_status *sblk = tp->hw_status; |
3713 | 3713 | ||
3714 | if ((sblk->status & SD_STATUS_UPDATED) || | 3714 | if ((sblk->status & SD_STATUS_UPDATED) || |
3715 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 3715 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { |
3716 | tg3_disable_ints(tp); | 3716 | tg3_disable_ints(tp); |
3717 | return IRQ_RETVAL(1); | 3717 | return IRQ_RETVAL(1); |
3718 | } | 3718 | } |
3719 | return IRQ_RETVAL(0); | 3719 | return IRQ_RETVAL(0); |
3720 | } | 3720 | } |
3721 | 3721 | ||
3722 | static int tg3_init_hw(struct tg3 *, int); | 3722 | static int tg3_init_hw(struct tg3 *, int); |
3723 | static int tg3_halt(struct tg3 *, int, int); | 3723 | static int tg3_halt(struct tg3 *, int, int); |
3724 | 3724 | ||
3725 | /* Restart hardware after configuration changes, self-test, etc. | 3725 | /* Restart hardware after configuration changes, self-test, etc. |
3726 | * Invoked with tp->lock held. | 3726 | * Invoked with tp->lock held. |
3727 | */ | 3727 | */ |
3728 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | 3728 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) |
3729 | { | 3729 | { |
3730 | int err; | 3730 | int err; |
3731 | 3731 | ||
3732 | err = tg3_init_hw(tp, reset_phy); | 3732 | err = tg3_init_hw(tp, reset_phy); |
3733 | if (err) { | 3733 | if (err) { |
3734 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | 3734 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " |
3735 | "aborting.\n", tp->dev->name); | 3735 | "aborting.\n", tp->dev->name); |
3736 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 3736 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
3737 | tg3_full_unlock(tp); | 3737 | tg3_full_unlock(tp); |
3738 | del_timer_sync(&tp->timer); | 3738 | del_timer_sync(&tp->timer); |
3739 | tp->irq_sync = 0; | 3739 | tp->irq_sync = 0; |
3740 | netif_poll_enable(tp->dev); | 3740 | netif_poll_enable(tp->dev); |
3741 | dev_close(tp->dev); | 3741 | dev_close(tp->dev); |
3742 | tg3_full_lock(tp, 0); | 3742 | tg3_full_lock(tp, 0); |
3743 | } | 3743 | } |
3744 | return err; | 3744 | return err; |
3745 | } | 3745 | } |
3746 | 3746 | ||
3747 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3747 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3748 | static void tg3_poll_controller(struct net_device *dev) | 3748 | static void tg3_poll_controller(struct net_device *dev) |
3749 | { | 3749 | { |
3750 | struct tg3 *tp = netdev_priv(dev); | 3750 | struct tg3 *tp = netdev_priv(dev); |
3751 | 3751 | ||
3752 | tg3_interrupt(tp->pdev->irq, dev); | 3752 | tg3_interrupt(tp->pdev->irq, dev); |
3753 | } | 3753 | } |
3754 | #endif | 3754 | #endif |
3755 | 3755 | ||
3756 | static void tg3_reset_task(struct work_struct *work) | 3756 | static void tg3_reset_task(struct work_struct *work) |
3757 | { | 3757 | { |
3758 | struct tg3 *tp = container_of(work, struct tg3, reset_task); | 3758 | struct tg3 *tp = container_of(work, struct tg3, reset_task); |
3759 | unsigned int restart_timer; | 3759 | unsigned int restart_timer; |
3760 | 3760 | ||
3761 | tg3_full_lock(tp, 0); | 3761 | tg3_full_lock(tp, 0); |
3762 | 3762 | ||
3763 | if (!netif_running(tp->dev)) { | 3763 | if (!netif_running(tp->dev)) { |
3764 | tg3_full_unlock(tp); | 3764 | tg3_full_unlock(tp); |
3765 | return; | 3765 | return; |
3766 | } | 3766 | } |
3767 | 3767 | ||
3768 | tg3_full_unlock(tp); | 3768 | tg3_full_unlock(tp); |
3769 | 3769 | ||
3770 | tg3_netif_stop(tp); | 3770 | tg3_netif_stop(tp); |
3771 | 3771 | ||
3772 | tg3_full_lock(tp, 1); | 3772 | tg3_full_lock(tp, 1); |
3773 | 3773 | ||
3774 | restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; | 3774 | restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; |
3775 | tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; | 3775 | tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; |
3776 | 3776 | ||
3777 | if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { | 3777 | if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { |
3778 | tp->write32_tx_mbox = tg3_write32_tx_mbox; | 3778 | tp->write32_tx_mbox = tg3_write32_tx_mbox; |
3779 | tp->write32_rx_mbox = tg3_write_flush_reg32; | 3779 | tp->write32_rx_mbox = tg3_write_flush_reg32; |
3780 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 3780 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; |
3781 | tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; | 3781 | tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; |
3782 | } | 3782 | } |
3783 | 3783 | ||
3784 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 3784 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
3785 | if (tg3_init_hw(tp, 1)) | 3785 | if (tg3_init_hw(tp, 1)) |
3786 | goto out; | 3786 | goto out; |
3787 | 3787 | ||
3788 | tg3_netif_start(tp); | 3788 | tg3_netif_start(tp); |
3789 | 3789 | ||
3790 | if (restart_timer) | 3790 | if (restart_timer) |
3791 | mod_timer(&tp->timer, jiffies + 1); | 3791 | mod_timer(&tp->timer, jiffies + 1); |
3792 | 3792 | ||
3793 | out: | 3793 | out: |
3794 | tg3_full_unlock(tp); | 3794 | tg3_full_unlock(tp); |
3795 | } | 3795 | } |
3796 | 3796 | ||
3797 | static void tg3_dump_short_state(struct tg3 *tp) | 3797 | static void tg3_dump_short_state(struct tg3 *tp) |
3798 | { | 3798 | { |
3799 | printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", | 3799 | printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", |
3800 | tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); | 3800 | tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); |
3801 | printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", | 3801 | printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", |
3802 | tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); | 3802 | tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); |
3803 | } | 3803 | } |
3804 | 3804 | ||
3805 | static void tg3_tx_timeout(struct net_device *dev) | 3805 | static void tg3_tx_timeout(struct net_device *dev) |
3806 | { | 3806 | { |
3807 | struct tg3 *tp = netdev_priv(dev); | 3807 | struct tg3 *tp = netdev_priv(dev); |
3808 | 3808 | ||
3809 | if (netif_msg_tx_err(tp)) { | 3809 | if (netif_msg_tx_err(tp)) { |
3810 | printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", | 3810 | printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", |
3811 | dev->name); | 3811 | dev->name); |
3812 | tg3_dump_short_state(tp); | 3812 | tg3_dump_short_state(tp); |
3813 | } | 3813 | } |
3814 | 3814 | ||
3815 | schedule_work(&tp->reset_task); | 3815 | schedule_work(&tp->reset_task); |
3816 | } | 3816 | } |
3817 | 3817 | ||
3818 | /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ | 3818 | /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ |
3819 | static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | 3819 | static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) |
3820 | { | 3820 | { |
3821 | u32 base = (u32) mapping & 0xffffffff; | 3821 | u32 base = (u32) mapping & 0xffffffff; |
3822 | 3822 | ||
3823 | return ((base > 0xffffdcc0) && | 3823 | return ((base > 0xffffdcc0) && |
3824 | (base + len + 8 < base)); | 3824 | (base + len + 8 < base)); |
3825 | } | 3825 | } |
3826 | 3826 | ||
3827 | /* Test for DMA addresses > 40-bit */ | 3827 | /* Test for DMA addresses > 40-bit */ |
3828 | static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | 3828 | static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, |
3829 | int len) | 3829 | int len) |
3830 | { | 3830 | { |
3831 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) | 3831 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) |
3832 | if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) | 3832 | if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) |
3833 | return (((u64) mapping + len) > DMA_40BIT_MASK); | 3833 | return (((u64) mapping + len) > DMA_40BIT_MASK); |
3834 | return 0; | 3834 | return 0; |
3835 | #else | 3835 | #else |
3836 | return 0; | 3836 | return 0; |
3837 | #endif | 3837 | #endif |
3838 | } | 3838 | } |
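
Both helpers above reduce to address arithmetic: tg3_4g_overflow_test() asks whether adding the buffer length (plus a small guard) to the low 32 bits of the mapping wraps around, i.e. whether the buffer straddles a 4 GB boundary, and tg3_40bit_overflow_test() asks whether the end of the buffer lies above a 40-bit address mask. A standalone sketch of the same checks; the 8-byte guard is kept, but the driver's extra 0xffffdcc0 threshold (a chip-specific guard band) is omitted here for clarity:

#include <stdint.h>
#include <stdio.h>

#define DMA_40BIT_LIMIT 0x000000ffffffffffULL

/* Does [mapping, mapping + len + 8) wrap the low 32 bits, i.e. cross
 * a 4 GB boundary?  Simplified version of tg3_4g_overflow_test().
 */
static int crosses_4g(uint64_t mapping, uint32_t len)
{
        uint32_t base = (uint32_t)mapping;

        return (uint32_t)(base + len + 8) < base;
}

/* Does the buffer end above the 40-bit DMA address limit? */
static int beyond_40bit(uint64_t mapping, uint32_t len)
{
        return (mapping + len) > DMA_40BIT_LIMIT;
}

int main(void)
{
        printf("%d\n", crosses_4g(0x00000000fffffff0ULL, 64));   /* 1: wraps into next 4 GB */
        printf("%d\n", crosses_4g(0x0000000010000000ULL, 64));   /* 0: stays in same 4 GB   */
        printf("%d\n", beyond_40bit(0x000000fffffffff0ULL, 64)); /* 1: exceeds 40-bit limit */
        printf("%d\n", beyond_40bit(0x0000000010000000ULL, 64)); /* 0: well below the limit */
        return 0;
}
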
3839 | 3839 | ||
3840 | static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); | 3840 | static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); |
3841 | 3841 | ||
3842 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 3842 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
3843 | static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | 3843 | static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, |
3844 | u32 last_plus_one, u32 *start, | 3844 | u32 last_plus_one, u32 *start, |
3845 | u32 base_flags, u32 mss) | 3845 | u32 base_flags, u32 mss) |
3846 | { | 3846 | { |
3847 | struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC); | 3847 | struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC); |
3848 | dma_addr_t new_addr = 0; | 3848 | dma_addr_t new_addr = 0; |
3849 | u32 entry = *start; | 3849 | u32 entry = *start; |
3850 | int i, ret = 0; | 3850 | int i, ret = 0; |
3851 | 3851 | ||
3852 | if (!new_skb) { | 3852 | if (!new_skb) { |
3853 | ret = -1; | 3853 | ret = -1; |
3854 | } else { | 3854 | } else { |
3855 | /* New SKB is guaranteed to be linear. */ | 3855 | /* New SKB is guaranteed to be linear. */ |
3856 | entry = *start; | 3856 | entry = *start; |
3857 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, | 3857 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, |
3858 | PCI_DMA_TODEVICE); | 3858 | PCI_DMA_TODEVICE); |
3859 | /* Make sure new skb does not cross any 4G boundaries. | 3859 | /* Make sure new skb does not cross any 4G boundaries. |
3860 | * Drop the packet if it does. | 3860 | * Drop the packet if it does. |
3861 | */ | 3861 | */ |
3862 | if (tg3_4g_overflow_test(new_addr, new_skb->len)) { | 3862 | if (tg3_4g_overflow_test(new_addr, new_skb->len)) { |
3863 | ret = -1; | 3863 | ret = -1; |
3864 | dev_kfree_skb(new_skb); | 3864 | dev_kfree_skb(new_skb); |
3865 | new_skb = NULL; | 3865 | new_skb = NULL; |
3866 | } else { | 3866 | } else { |
3867 | tg3_set_txd(tp, entry, new_addr, new_skb->len, | 3867 | tg3_set_txd(tp, entry, new_addr, new_skb->len, |
3868 | base_flags, 1 | (mss << 1)); | 3868 | base_flags, 1 | (mss << 1)); |
3869 | *start = NEXT_TX(entry); | 3869 | *start = NEXT_TX(entry); |
3870 | } | 3870 | } |
3871 | } | 3871 | } |
3872 | 3872 | ||
3873 | /* Now clean up the sw ring entries. */ | 3873 | /* Now clean up the sw ring entries. */ |
3874 | i = 0; | 3874 | i = 0; |
3875 | while (entry != last_plus_one) { | 3875 | while (entry != last_plus_one) { |
3876 | int len; | 3876 | int len; |
3877 | 3877 | ||
3878 | if (i == 0) | 3878 | if (i == 0) |
3879 | len = skb_headlen(skb); | 3879 | len = skb_headlen(skb); |
3880 | else | 3880 | else |
3881 | len = skb_shinfo(skb)->frags[i-1].size; | 3881 | len = skb_shinfo(skb)->frags[i-1].size; |
3882 | pci_unmap_single(tp->pdev, | 3882 | pci_unmap_single(tp->pdev, |
3883 | pci_unmap_addr(&tp->tx_buffers[entry], mapping), | 3883 | pci_unmap_addr(&tp->tx_buffers[entry], mapping), |
3884 | len, PCI_DMA_TODEVICE); | 3884 | len, PCI_DMA_TODEVICE); |
3885 | if (i == 0) { | 3885 | if (i == 0) { |
3886 | tp->tx_buffers[entry].skb = new_skb; | 3886 | tp->tx_buffers[entry].skb = new_skb; |
3887 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr); | 3887 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr); |
3888 | } else { | 3888 | } else { |
3889 | tp->tx_buffers[entry].skb = NULL; | 3889 | tp->tx_buffers[entry].skb = NULL; |
3890 | } | 3890 | } |
3891 | entry = NEXT_TX(entry); | 3891 | entry = NEXT_TX(entry); |
3892 | i++; | 3892 | i++; |
3893 | } | 3893 | } |
3894 | 3894 | ||
3895 | dev_kfree_skb(skb); | 3895 | dev_kfree_skb(skb); |
3896 | 3896 | ||
3897 | return ret; | 3897 | return ret; |
3898 | } | 3898 | } |
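
The workaround above is a bounce-buffer pattern: when the original mapping trips a hardware DMA bug, the payload is copied into a freshly allocated linear buffer in the hope that the new mapping is safe, and the packet is dropped only if even the copy violates the constraint. A user-space sketch of the same pattern, with malloc() standing in for the skb copy and a made-up constraint check (violates_constraint() is illustrative, not a driver function):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up constraint: pretend the device cannot use buffers whose
 * low 32 address bits wrap within the buffer (approximated with the
 * virtual address purely for illustration).
 */
static int violates_constraint(const void *buf, size_t len)
{
        uint32_t base = (uint32_t)(uintptr_t)buf;

        return (uint32_t)(base + len) < base;
}

/* Bounce-buffer workaround: if the original buffer violates the
 * constraint, copy it to a new allocation and re-check; return NULL
 * (i.e. "drop the packet") if the copy is still unusable.
 */
static void *bounce_if_needed(void *orig, size_t len)
{
        void *copy;

        if (!violates_constraint(orig, len))
                return orig;

        copy = malloc(len);
        if (!copy)
                return NULL;
        memcpy(copy, orig, len);

        if (violates_constraint(copy, len)) {
                free(copy);
                return NULL;            /* even the copy is unusable: drop */
        }
        return copy;
}

int main(void)
{
        char pkt[64] = "payload";
        void *usable = bounce_if_needed(pkt, sizeof(pkt));

        printf("%s\n", usable ? "transmit" : "drop");
        if (usable != pkt)
                free(usable);
        return 0;
}
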
3899 | 3899 | ||
3900 | static void tg3_set_txd(struct tg3 *tp, int entry, | 3900 | static void tg3_set_txd(struct tg3 *tp, int entry, |
3901 | dma_addr_t mapping, int len, u32 flags, | 3901 | dma_addr_t mapping, int len, u32 flags, |
3902 | u32 mss_and_is_end) | 3902 | u32 mss_and_is_end) |
3903 | { | 3903 | { |
3904 | struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry]; | 3904 | struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry]; |
3905 | int is_end = (mss_and_is_end & 0x1); | 3905 | int is_end = (mss_and_is_end & 0x1); |
3906 | u32 mss = (mss_and_is_end >> 1); | 3906 | u32 mss = (mss_and_is_end >> 1); |
3907 | u32 vlan_tag = 0; | 3907 | u32 vlan_tag = 0; |
3908 | 3908 | ||
3909 | if (is_end) | 3909 | if (is_end) |
3910 | flags |= TXD_FLAG_END; | 3910 | flags |= TXD_FLAG_END; |
3911 | if (flags & TXD_FLAG_VLAN) { | 3911 | if (flags & TXD_FLAG_VLAN) { |
3912 | vlan_tag = flags >> 16; | 3912 | vlan_tag = flags >> 16; |
3913 | flags &= 0xffff; | 3913 | flags &= 0xffff; |
3914 | } | 3914 | } |
3915 | vlan_tag |= (mss << TXD_MSS_SHIFT); | 3915 | vlan_tag |= (mss << TXD_MSS_SHIFT); |
3916 | 3916 | ||
3917 | txd->addr_hi = ((u64) mapping >> 32); | 3917 | txd->addr_hi = ((u64) mapping >> 32); |
3918 | txd->addr_lo = ((u64) mapping & 0xffffffff); | 3918 | txd->addr_lo = ((u64) mapping & 0xffffffff); |
3919 | txd->len_flags = (len << TXD_LEN_SHIFT) | flags; | 3919 | txd->len_flags = (len << TXD_LEN_SHIFT) | flags; |
3920 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | 3920 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; |
3921 | } | 3921 | } |
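
Callers of tg3_set_txd() fold two values into the single mss_and_is_end argument: bit 0 is the end-of-packet marker and the remaining bits carry the MSS, which is why the call sites pass expressions such as (i == last) | (mss << 1). Similarly, a VLAN tag travels in the upper 16 bits of the flags word and is split back out here. A small sketch of that pack/unpack arithmetic on its own:

#include <stdint.h>
#include <stdio.h>

/* Pack "is this the last fragment?" into bit 0 and the MSS above it,
 * mirroring the (i == last) | (mss << 1) expressions in the tx paths.
 */
static uint32_t pack_mss_is_end(uint32_t mss, int is_end)
{
        return (uint32_t)(is_end != 0) | (mss << 1);
}

int main(void)
{
        uint32_t packed = pack_mss_is_end(1448, 1);
        uint32_t flags  = 0x0001u | (0x0123u << 16);    /* low 16: flag bits, high 16: VLAN tag */

        printf("is_end=%u mss=%u\n", packed & 0x1, packed >> 1);
        printf("vlan_tag=0x%04x flags=0x%04x\n", flags >> 16, flags & 0xffff);
        return 0;
}
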
3922 | 3922 | ||
3923 | /* hard_start_xmit for devices that don't have any bugs and | 3923 | /* hard_start_xmit for devices that don't have any bugs and |
3924 | * support TG3_FLG2_HW_TSO_2 only. | 3924 | * support TG3_FLG2_HW_TSO_2 only. |
3925 | */ | 3925 | */ |
3926 | static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 3926 | static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3927 | { | 3927 | { |
3928 | struct tg3 *tp = netdev_priv(dev); | 3928 | struct tg3 *tp = netdev_priv(dev); |
3929 | dma_addr_t mapping; | 3929 | dma_addr_t mapping; |
3930 | u32 len, entry, base_flags, mss; | 3930 | u32 len, entry, base_flags, mss; |
3931 | 3931 | ||
3932 | len = skb_headlen(skb); | 3932 | len = skb_headlen(skb); |
3933 | 3933 | ||
3934 | /* We are running in BH disabled context with netif_tx_lock | 3934 | /* We are running in BH disabled context with netif_tx_lock |
3935 | * and TX reclaim runs via tp->poll inside of a software | 3935 | * and TX reclaim runs via tp->poll inside of a software |
3936 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 3936 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
3937 | * no IRQ context deadlocks to worry about either. Rejoice! | 3937 | * no IRQ context deadlocks to worry about either. Rejoice! |
3938 | */ | 3938 | */ |
3939 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { | 3939 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { |
3940 | if (!netif_queue_stopped(dev)) { | 3940 | if (!netif_queue_stopped(dev)) { |
3941 | netif_stop_queue(dev); | 3941 | netif_stop_queue(dev); |
3942 | 3942 | ||
3943 | /* This is a hard error, log it. */ | 3943 | /* This is a hard error, log it. */ |
3944 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 3944 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " |
3945 | "queue awake!\n", dev->name); | 3945 | "queue awake!\n", dev->name); |
3946 | } | 3946 | } |
3947 | return NETDEV_TX_BUSY; | 3947 | return NETDEV_TX_BUSY; |
3948 | } | 3948 | } |
3949 | 3949 | ||
3950 | entry = tp->tx_prod; | 3950 | entry = tp->tx_prod; |
3951 | base_flags = 0; | 3951 | base_flags = 0; |
3952 | mss = 0; | 3952 | mss = 0; |
3953 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 3953 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { |
3954 | int tcp_opt_len, ip_tcp_len; | 3954 | int tcp_opt_len, ip_tcp_len; |
3955 | 3955 | ||
3956 | if (skb_header_cloned(skb) && | 3956 | if (skb_header_cloned(skb) && |
3957 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 3957 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
3958 | dev_kfree_skb(skb); | 3958 | dev_kfree_skb(skb); |
3959 | goto out_unlock; | 3959 | goto out_unlock; |
3960 | } | 3960 | } |
3961 | 3961 | ||
3962 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 3962 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
3963 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; | 3963 | mss |= (skb_headlen(skb) - ETH_HLEN) << 9; |
3964 | else { | 3964 | else { |
3965 | struct iphdr *iph = ip_hdr(skb); | 3965 | struct iphdr *iph = ip_hdr(skb); |
3966 | 3966 | ||
3967 | tcp_opt_len = tcp_optlen(skb); | 3967 | tcp_opt_len = tcp_optlen(skb); |
3968 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | 3968 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
3969 | 3969 | ||
3970 | iph->check = 0; | 3970 | iph->check = 0; |
3971 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | 3971 | iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); |
3972 | mss |= (ip_tcp_len + tcp_opt_len) << 9; | 3972 | mss |= (ip_tcp_len + tcp_opt_len) << 9; |
3973 | } | 3973 | } |
3974 | 3974 | ||
3975 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 3975 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
3976 | TXD_FLAG_CPU_POST_DMA); | 3976 | TXD_FLAG_CPU_POST_DMA); |
3977 | 3977 | ||
3978 | tcp_hdr(skb)->check = 0; | 3978 | tcp_hdr(skb)->check = 0; |
3979 | 3979 | ||
3980 | } | 3980 | } |
3981 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | 3981 | else if (skb->ip_summed == CHECKSUM_PARTIAL) |
3982 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | 3982 | base_flags |= TXD_FLAG_TCPUDP_CSUM; |
3983 | #if TG3_VLAN_TAG_USED | 3983 | #if TG3_VLAN_TAG_USED |
3984 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | 3984 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) |
3985 | base_flags |= (TXD_FLAG_VLAN | | 3985 | base_flags |= (TXD_FLAG_VLAN | |
3986 | (vlan_tx_tag_get(skb) << 16)); | 3986 | (vlan_tx_tag_get(skb) << 16)); |
3987 | #endif | 3987 | #endif |
3988 | 3988 | ||
3989 | /* Queue skb data, a.k.a. the main skb fragment. */ | 3989 | /* Queue skb data, a.k.a. the main skb fragment. */ |
3990 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 3990 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
3991 | 3991 | ||
3992 | tp->tx_buffers[entry].skb = skb; | 3992 | tp->tx_buffers[entry].skb = skb; |
3993 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | 3993 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); |
3994 | 3994 | ||
3995 | tg3_set_txd(tp, entry, mapping, len, base_flags, | 3995 | tg3_set_txd(tp, entry, mapping, len, base_flags, |
3996 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | 3996 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); |
3997 | 3997 | ||
3998 | entry = NEXT_TX(entry); | 3998 | entry = NEXT_TX(entry); |
3999 | 3999 | ||
4000 | /* Now loop through additional data fragments, and queue them. */ | 4000 | /* Now loop through additional data fragments, and queue them. */ |
4001 | if (skb_shinfo(skb)->nr_frags > 0) { | 4001 | if (skb_shinfo(skb)->nr_frags > 0) { |
4002 | unsigned int i, last; | 4002 | unsigned int i, last; |
4003 | 4003 | ||
4004 | last = skb_shinfo(skb)->nr_frags - 1; | 4004 | last = skb_shinfo(skb)->nr_frags - 1; |
4005 | for (i = 0; i <= last; i++) { | 4005 | for (i = 0; i <= last; i++) { |
4006 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 4006 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4007 | 4007 | ||
4008 | len = frag->size; | 4008 | len = frag->size; |
4009 | mapping = pci_map_page(tp->pdev, | 4009 | mapping = pci_map_page(tp->pdev, |
4010 | frag->page, | 4010 | frag->page, |
4011 | frag->page_offset, | 4011 | frag->page_offset, |
4012 | len, PCI_DMA_TODEVICE); | 4012 | len, PCI_DMA_TODEVICE); |
4013 | 4013 | ||
4014 | tp->tx_buffers[entry].skb = NULL; | 4014 | tp->tx_buffers[entry].skb = NULL; |
4015 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | 4015 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); |
4016 | 4016 | ||
4017 | tg3_set_txd(tp, entry, mapping, len, | 4017 | tg3_set_txd(tp, entry, mapping, len, |
4018 | base_flags, (i == last) | (mss << 1)); | 4018 | base_flags, (i == last) | (mss << 1)); |
4019 | 4019 | ||
4020 | entry = NEXT_TX(entry); | 4020 | entry = NEXT_TX(entry); |
4021 | } | 4021 | } |
4022 | } | 4022 | } |
4023 | 4023 | ||
4024 | /* Packets are ready, update Tx producer idx locally and on the card. */ | 4024 | /* Packets are ready, update Tx producer idx locally and on the card. */ |
4025 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | 4025 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); |
4026 | 4026 | ||
4027 | tp->tx_prod = entry; | 4027 | tp->tx_prod = entry; |
4028 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { | 4028 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { |
4029 | netif_stop_queue(dev); | 4029 | netif_stop_queue(dev); |
4030 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) | 4030 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) |
4031 | netif_wake_queue(tp->dev); | 4031 | netif_wake_queue(tp->dev); |
4032 | } | 4032 | } |
4033 | 4033 | ||
4034 | out_unlock: | 4034 | out_unlock: |
4035 | mmiowb(); | 4035 | mmiowb(); |
4036 | 4036 | ||
4037 | dev->trans_start = jiffies; | 4037 | dev->trans_start = jiffies; |
4038 | 4038 | ||
4039 | return NETDEV_TX_OK; | 4039 | return NETDEV_TX_OK; |
4040 | } | 4040 | } |
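
Both transmit paths finish with the same flow control: after advancing the producer index, stop the queue if fewer than MAX_SKB_FRAGS + 1 descriptors remain (so the next worst-case packet cannot overrun the ring), then immediately re-wake it if the free count is already back above the wake threshold, closing the race with the completion path. A standalone sketch of that producer-side policy with hypothetical ring size and thresholds:

#include <stdio.h>

#define RING_SIZE       512                     /* hypothetical ring size            */
#define MAX_FRAGS       18                      /* worst-case descriptors per packet */
#define WAKE_THRESH     (RING_SIZE / 4)         /* hypothetical wake threshold       */

struct txq {
        unsigned int prod;      /* producer index (transmit path)   */
        unsigned int cons;      /* consumer index (completion path) */
        int stopped;
};

static unsigned int tx_avail(const struct txq *q)
{
        /* free descriptors, keeping one slot unused to tell full from empty */
        return RING_SIZE - 1 - ((q->prod - q->cons) & (RING_SIZE - 1));
}

/* Mirror of the xmit path: refuse to post if the packet might not fit,
 * otherwise post the descriptors and apply the stop/wake policy.
 */
static int post_and_throttle(struct txq *q, unsigned int descs)
{
        if (tx_avail(q) <= descs)
                return -1;                      /* NETDEV_TX_BUSY */

        q->prod = (q->prod + descs) & (RING_SIZE - 1);

        if (tx_avail(q) <= MAX_FRAGS + 1) {
                q->stopped = 1;                 /* netif_stop_queue() */
                if (tx_avail(q) > WAKE_THRESH)
                        q->stopped = 0;         /* netif_wake_queue() */
        }
        return 0;
}

int main(void)
{
        struct txq q = { 0, 0, 0 };
        int posted = 0;

        while (post_and_throttle(&q, MAX_FRAGS) == 0)
                posted++;
        printf("posted=%d avail=%u stopped=%d\n", posted, tx_avail(&q), q.stopped);
        return 0;
}
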
4041 | 4041 | ||
4042 | static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); | 4042 | static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); |
4043 | 4043 | ||
4044 | /* Use GSO to workaround a rare TSO bug that may be triggered when the | 4044 | /* Use GSO to workaround a rare TSO bug that may be triggered when the |
4045 | * TSO header is greater than 80 bytes. | 4045 | * TSO header is greater than 80 bytes. |
4046 | */ | 4046 | */ |
4047 | static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | 4047 | static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) |
4048 | { | 4048 | { |
4049 | struct sk_buff *segs, *nskb; | 4049 | struct sk_buff *segs, *nskb; |
4050 | 4050 | ||
4051 | /* Estimate the number of fragments in the worst case */ | 4051 | /* Estimate the number of fragments in the worst case */ |
4052 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { | 4052 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { |
4053 | netif_stop_queue(tp->dev); | 4053 | netif_stop_queue(tp->dev); |
4054 | if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3)) | 4054 | if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3)) |
4055 | return NETDEV_TX_BUSY; | 4055 | return NETDEV_TX_BUSY; |
4056 | 4056 | ||
4057 | netif_wake_queue(tp->dev); | 4057 | netif_wake_queue(tp->dev); |
4058 | } | 4058 | } |
4059 | 4059 | ||
4060 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); | 4060 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); |
4061 | if (unlikely(IS_ERR(segs))) | 4061 | if (unlikely(IS_ERR(segs))) |
4062 | goto tg3_tso_bug_end; | 4062 | goto tg3_tso_bug_end; |
4063 | 4063 | ||
4064 | do { | 4064 | do { |
4065 | nskb = segs; | 4065 | nskb = segs; |
4066 | segs = segs->next; | 4066 | segs = segs->next; |
4067 | nskb->next = NULL; | 4067 | nskb->next = NULL; |
4068 | tg3_start_xmit_dma_bug(nskb, tp->dev); | 4068 | tg3_start_xmit_dma_bug(nskb, tp->dev); |
4069 | } while (segs); | 4069 | } while (segs); |
4070 | 4070 | ||
4071 | tg3_tso_bug_end: | 4071 | tg3_tso_bug_end: |
4072 | dev_kfree_skb(skb); | 4072 | dev_kfree_skb(skb); |
4073 | 4073 | ||
4074 | return NETDEV_TX_OK; | 4074 | return NETDEV_TX_OK; |
4075 | } | 4075 | } |
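
The fallback walks the singly linked list returned by skb_gso_segment(), detaching each segment (advancing the head and clearing the node's next pointer) before handing it to the normal transmit routine, so each segment is owned and freed individually from then on. The same detach-and-submit idiom over a generic NULL-terminated list:

#include <stdio.h>
#include <stdlib.h>

struct seg {
        int id;
        struct seg *next;
};

static void submit(struct seg *s)
{
        printf("submit segment %d\n", s->id);
        free(s);                        /* each segment is now owned individually */
}

/* Detach-and-submit: the list head advances before the node is handed
 * off, and the node's next pointer is cleared so the consumer never
 * sees (or frees) the rest of the list through it.
 */
static void submit_all(struct seg *list)
{
        struct seg *s;

        do {
                s = list;
                list = list->next;
                s->next = NULL;
                submit(s);
        } while (list);
}

int main(void)
{
        struct seg *head = NULL;

        for (int i = 3; i >= 1; i--) {          /* build 1 -> 2 -> 3 */
                struct seg *s = malloc(sizeof(*s));
                if (!s)
                        return 1;
                s->id = i;
                s->next = head;
                head = s;
        }
        submit_all(head);
        return 0;
}
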
4076 | 4076 | ||
4077 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and | 4077 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and |
4078 | * support TG3_FLG2_HW_TSO_1 or firmware TSO only. | 4078 | * support TG3_FLG2_HW_TSO_1 or firmware TSO only. |
4079 | */ | 4079 | */ |
4080 | static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | 4080 | static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) |
4081 | { | 4081 | { |
4082 | struct tg3 *tp = netdev_priv(dev); | 4082 | struct tg3 *tp = netdev_priv(dev); |
4083 | dma_addr_t mapping; | 4083 | dma_addr_t mapping; |
4084 | u32 len, entry, base_flags, mss; | 4084 | u32 len, entry, base_flags, mss; |
4085 | int would_hit_hwbug; | 4085 | int would_hit_hwbug; |
4086 | 4086 | ||
4087 | len = skb_headlen(skb); | 4087 | len = skb_headlen(skb); |
4088 | 4088 | ||
4089 | /* We are running in BH disabled context with netif_tx_lock | 4089 | /* We are running in BH disabled context with netif_tx_lock |
4090 | * and TX reclaim runs via tp->poll inside of a software | 4090 | * and TX reclaim runs via tp->poll inside of a software |
4091 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 4091 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
4092 | * no IRQ context deadlocks to worry about either. Rejoice! | 4092 | * no IRQ context deadlocks to worry about either. Rejoice! |
4093 | */ | 4093 | */ |
4094 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { | 4094 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { |
4095 | if (!netif_queue_stopped(dev)) { | 4095 | if (!netif_queue_stopped(dev)) { |
4096 | netif_stop_queue(dev); | 4096 | netif_stop_queue(dev); |
4097 | 4097 | ||
4098 | /* This is a hard error, log it. */ | 4098 | /* This is a hard error, log it. */ |
4099 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 4099 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " |
4100 | "queue awake!\n", dev->name); | 4100 | "queue awake!\n", dev->name); |
4101 | } | 4101 | } |
4102 | return NETDEV_TX_BUSY; | 4102 | return NETDEV_TX_BUSY; |
4103 | } | 4103 | } |
4104 | 4104 | ||
4105 | entry = tp->tx_prod; | 4105 | entry = tp->tx_prod; |
4106 | base_flags = 0; | 4106 | base_flags = 0; |
4107 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 4107 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
4108 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | 4108 | base_flags |= TXD_FLAG_TCPUDP_CSUM; |
4109 | mss = 0; | 4109 | mss = 0; |
4110 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 4110 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { |
4111 | struct iphdr *iph; | 4111 | struct iphdr *iph; |
4112 | int tcp_opt_len, ip_tcp_len, hdr_len; | 4112 | int tcp_opt_len, ip_tcp_len, hdr_len; |
4113 | 4113 | ||
4114 | if (skb_header_cloned(skb) && | 4114 | if (skb_header_cloned(skb) && |
4115 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 4115 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
4116 | dev_kfree_skb(skb); | 4116 | dev_kfree_skb(skb); |
4117 | goto out_unlock; | 4117 | goto out_unlock; |
4118 | } | 4118 | } |
4119 | 4119 | ||
4120 | tcp_opt_len = tcp_optlen(skb); | 4120 | tcp_opt_len = tcp_optlen(skb); |
4121 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | 4121 | ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); |
4122 | 4122 | ||
4123 | hdr_len = ip_tcp_len + tcp_opt_len; | 4123 | hdr_len = ip_tcp_len + tcp_opt_len; |
4124 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 4124 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
4125 | (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) | 4125 | (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) |
4126 | return (tg3_tso_bug(tp, skb)); | 4126 | return (tg3_tso_bug(tp, skb)); |
4127 | 4127 | ||
4128 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 4128 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | |
4129 | TXD_FLAG_CPU_POST_DMA); | 4129 | TXD_FLAG_CPU_POST_DMA); |
4130 | 4130 | ||
4131 | iph = ip_hdr(skb); | 4131 | iph = ip_hdr(skb); |
4132 | iph->check = 0; | 4132 | iph->check = 0; |
4133 | iph->tot_len = htons(mss + hdr_len); | 4133 | iph->tot_len = htons(mss + hdr_len); |
4134 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 4134 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { |
4135 | tcp_hdr(skb)->check = 0; | 4135 | tcp_hdr(skb)->check = 0; |
4136 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; | 4136 | base_flags &= ~TXD_FLAG_TCPUDP_CSUM; |
4137 | } else | 4137 | } else |
4138 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 4138 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
4139 | iph->daddr, 0, | 4139 | iph->daddr, 0, |
4140 | IPPROTO_TCP, | 4140 | IPPROTO_TCP, |
4141 | 0); | 4141 | 0); |
4142 | 4142 | ||
4143 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 4143 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || |
4144 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { | 4144 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { |
4145 | if (tcp_opt_len || iph->ihl > 5) { | 4145 | if (tcp_opt_len || iph->ihl > 5) { |
4146 | int tsflags; | 4146 | int tsflags; |
4147 | 4147 | ||
4148 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); | 4148 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4149 | mss |= (tsflags << 11); | 4149 | mss |= (tsflags << 11); |
4150 | } | 4150 | } |
4151 | } else { | 4151 | } else { |
4152 | if (tcp_opt_len || iph->ihl > 5) { | 4152 | if (tcp_opt_len || iph->ihl > 5) { |
4153 | int tsflags; | 4153 | int tsflags; |
4154 | 4154 | ||
4155 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); | 4155 | tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); |
4156 | base_flags |= tsflags << 12; | 4156 | base_flags |= tsflags << 12; |
4157 | } | 4157 | } |
4158 | } | 4158 | } |
4159 | } | 4159 | } |
4160 | #if TG3_VLAN_TAG_USED | 4160 | #if TG3_VLAN_TAG_USED |
4161 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | 4161 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) |
4162 | base_flags |= (TXD_FLAG_VLAN | | 4162 | base_flags |= (TXD_FLAG_VLAN | |
4163 | (vlan_tx_tag_get(skb) << 16)); | 4163 | (vlan_tx_tag_get(skb) << 16)); |
4164 | #endif | 4164 | #endif |
4165 | 4165 | ||
4166 | /* Queue skb data, a.k.a. the main skb fragment. */ | 4166 | /* Queue skb data, a.k.a. the main skb fragment. */ |
4167 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 4167 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
4168 | 4168 | ||
4169 | tp->tx_buffers[entry].skb = skb; | 4169 | tp->tx_buffers[entry].skb = skb; |
4170 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | 4170 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); |
4171 | 4171 | ||
4172 | would_hit_hwbug = 0; | 4172 | would_hit_hwbug = 0; |
4173 | 4173 | ||
4174 | if (tg3_4g_overflow_test(mapping, len)) | 4174 | if (tg3_4g_overflow_test(mapping, len)) |
4175 | would_hit_hwbug = 1; | 4175 | would_hit_hwbug = 1; |
4176 | 4176 | ||
4177 | tg3_set_txd(tp, entry, mapping, len, base_flags, | 4177 | tg3_set_txd(tp, entry, mapping, len, base_flags, |
4178 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | 4178 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); |
4179 | 4179 | ||
4180 | entry = NEXT_TX(entry); | 4180 | entry = NEXT_TX(entry); |
4181 | 4181 | ||
4182 | /* Now loop through additional data fragments, and queue them. */ | 4182 | /* Now loop through additional data fragments, and queue them. */ |
4183 | if (skb_shinfo(skb)->nr_frags > 0) { | 4183 | if (skb_shinfo(skb)->nr_frags > 0) { |
4184 | unsigned int i, last; | 4184 | unsigned int i, last; |
4185 | 4185 | ||
4186 | last = skb_shinfo(skb)->nr_frags - 1; | 4186 | last = skb_shinfo(skb)->nr_frags - 1; |
4187 | for (i = 0; i <= last; i++) { | 4187 | for (i = 0; i <= last; i++) { |
4188 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 4188 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4189 | 4189 | ||
4190 | len = frag->size; | 4190 | len = frag->size; |
4191 | mapping = pci_map_page(tp->pdev, | 4191 | mapping = pci_map_page(tp->pdev, |
4192 | frag->page, | 4192 | frag->page, |
4193 | frag->page_offset, | 4193 | frag->page_offset, |
4194 | len, PCI_DMA_TODEVICE); | 4194 | len, PCI_DMA_TODEVICE); |
4195 | 4195 | ||
4196 | tp->tx_buffers[entry].skb = NULL; | 4196 | tp->tx_buffers[entry].skb = NULL; |
4197 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | 4197 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); |
4198 | 4198 | ||
4199 | if (tg3_4g_overflow_test(mapping, len)) | 4199 | if (tg3_4g_overflow_test(mapping, len)) |
4200 | would_hit_hwbug = 1; | 4200 | would_hit_hwbug = 1; |
4201 | 4201 | ||
4202 | if (tg3_40bit_overflow_test(tp, mapping, len)) | 4202 | if (tg3_40bit_overflow_test(tp, mapping, len)) |
4203 | would_hit_hwbug = 1; | 4203 | would_hit_hwbug = 1; |
4204 | 4204 | ||
4205 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 4205 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
4206 | tg3_set_txd(tp, entry, mapping, len, | 4206 | tg3_set_txd(tp, entry, mapping, len, |
4207 | base_flags, (i == last)|(mss << 1)); | 4207 | base_flags, (i == last)|(mss << 1)); |
4208 | else | 4208 | else |
4209 | tg3_set_txd(tp, entry, mapping, len, | 4209 | tg3_set_txd(tp, entry, mapping, len, |
4210 | base_flags, (i == last)); | 4210 | base_flags, (i == last)); |
4211 | 4211 | ||
4212 | entry = NEXT_TX(entry); | 4212 | entry = NEXT_TX(entry); |
4213 | } | 4213 | } |
4214 | } | 4214 | } |
4215 | 4215 | ||
4216 | if (would_hit_hwbug) { | 4216 | if (would_hit_hwbug) { |
4217 | u32 last_plus_one = entry; | 4217 | u32 last_plus_one = entry; |
4218 | u32 start; | 4218 | u32 start; |
4219 | 4219 | ||
4220 | start = entry - 1 - skb_shinfo(skb)->nr_frags; | 4220 | start = entry - 1 - skb_shinfo(skb)->nr_frags; |
4221 | start &= (TG3_TX_RING_SIZE - 1); | 4221 | start &= (TG3_TX_RING_SIZE - 1); |
4222 | 4222 | ||
4223 | /* If the workaround fails due to memory/mapping | 4223 | /* If the workaround fails due to memory/mapping |
4224 | * failure, silently drop this packet. | 4224 | * failure, silently drop this packet. |
4225 | */ | 4225 | */ |
4226 | if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, | 4226 | if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, |
4227 | &start, base_flags, mss)) | 4227 | &start, base_flags, mss)) |
4228 | goto out_unlock; | 4228 | goto out_unlock; |
4229 | 4229 | ||
4230 | entry = start; | 4230 | entry = start; |
4231 | } | 4231 | } |
4232 | 4232 | ||
4233 | /* Packets are ready, update Tx producer idx locally and on the card. */ | 4233 | /* Packets are ready, update Tx producer idx locally and on the card. */ |
4234 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | 4234 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); |
4235 | 4235 | ||
4236 | tp->tx_prod = entry; | 4236 | tp->tx_prod = entry; |
4237 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { | 4237 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { |
4238 | netif_stop_queue(dev); | 4238 | netif_stop_queue(dev); |
4239 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) | 4239 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)) |
4240 | netif_wake_queue(tp->dev); | 4240 | netif_wake_queue(tp->dev); |
4241 | } | 4241 | } |
4242 | 4242 | ||
4243 | out_unlock: | 4243 | out_unlock: |
4244 | mmiowb(); | 4244 | mmiowb(); |
4245 | 4245 | ||
4246 | dev->trans_start = jiffies; | 4246 | dev->trans_start = jiffies; |
4247 | 4247 | ||
4248 | return NETDEV_TX_OK; | 4248 | return NETDEV_TX_OK; |
4249 | } | 4249 | } |
4250 | 4250 | ||
4251 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | 4251 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, |
4252 | int new_mtu) | 4252 | int new_mtu) |
4253 | { | 4253 | { |
4254 | dev->mtu = new_mtu; | 4254 | dev->mtu = new_mtu; |
4255 | 4255 | ||
4256 | if (new_mtu > ETH_DATA_LEN) { | 4256 | if (new_mtu > ETH_DATA_LEN) { |
4257 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 4257 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { |
4258 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 4258 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; |
4259 | ethtool_op_set_tso(dev, 0); | 4259 | ethtool_op_set_tso(dev, 0); |
4260 | } | 4260 | } |
4261 | else | 4261 | else |
4262 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | 4262 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; |
4263 | } else { | 4263 | } else { |
4264 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 4264 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) |
4265 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 4265 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
4266 | tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; | 4266 | tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; |
4267 | } | 4267 | } |
4268 | } | 4268 | } |
4269 | 4269 | ||
4270 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) | 4270 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) |
4271 | { | 4271 | { |
4272 | struct tg3 *tp = netdev_priv(dev); | 4272 | struct tg3 *tp = netdev_priv(dev); |
4273 | int err; | 4273 | int err; |
4274 | 4274 | ||
4275 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) | 4275 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) |
4276 | return -EINVAL; | 4276 | return -EINVAL; |
4277 | 4277 | ||
4278 | if (!netif_running(dev)) { | 4278 | if (!netif_running(dev)) { |
4279 | /* We'll just catch it later when the | 4279 | /* We'll just catch it later when the |
4280 | * device is brought up. | 4280 | * device is brought up. |
4281 | */ | 4281 | */ |
4282 | tg3_set_mtu(dev, tp, new_mtu); | 4282 | tg3_set_mtu(dev, tp, new_mtu); |
4283 | return 0; | 4283 | return 0; |
4284 | } | 4284 | } |
4285 | 4285 | ||
4286 | tg3_netif_stop(tp); | 4286 | tg3_netif_stop(tp); |
4287 | 4287 | ||
4288 | tg3_full_lock(tp, 1); | 4288 | tg3_full_lock(tp, 1); |
4289 | 4289 | ||
4290 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 4290 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
4291 | 4291 | ||
4292 | tg3_set_mtu(dev, tp, new_mtu); | 4292 | tg3_set_mtu(dev, tp, new_mtu); |
4293 | 4293 | ||
4294 | err = tg3_restart_hw(tp, 0); | 4294 | err = tg3_restart_hw(tp, 0); |
4295 | 4295 | ||
4296 | if (!err) | 4296 | if (!err) |
4297 | tg3_netif_start(tp); | 4297 | tg3_netif_start(tp); |
4298 | 4298 | ||
4299 | tg3_full_unlock(tp); | 4299 | tg3_full_unlock(tp); |
4300 | 4300 | ||
4301 | return err; | 4301 | return err; |
4302 | } | 4302 | } |
4303 | 4303 | ||
4304 | /* Free up pending packets in all rx/tx rings. | 4304 | /* Free up pending packets in all rx/tx rings. |
4305 | * | 4305 | * |
4306 | * The chip has been shut down and the driver detached from | 4306 | * The chip has been shut down and the driver detached from |
4307 | * the networking stack, so no interrupts or new tx packets will | 4307 | * the networking stack, so no interrupts or new tx packets will |
4308 | * end up in the driver. tp->{tx,}lock is not held and we are not | 4308 | * end up in the driver. tp->{tx,}lock is not held and we are not |
4309 | * in an interrupt context and thus may sleep. | 4309 | * in an interrupt context and thus may sleep. |
4310 | */ | 4310 | */ |
4311 | static void tg3_free_rings(struct tg3 *tp) | 4311 | static void tg3_free_rings(struct tg3 *tp) |
4312 | { | 4312 | { |
4313 | struct ring_info *rxp; | 4313 | struct ring_info *rxp; |
4314 | int i; | 4314 | int i; |
4315 | 4315 | ||
4316 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { | 4316 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { |
4317 | rxp = &tp->rx_std_buffers[i]; | 4317 | rxp = &tp->rx_std_buffers[i]; |
4318 | 4318 | ||
4319 | if (rxp->skb == NULL) | 4319 | if (rxp->skb == NULL) |
4320 | continue; | 4320 | continue; |
4321 | pci_unmap_single(tp->pdev, | 4321 | pci_unmap_single(tp->pdev, |
4322 | pci_unmap_addr(rxp, mapping), | 4322 | pci_unmap_addr(rxp, mapping), |
4323 | tp->rx_pkt_buf_sz - tp->rx_offset, | 4323 | tp->rx_pkt_buf_sz - tp->rx_offset, |
4324 | PCI_DMA_FROMDEVICE); | 4324 | PCI_DMA_FROMDEVICE); |
4325 | dev_kfree_skb_any(rxp->skb); | 4325 | dev_kfree_skb_any(rxp->skb); |
4326 | rxp->skb = NULL; | 4326 | rxp->skb = NULL; |
4327 | } | 4327 | } |
4328 | 4328 | ||
4329 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 4329 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { |
4330 | rxp = &tp->rx_jumbo_buffers[i]; | 4330 | rxp = &tp->rx_jumbo_buffers[i]; |
4331 | 4331 | ||
4332 | if (rxp->skb == NULL) | 4332 | if (rxp->skb == NULL) |
4333 | continue; | 4333 | continue; |
4334 | pci_unmap_single(tp->pdev, | 4334 | pci_unmap_single(tp->pdev, |
4335 | pci_unmap_addr(rxp, mapping), | 4335 | pci_unmap_addr(rxp, mapping), |
4336 | RX_JUMBO_PKT_BUF_SZ - tp->rx_offset, | 4336 | RX_JUMBO_PKT_BUF_SZ - tp->rx_offset, |
4337 | PCI_DMA_FROMDEVICE); | 4337 | PCI_DMA_FROMDEVICE); |
4338 | dev_kfree_skb_any(rxp->skb); | 4338 | dev_kfree_skb_any(rxp->skb); |
4339 | rxp->skb = NULL; | 4339 | rxp->skb = NULL; |
4340 | } | 4340 | } |
4341 | 4341 | ||
4342 | for (i = 0; i < TG3_TX_RING_SIZE; ) { | 4342 | for (i = 0; i < TG3_TX_RING_SIZE; ) { |
4343 | struct tx_ring_info *txp; | 4343 | struct tx_ring_info *txp; |
4344 | struct sk_buff *skb; | 4344 | struct sk_buff *skb; |
4345 | int j; | 4345 | int j; |
4346 | 4346 | ||
4347 | txp = &tp->tx_buffers[i]; | 4347 | txp = &tp->tx_buffers[i]; |
4348 | skb = txp->skb; | 4348 | skb = txp->skb; |
4349 | 4349 | ||
4350 | if (skb == NULL) { | 4350 | if (skb == NULL) { |
4351 | i++; | 4351 | i++; |
4352 | continue; | 4352 | continue; |
4353 | } | 4353 | } |
4354 | 4354 | ||
4355 | pci_unmap_single(tp->pdev, | 4355 | pci_unmap_single(tp->pdev, |
4356 | pci_unmap_addr(txp, mapping), | 4356 | pci_unmap_addr(txp, mapping), |
4357 | skb_headlen(skb), | 4357 | skb_headlen(skb), |
4358 | PCI_DMA_TODEVICE); | 4358 | PCI_DMA_TODEVICE); |
4359 | txp->skb = NULL; | 4359 | txp->skb = NULL; |
4360 | 4360 | ||
4361 | i++; | 4361 | i++; |
4362 | 4362 | ||
4363 | for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { | 4363 | for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { |
4364 | txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; | 4364 | txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; |
4365 | pci_unmap_page(tp->pdev, | 4365 | pci_unmap_page(tp->pdev, |
4366 | pci_unmap_addr(txp, mapping), | 4366 | pci_unmap_addr(txp, mapping), |
4367 | skb_shinfo(skb)->frags[j].size, | 4367 | skb_shinfo(skb)->frags[j].size, |
4368 | PCI_DMA_TODEVICE); | 4368 | PCI_DMA_TODEVICE); |
4369 | i++; | 4369 | i++; |
4370 | } | 4370 | } |
4371 | 4371 | ||
4372 | dev_kfree_skb_any(skb); | 4372 | dev_kfree_skb_any(skb); |
4373 | } | 4373 | } |
4374 | } | 4374 | } |
4375 | 4375 | ||
4376 | /* Initialize tx/rx rings for packet processing. | 4376 | /* Initialize tx/rx rings for packet processing. |
4377 | * | 4377 | * |
4378 | * The chip has been shut down and the driver detached from | 4378 | * The chip has been shut down and the driver detached from |
4379 | * the networking stack, so no interrupts or new tx packets will | 4379 | * the networking stack, so no interrupts or new tx packets will |
4380 | * end up in the driver. tp->{tx,}lock are held and thus | 4380 | * end up in the driver. tp->{tx,}lock are held and thus |
4381 | * we may not sleep. | 4381 | * we may not sleep. |
4382 | */ | 4382 | */ |
4383 | static int tg3_init_rings(struct tg3 *tp) | 4383 | static int tg3_init_rings(struct tg3 *tp) |
4384 | { | 4384 | { |
4385 | u32 i; | 4385 | u32 i; |
4386 | 4386 | ||
4387 | /* Free up all the SKBs. */ | 4387 | /* Free up all the SKBs. */ |
4388 | tg3_free_rings(tp); | 4388 | tg3_free_rings(tp); |
4389 | 4389 | ||
4390 | /* Zero out all descriptors. */ | 4390 | /* Zero out all descriptors. */ |
4391 | memset(tp->rx_std, 0, TG3_RX_RING_BYTES); | 4391 | memset(tp->rx_std, 0, TG3_RX_RING_BYTES); |
4392 | memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); | 4392 | memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); |
4393 | memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 4393 | memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
4394 | memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); | 4394 | memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); |
4395 | 4395 | ||
4396 | tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; | 4396 | tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; |
4397 | if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && | 4397 | if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && |
4398 | (tp->dev->mtu > ETH_DATA_LEN)) | 4398 | (tp->dev->mtu > ETH_DATA_LEN)) |
4399 | tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; | 4399 | tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; |
4400 | 4400 | ||
4401 | /* Initialize invariants of the rings; we only set this | 4401 | /* Initialize invariants of the rings; we only set this |
4402 | * stuff once. This works because the card does not | 4402 | * stuff once. This works because the card does not |
4403 | * write into the rx buffer posting rings. | 4403 | * write into the rx buffer posting rings. |
4404 | */ | 4404 | */ |
4405 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { | 4405 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { |
4406 | struct tg3_rx_buffer_desc *rxd; | 4406 | struct tg3_rx_buffer_desc *rxd; |
4407 | 4407 | ||
4408 | rxd = &tp->rx_std[i]; | 4408 | rxd = &tp->rx_std[i]; |
4409 | rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) | 4409 | rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) |
4410 | << RXD_LEN_SHIFT; | 4410 | << RXD_LEN_SHIFT; |
4411 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); | 4411 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); |
4412 | rxd->opaque = (RXD_OPAQUE_RING_STD | | 4412 | rxd->opaque = (RXD_OPAQUE_RING_STD | |
4413 | (i << RXD_OPAQUE_INDEX_SHIFT)); | 4413 | (i << RXD_OPAQUE_INDEX_SHIFT)); |
4414 | } | 4414 | } |
4415 | 4415 | ||
4416 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 4416 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
4417 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 4417 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { |
4418 | struct tg3_rx_buffer_desc *rxd; | 4418 | struct tg3_rx_buffer_desc *rxd; |
4419 | 4419 | ||
4420 | rxd = &tp->rx_jumbo[i]; | 4420 | rxd = &tp->rx_jumbo[i]; |
4421 | rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64) | 4421 | rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64) |
4422 | << RXD_LEN_SHIFT; | 4422 | << RXD_LEN_SHIFT; |
4423 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | | 4423 | rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | |
4424 | RXD_FLAG_JUMBO; | 4424 | RXD_FLAG_JUMBO; |
4425 | rxd->opaque = (RXD_OPAQUE_RING_JUMBO | | 4425 | rxd->opaque = (RXD_OPAQUE_RING_JUMBO | |
4426 | (i << RXD_OPAQUE_INDEX_SHIFT)); | 4426 | (i << RXD_OPAQUE_INDEX_SHIFT)); |
4427 | } | 4427 | } |
4428 | } | 4428 | } |
4429 | 4429 | ||
4430 | /* Now allocate fresh SKBs for each rx ring. */ | 4430 | /* Now allocate fresh SKBs for each rx ring. */ |
4431 | for (i = 0; i < tp->rx_pending; i++) { | 4431 | for (i = 0; i < tp->rx_pending; i++) { |
4432 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { | 4432 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { |
4433 | printk(KERN_WARNING PFX | 4433 | printk(KERN_WARNING PFX |
4434 | "%s: Using a smaller RX standard ring, " | 4434 | "%s: Using a smaller RX standard ring, " |
4435 | "only %d out of %d buffers were allocated " | 4435 | "only %d out of %d buffers were allocated " |
4436 | "successfully.\n", | 4436 | "successfully.\n", |
4437 | tp->dev->name, i, tp->rx_pending); | 4437 | tp->dev->name, i, tp->rx_pending); |
4438 | if (i == 0) | 4438 | if (i == 0) |
4439 | return -ENOMEM; | 4439 | return -ENOMEM; |
4440 | tp->rx_pending = i; | 4440 | tp->rx_pending = i; |
4441 | break; | 4441 | break; |
4442 | } | 4442 | } |
4443 | } | 4443 | } |
4444 | 4444 | ||
4445 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 4445 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
4446 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 4446 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
4447 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, | 4447 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, |
4448 | -1, i) < 0) { | 4448 | -1, i) < 0) { |
4449 | printk(KERN_WARNING PFX | 4449 | printk(KERN_WARNING PFX |
4450 | "%s: Using a smaller RX jumbo ring, " | 4450 | "%s: Using a smaller RX jumbo ring, " |
4451 | "only %d out of %d buffers were " | 4451 | "only %d out of %d buffers were " |
4452 | "allocated successfully.\n", | 4452 | "allocated successfully.\n", |
4453 | tp->dev->name, i, tp->rx_jumbo_pending); | 4453 | tp->dev->name, i, tp->rx_jumbo_pending); |
4454 | if (i == 0) { | 4454 | if (i == 0) { |
4455 | tg3_free_rings(tp); | 4455 | tg3_free_rings(tp); |
4456 | return -ENOMEM; | 4456 | return -ENOMEM; |
4457 | } | 4457 | } |
4458 | tp->rx_jumbo_pending = i; | 4458 | tp->rx_jumbo_pending = i; |
4459 | break; | 4459 | break; |
4460 | } | 4460 | } |
4461 | } | 4461 | } |
4462 | } | 4462 | } |
4463 | return 0; | 4463 | return 0; |
4464 | } | 4464 | } |
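
Each rx descriptor's opaque field encodes both the ring it belongs to and its index within that ring, e.g. RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT), so the completion path can find the matching software buffer when the chip returns the descriptor. A small sketch of that cookie scheme using made-up mask and shift values (the real RXD_OPAQUE_* constants live in the tg3 header):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low bits hold the index, a couple of higher
 * bits identify the ring.  The driver uses RXD_OPAQUE_INDEX_SHIFT and
 * RXD_OPAQUE_RING_{STD,JUMBO} for the same purpose.
 */
#define OPAQUE_INDEX_MASK       0x0000ffffu
#define OPAQUE_INDEX_SHIFT      0
#define OPAQUE_RING_STD         0x00010000u
#define OPAQUE_RING_JUMBO       0x00020000u
#define OPAQUE_RING_MASK        0x00030000u

/* Encode the ring id and buffer index into one opaque cookie. */
static uint32_t rxd_opaque(uint32_t ring, uint32_t index)
{
        return ring | (index << OPAQUE_INDEX_SHIFT);
}

int main(void)
{
        uint32_t cookie = rxd_opaque(OPAQUE_RING_JUMBO, 42);

        printf("ring=%s index=%u\n",
               (cookie & OPAQUE_RING_MASK) == OPAQUE_RING_JUMBO ? "jumbo" : "std",
               (cookie & OPAQUE_INDEX_MASK) >> OPAQUE_INDEX_SHIFT);
        return 0;
}
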
4465 | 4465 | ||
4466 | /* | 4466 | /* |
4467 | * Must not be invoked with interrupt sources disabled and | 4467 | * Must not be invoked with interrupt sources disabled and |
4468 | * the hardware shut down. | 4468 | * the hardware shut down. |
4469 | */ | 4469 | */ |
4470 | static void tg3_free_consistent(struct tg3 *tp) | 4470 | static void tg3_free_consistent(struct tg3 *tp) |
4471 | { | 4471 | { |
4472 | kfree(tp->rx_std_buffers); | 4472 | kfree(tp->rx_std_buffers); |
4473 | tp->rx_std_buffers = NULL; | 4473 | tp->rx_std_buffers = NULL; |
4474 | if (tp->rx_std) { | 4474 | if (tp->rx_std) { |
4475 | pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, | 4475 | pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, |
4476 | tp->rx_std, tp->rx_std_mapping); | 4476 | tp->rx_std, tp->rx_std_mapping); |
4477 | tp->rx_std = NULL; | 4477 | tp->rx_std = NULL; |
4478 | } | 4478 | } |
4479 | if (tp->rx_jumbo) { | 4479 | if (tp->rx_jumbo) { |
4480 | pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, | 4480 | pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, |
4481 | tp->rx_jumbo, tp->rx_jumbo_mapping); | 4481 | tp->rx_jumbo, tp->rx_jumbo_mapping); |
4482 | tp->rx_jumbo = NULL; | 4482 | tp->rx_jumbo = NULL; |
4483 | } | 4483 | } |
4484 | if (tp->rx_rcb) { | 4484 | if (tp->rx_rcb) { |
4485 | pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), | 4485 | pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), |
4486 | tp->rx_rcb, tp->rx_rcb_mapping); | 4486 | tp->rx_rcb, tp->rx_rcb_mapping); |
4487 | tp->rx_rcb = NULL; | 4487 | tp->rx_rcb = NULL; |
4488 | } | 4488 | } |
4489 | if (tp->tx_ring) { | 4489 | if (tp->tx_ring) { |
4490 | pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, | 4490 | pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, |
4491 | tp->tx_ring, tp->tx_desc_mapping); | 4491 | tp->tx_ring, tp->tx_desc_mapping); |
4492 | tp->tx_ring = NULL; | 4492 | tp->tx_ring = NULL; |
4493 | } | 4493 | } |
4494 | if (tp->hw_status) { | 4494 | if (tp->hw_status) { |
4495 | pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, | 4495 | pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, |
4496 | tp->hw_status, tp->status_mapping); | 4496 | tp->hw_status, tp->status_mapping); |
4497 | tp->hw_status = NULL; | 4497 | tp->hw_status = NULL; |
4498 | } | 4498 | } |
4499 | if (tp->hw_stats) { | 4499 | if (tp->hw_stats) { |
4500 | pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), | 4500 | pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), |
4501 | tp->hw_stats, tp->stats_mapping); | 4501 | tp->hw_stats, tp->stats_mapping); |
4502 | tp->hw_stats = NULL; | 4502 | tp->hw_stats = NULL; |
4503 | } | 4503 | } |
4504 | } | 4504 | } |
4505 | 4505 | ||
4506 | /* | 4506 | /* |
4507 | * Must not be invoked with interrupt sources disabled and | 4507 | * Must not be invoked with interrupt sources disabled and |
4508 | * the hardware shut down. Can sleep. | 4508 | * the hardware shut down. Can sleep. |
4509 | */ | 4509 | */ |
4510 | static int tg3_alloc_consistent(struct tg3 *tp) | 4510 | static int tg3_alloc_consistent(struct tg3 *tp) |
4511 | { | 4511 | { |
4512 | tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * | 4512 | tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) * |
4513 | (TG3_RX_RING_SIZE + | 4513 | (TG3_RX_RING_SIZE + |
4514 | TG3_RX_JUMBO_RING_SIZE)) + | 4514 | TG3_RX_JUMBO_RING_SIZE)) + |
4515 | (sizeof(struct tx_ring_info) * | 4515 | (sizeof(struct tx_ring_info) * |
4516 | TG3_TX_RING_SIZE), | 4516 | TG3_TX_RING_SIZE), |
4517 | GFP_KERNEL); | 4517 | GFP_KERNEL); |
4518 | if (!tp->rx_std_buffers) | 4518 | if (!tp->rx_std_buffers) |
4519 | return -ENOMEM; | 4519 | return -ENOMEM; |
4520 | 4520 | ||
4521 | tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; | 4521 | tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; |
4522 | tp->tx_buffers = (struct tx_ring_info *) | 4522 | tp->tx_buffers = (struct tx_ring_info *) |
4523 | &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]; | 4523 | &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]; |
4524 | 4524 | ||
4525 | tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, | 4525 | tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, |
4526 | &tp->rx_std_mapping); | 4526 | &tp->rx_std_mapping); |
4527 | if (!tp->rx_std) | 4527 | if (!tp->rx_std) |
4528 | goto err_out; | 4528 | goto err_out; |
4529 | 4529 | ||
4530 | tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, | 4530 | tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, |
4531 | &tp->rx_jumbo_mapping); | 4531 | &tp->rx_jumbo_mapping); |
4532 | 4532 | ||
4533 | if (!tp->rx_jumbo) | 4533 | if (!tp->rx_jumbo) |
4534 | goto err_out; | 4534 | goto err_out; |
4535 | 4535 | ||
4536 | tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), | 4536 | tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), |
4537 | &tp->rx_rcb_mapping); | 4537 | &tp->rx_rcb_mapping); |
4538 | if (!tp->rx_rcb) | 4538 | if (!tp->rx_rcb) |
4539 | goto err_out; | 4539 | goto err_out; |
4540 | 4540 | ||
4541 | tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, | 4541 | tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, |
4542 | &tp->tx_desc_mapping); | 4542 | &tp->tx_desc_mapping); |
4543 | if (!tp->tx_ring) | 4543 | if (!tp->tx_ring) |
4544 | goto err_out; | 4544 | goto err_out; |
4545 | 4545 | ||
4546 | tp->hw_status = pci_alloc_consistent(tp->pdev, | 4546 | tp->hw_status = pci_alloc_consistent(tp->pdev, |
4547 | TG3_HW_STATUS_SIZE, | 4547 | TG3_HW_STATUS_SIZE, |
4548 | &tp->status_mapping); | 4548 | &tp->status_mapping); |
4549 | if (!tp->hw_status) | 4549 | if (!tp->hw_status) |
4550 | goto err_out; | 4550 | goto err_out; |
4551 | 4551 | ||
4552 | tp->hw_stats = pci_alloc_consistent(tp->pdev, | 4552 | tp->hw_stats = pci_alloc_consistent(tp->pdev, |
4553 | sizeof(struct tg3_hw_stats), | 4553 | sizeof(struct tg3_hw_stats), |
4554 | &tp->stats_mapping); | 4554 | &tp->stats_mapping); |
4555 | if (!tp->hw_stats) | 4555 | if (!tp->hw_stats) |
4556 | goto err_out; | 4556 | goto err_out; |
4557 | 4557 | ||
4558 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); | 4558 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); |
4559 | memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); | 4559 | memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); |
4560 | 4560 | ||
4561 | return 0; | 4561 | return 0; |
4562 | 4562 | ||
4563 | err_out: | 4563 | err_out: |
4564 | tg3_free_consistent(tp); | 4564 | tg3_free_consistent(tp); |
4565 | return -ENOMEM; | 4565 | return -ENOMEM; |
4566 | } | 4566 | } |
4567 | 4567 | ||
4568 | #define MAX_WAIT_CNT 1000 | 4568 | #define MAX_WAIT_CNT 1000 |
4569 | 4569 | ||
4570 | /* To stop a block, clear the enable bit and poll till it | 4570 | /* To stop a block, clear the enable bit and poll till it |
4571 | * clears. tp->lock is held. | 4571 | * clears. tp->lock is held. |
4572 | */ | 4572 | */ |
4573 | static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) | 4573 | static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) |
4574 | { | 4574 | { |
4575 | unsigned int i; | 4575 | unsigned int i; |
4576 | u32 val; | 4576 | u32 val; |
4577 | 4577 | ||
4578 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 4578 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
4579 | switch (ofs) { | 4579 | switch (ofs) { |
4580 | case RCVLSC_MODE: | 4580 | case RCVLSC_MODE: |
4581 | case DMAC_MODE: | 4581 | case DMAC_MODE: |
4582 | case MBFREE_MODE: | 4582 | case MBFREE_MODE: |
4583 | case BUFMGR_MODE: | 4583 | case BUFMGR_MODE: |
4584 | case MEMARB_MODE: | 4584 | case MEMARB_MODE: |
4585 | /* We can't enable/disable these bits of the | 4585 | /* We can't enable/disable these bits of the |
4586 | * 5705/5750, just say success. | 4586 | * 5705/5750, just say success. |
4587 | */ | 4587 | */ |
4588 | return 0; | 4588 | return 0; |
4589 | 4589 | ||
4590 | default: | 4590 | default: |
4591 | break; | 4591 | break; |
4592 | }; | 4592 | }; |
4593 | } | 4593 | } |
4594 | 4594 | ||
4595 | val = tr32(ofs); | 4595 | val = tr32(ofs); |
4596 | val &= ~enable_bit; | 4596 | val &= ~enable_bit; |
4597 | tw32_f(ofs, val); | 4597 | tw32_f(ofs, val); |
4598 | 4598 | ||
4599 | for (i = 0; i < MAX_WAIT_CNT; i++) { | 4599 | for (i = 0; i < MAX_WAIT_CNT; i++) { |
4600 | udelay(100); | 4600 | udelay(100); |
4601 | val = tr32(ofs); | 4601 | val = tr32(ofs); |
4602 | if ((val & enable_bit) == 0) | 4602 | if ((val & enable_bit) == 0) |
4603 | break; | 4603 | break; |
4604 | } | 4604 | } |
4605 | 4605 | ||
4606 | if (i == MAX_WAIT_CNT && !silent) { | 4606 | if (i == MAX_WAIT_CNT && !silent) { |
4607 | printk(KERN_ERR PFX "tg3_stop_block timed out, " | 4607 | printk(KERN_ERR PFX "tg3_stop_block timed out, " |
4608 | "ofs=%lx enable_bit=%x\n", | 4608 | "ofs=%lx enable_bit=%x\n", |
4609 | ofs, enable_bit); | 4609 | ofs, enable_bit); |
4610 | return -ENODEV; | 4610 | return -ENODEV; |
4611 | } | 4611 | } |
4612 | 4612 | ||
4613 | return 0; | 4613 | return 0; |
4614 | } | 4614 | } |
4615 | 4615 | ||
4616 | /* tp->lock is held. */ | 4616 | /* tp->lock is held. */ |
4617 | static int tg3_abort_hw(struct tg3 *tp, int silent) | 4617 | static int tg3_abort_hw(struct tg3 *tp, int silent) |
4618 | { | 4618 | { |
4619 | int i, err; | 4619 | int i, err; |
4620 | 4620 | ||
4621 | tg3_disable_ints(tp); | 4621 | tg3_disable_ints(tp); |
4622 | 4622 | ||
4623 | tp->rx_mode &= ~RX_MODE_ENABLE; | 4623 | tp->rx_mode &= ~RX_MODE_ENABLE; |
4624 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 4624 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
4625 | udelay(10); | 4625 | udelay(10); |
4626 | 4626 | ||
4627 | err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); | 4627 | err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); |
4628 | err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); | 4628 | err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); |
4629 | err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); | 4629 | err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); |
4630 | err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); | 4630 | err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); |
4631 | err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); | 4631 | err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); |
4632 | err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); | 4632 | err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); |
4633 | 4633 | ||
4634 | err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); | 4634 | err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); |
4635 | err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); | 4635 | err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); |
4636 | err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); | 4636 | err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); |
4637 | err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); | 4637 | err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); |
4638 | err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); | 4638 | err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); |
4639 | err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); | 4639 | err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); |
4640 | err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); | 4640 | err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); |
4641 | 4641 | ||
4642 | tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; | 4642 | tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; |
4643 | tw32_f(MAC_MODE, tp->mac_mode); | 4643 | tw32_f(MAC_MODE, tp->mac_mode); |
4644 | udelay(40); | 4644 | udelay(40); |
4645 | 4645 | ||
4646 | tp->tx_mode &= ~TX_MODE_ENABLE; | 4646 | tp->tx_mode &= ~TX_MODE_ENABLE; |
4647 | tw32_f(MAC_TX_MODE, tp->tx_mode); | 4647 | tw32_f(MAC_TX_MODE, tp->tx_mode); |
4648 | 4648 | ||
4649 | for (i = 0; i < MAX_WAIT_CNT; i++) { | 4649 | for (i = 0; i < MAX_WAIT_CNT; i++) { |
4650 | udelay(100); | 4650 | udelay(100); |
4651 | if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) | 4651 | if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) |
4652 | break; | 4652 | break; |
4653 | } | 4653 | } |
4654 | if (i >= MAX_WAIT_CNT) { | 4654 | if (i >= MAX_WAIT_CNT) { |
4655 | printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, " | 4655 | printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, " |
4656 | "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", | 4656 | "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", |
4657 | tp->dev->name, tr32(MAC_TX_MODE)); | 4657 | tp->dev->name, tr32(MAC_TX_MODE)); |
4658 | err |= -ENODEV; | 4658 | err |= -ENODEV; |
4659 | } | 4659 | } |
4660 | 4660 | ||
4661 | err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); | 4661 | err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); |
4662 | err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); | 4662 | err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); |
4663 | err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); | 4663 | err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); |
4664 | 4664 | ||
4665 | tw32(FTQ_RESET, 0xffffffff); | 4665 | tw32(FTQ_RESET, 0xffffffff); |
4666 | tw32(FTQ_RESET, 0x00000000); | 4666 | tw32(FTQ_RESET, 0x00000000); |
4667 | 4667 | ||
4668 | err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); | 4668 | err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); |
4669 | err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); | 4669 | err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); |
4670 | 4670 | ||
4671 | if (tp->hw_status) | 4671 | if (tp->hw_status) |
4672 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); | 4672 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); |
4673 | if (tp->hw_stats) | 4673 | if (tp->hw_stats) |
4674 | memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); | 4674 | memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); |
4675 | 4675 | ||
4676 | return err; | 4676 | return err; |
4677 | } | 4677 | } |
4678 | 4678 | ||
4679 | /* tp->lock is held. */ | 4679 | /* tp->lock is held. */ |
4680 | static int tg3_nvram_lock(struct tg3 *tp) | 4680 | static int tg3_nvram_lock(struct tg3 *tp) |
4681 | { | 4681 | { |
4682 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 4682 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { |
4683 | int i; | 4683 | int i; |
4684 | 4684 | ||
4685 | if (tp->nvram_lock_cnt == 0) { | 4685 | if (tp->nvram_lock_cnt == 0) { |
4686 | tw32(NVRAM_SWARB, SWARB_REQ_SET1); | 4686 | tw32(NVRAM_SWARB, SWARB_REQ_SET1); |
4687 | for (i = 0; i < 8000; i++) { | 4687 | for (i = 0; i < 8000; i++) { |
4688 | if (tr32(NVRAM_SWARB) & SWARB_GNT1) | 4688 | if (tr32(NVRAM_SWARB) & SWARB_GNT1) |
4689 | break; | 4689 | break; |
4690 | udelay(20); | 4690 | udelay(20); |
4691 | } | 4691 | } |
4692 | if (i == 8000) { | 4692 | if (i == 8000) { |
4693 | tw32(NVRAM_SWARB, SWARB_REQ_CLR1); | 4693 | tw32(NVRAM_SWARB, SWARB_REQ_CLR1); |
4694 | return -ENODEV; | 4694 | return -ENODEV; |
4695 | } | 4695 | } |
4696 | } | 4696 | } |
4697 | tp->nvram_lock_cnt++; | 4697 | tp->nvram_lock_cnt++; |
4698 | } | 4698 | } |
4699 | return 0; | 4699 | return 0; |
4700 | } | 4700 | } |
4701 | 4701 | ||
4702 | /* tp->lock is held. */ | 4702 | /* tp->lock is held. */ |
4703 | static void tg3_nvram_unlock(struct tg3 *tp) | 4703 | static void tg3_nvram_unlock(struct tg3 *tp) |
4704 | { | 4704 | { |
4705 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 4705 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { |
4706 | if (tp->nvram_lock_cnt > 0) | 4706 | if (tp->nvram_lock_cnt > 0) |
4707 | tp->nvram_lock_cnt--; | 4707 | tp->nvram_lock_cnt--; |
4708 | if (tp->nvram_lock_cnt == 0) | 4708 | if (tp->nvram_lock_cnt == 0) |
4709 | tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); | 4709 | tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); |
4710 | } | 4710 | } |
4711 | } | 4711 | } |
4712 | 4712 | ||
4713 | /* tp->lock is held. */ | 4713 | /* tp->lock is held. */ |
4714 | static void tg3_enable_nvram_access(struct tg3 *tp) | 4714 | static void tg3_enable_nvram_access(struct tg3 *tp) |
4715 | { | 4715 | { |
4716 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 4716 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
4717 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 4717 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { |
4718 | u32 nvaccess = tr32(NVRAM_ACCESS); | 4718 | u32 nvaccess = tr32(NVRAM_ACCESS); |
4719 | 4719 | ||
4720 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); | 4720 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); |
4721 | } | 4721 | } |
4722 | } | 4722 | } |
4723 | 4723 | ||
4724 | /* tp->lock is held. */ | 4724 | /* tp->lock is held. */ |
4725 | static void tg3_disable_nvram_access(struct tg3 *tp) | 4725 | static void tg3_disable_nvram_access(struct tg3 *tp) |
4726 | { | 4726 | { |
4727 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 4727 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
4728 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 4728 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { |
4729 | u32 nvaccess = tr32(NVRAM_ACCESS); | 4729 | u32 nvaccess = tr32(NVRAM_ACCESS); |
4730 | 4730 | ||
4731 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); | 4731 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); |
4732 | } | 4732 | } |
4733 | } | 4733 | } |
4734 | 4734 | ||
4735 | /* tp->lock is held. */ | 4735 | /* tp->lock is held. */ |
4736 | static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) | 4736 | static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) |
4737 | { | 4737 | { |
4738 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, | 4738 | tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, |
4739 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); | 4739 | NIC_SRAM_FIRMWARE_MBOX_MAGIC1); |
4740 | 4740 | ||
4741 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 4741 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { |
4742 | switch (kind) { | 4742 | switch (kind) { |
4743 | case RESET_KIND_INIT: | 4743 | case RESET_KIND_INIT: |
4744 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4744 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4745 | DRV_STATE_START); | 4745 | DRV_STATE_START); |
4746 | break; | 4746 | break; |
4747 | 4747 | ||
4748 | case RESET_KIND_SHUTDOWN: | 4748 | case RESET_KIND_SHUTDOWN: |
4749 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4749 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4750 | DRV_STATE_UNLOAD); | 4750 | DRV_STATE_UNLOAD); |
4751 | break; | 4751 | break; |
4752 | 4752 | ||
4753 | case RESET_KIND_SUSPEND: | 4753 | case RESET_KIND_SUSPEND: |
4754 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4754 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4755 | DRV_STATE_SUSPEND); | 4755 | DRV_STATE_SUSPEND); |
4756 | break; | 4756 | break; |
4757 | 4757 | ||
4758 | default: | 4758 | default: |
4759 | break; | 4759 | break; |
4760 | }; | 4760 | }; |
4761 | } | 4761 | } |
4762 | } | 4762 | } |
4763 | 4763 | ||
4764 | /* tp->lock is held. */ | 4764 | /* tp->lock is held. */ |
4765 | static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) | 4765 | static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) |
4766 | { | 4766 | { |
4767 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 4767 | if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { |
4768 | switch (kind) { | 4768 | switch (kind) { |
4769 | case RESET_KIND_INIT: | 4769 | case RESET_KIND_INIT: |
4770 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4770 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4771 | DRV_STATE_START_DONE); | 4771 | DRV_STATE_START_DONE); |
4772 | break; | 4772 | break; |
4773 | 4773 | ||
4774 | case RESET_KIND_SHUTDOWN: | 4774 | case RESET_KIND_SHUTDOWN: |
4775 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4775 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4776 | DRV_STATE_UNLOAD_DONE); | 4776 | DRV_STATE_UNLOAD_DONE); |
4777 | break; | 4777 | break; |
4778 | 4778 | ||
4779 | default: | 4779 | default: |
4780 | break; | 4780 | break; |
4781 | }; | 4781 | }; |
4782 | } | 4782 | } |
4783 | } | 4783 | } |
4784 | 4784 | ||
4785 | /* tp->lock is held. */ | 4785 | /* tp->lock is held. */ |
4786 | static void tg3_write_sig_legacy(struct tg3 *tp, int kind) | 4786 | static void tg3_write_sig_legacy(struct tg3 *tp, int kind) |
4787 | { | 4787 | { |
4788 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 4788 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
4789 | switch (kind) { | 4789 | switch (kind) { |
4790 | case RESET_KIND_INIT: | 4790 | case RESET_KIND_INIT: |
4791 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4791 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4792 | DRV_STATE_START); | 4792 | DRV_STATE_START); |
4793 | break; | 4793 | break; |
4794 | 4794 | ||
4795 | case RESET_KIND_SHUTDOWN: | 4795 | case RESET_KIND_SHUTDOWN: |
4796 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4796 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4797 | DRV_STATE_UNLOAD); | 4797 | DRV_STATE_UNLOAD); |
4798 | break; | 4798 | break; |
4799 | 4799 | ||
4800 | case RESET_KIND_SUSPEND: | 4800 | case RESET_KIND_SUSPEND: |
4801 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 4801 | tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, |
4802 | DRV_STATE_SUSPEND); | 4802 | DRV_STATE_SUSPEND); |
4803 | break; | 4803 | break; |
4804 | 4804 | ||
4805 | default: | 4805 | default: |
4806 | break; | 4806 | break; |
4807 | }; | 4807 | }; |
4808 | } | 4808 | } |
4809 | } | 4809 | } |
4810 | 4810 | ||
4811 | static int tg3_poll_fw(struct tg3 *tp) | 4811 | static int tg3_poll_fw(struct tg3 *tp) |
4812 | { | 4812 | { |
4813 | int i; | 4813 | int i; |
4814 | u32 val; | 4814 | u32 val; |
4815 | 4815 | ||
4816 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 4816 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
4817 | /* Wait up to 20ms for init done. */ | 4817 | /* Wait up to 20ms for init done. */ |
4818 | for (i = 0; i < 200; i++) { | 4818 | for (i = 0; i < 200; i++) { |
4819 | if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) | 4819 | if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) |
4820 | return 0; | 4820 | return 0; |
4821 | udelay(100); | 4821 | udelay(100); |
4822 | } | 4822 | } |
4823 | return -ENODEV; | 4823 | return -ENODEV; |
4824 | } | 4824 | } |
4825 | 4825 | ||
4826 | /* Wait for firmware initialization to complete. */ | 4826 | /* Wait for firmware initialization to complete. */ |
4827 | for (i = 0; i < 100000; i++) { | 4827 | for (i = 0; i < 100000; i++) { |
4828 | tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); | 4828 | tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); |
4829 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) | 4829 | if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) |
4830 | break; | 4830 | break; |
4831 | udelay(10); | 4831 | udelay(10); |
4832 | } | 4832 | } |
4833 | 4833 | ||
4834 | /* Chip might not be fitted with firmware. Some Sun onboard | 4834 | /* Chip might not be fitted with firmware. Some Sun onboard |
4835 | * parts are configured like that. So don't signal the timeout | 4835 | * parts are configured like that. So don't signal the timeout |
4836 | * of the above loop as an error, but do report the lack of | 4836 | * of the above loop as an error, but do report the lack of |
4837 | * running firmware once. | 4837 | * running firmware once. |
4838 | */ | 4838 | */ |
4839 | if (i >= 100000 && | 4839 | if (i >= 100000 && |
4840 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { | 4840 | !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { |
4841 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; | 4841 | tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; |
4842 | 4842 | ||
4843 | printk(KERN_INFO PFX "%s: No firmware running.\n", | 4843 | printk(KERN_INFO PFX "%s: No firmware running.\n", |
4844 | tp->dev->name); | 4844 | tp->dev->name); |
4845 | } | 4845 | } |
4846 | 4846 | ||
4847 | return 0; | 4847 | return 0; |
4848 | } | 4848 | } |
4849 | 4849 | ||
4850 | static void tg3_stop_fw(struct tg3 *); | 4850 | static void tg3_stop_fw(struct tg3 *); |
4851 | 4851 | ||
4852 | /* tp->lock is held. */ | 4852 | /* tp->lock is held. */ |
4853 | static int tg3_chip_reset(struct tg3 *tp) | 4853 | static int tg3_chip_reset(struct tg3 *tp) |
4854 | { | 4854 | { |
4855 | u32 val; | 4855 | u32 val; |
4856 | void (*write_op)(struct tg3 *, u32, u32); | 4856 | void (*write_op)(struct tg3 *, u32, u32); |
4857 | int err; | 4857 | int err; |
4858 | 4858 | ||
4859 | tg3_nvram_lock(tp); | 4859 | tg3_nvram_lock(tp); |
4860 | 4860 | ||
4861 | /* No matching tg3_nvram_unlock() after this because | 4861 | /* No matching tg3_nvram_unlock() after this because |
4862 | * chip reset below will undo the nvram lock. | 4862 | * chip reset below will undo the nvram lock. |
4863 | */ | 4863 | */ |
4864 | tp->nvram_lock_cnt = 0; | 4864 | tp->nvram_lock_cnt = 0; |
4865 | 4865 | ||
4866 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 4866 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
4867 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 4867 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
4868 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 4868 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
4869 | tw32(GRC_FASTBOOT_PC, 0); | 4869 | tw32(GRC_FASTBOOT_PC, 0); |
4870 | 4870 | ||
4871 | /* | 4871 | /* |
4872 | * We must avoid the readl() that normally takes place. | 4872 | * We must avoid the readl() that normally takes place. |
4873 | * It locks machines, causes machine checks, and other | 4873 | * It locks machines, causes machine checks, and other |
4874 | * fun things. So, temporarily disable the 5701 | 4874 | * fun things. So, temporarily disable the 5701 |
4875 | * hardware workaround, while we do the reset. | 4875 | * hardware workaround, while we do the reset. |
4876 | */ | 4876 | */ |
4877 | write_op = tp->write32; | 4877 | write_op = tp->write32; |
4878 | if (write_op == tg3_write_flush_reg32) | 4878 | if (write_op == tg3_write_flush_reg32) |
4879 | tp->write32 = tg3_write32; | 4879 | tp->write32 = tg3_write32; |
4880 | 4880 | ||
4881 | /* Prevent the irq handler from reading or writing PCI registers | 4881 | /* Prevent the irq handler from reading or writing PCI registers |
4882 | * during chip reset when the memory enable bit in the PCI command | 4882 | * during chip reset when the memory enable bit in the PCI command |
4883 | * register may be cleared. The chip does not generate interrupt | 4883 | * register may be cleared. The chip does not generate interrupt |
4884 | * at this time, but the irq handler may still be called due to irq | 4884 | * at this time, but the irq handler may still be called due to irq |
4885 | * sharing or irqpoll. | 4885 | * sharing or irqpoll. |
4886 | */ | 4886 | */ |
4887 | tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; | 4887 | tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; |
4888 | if (tp->hw_status) { | 4888 | if (tp->hw_status) { |
4889 | tp->hw_status->status = 0; | 4889 | tp->hw_status->status = 0; |
4890 | tp->hw_status->status_tag = 0; | 4890 | tp->hw_status->status_tag = 0; |
4891 | } | 4891 | } |
4892 | tp->last_tag = 0; | 4892 | tp->last_tag = 0; |
4893 | smp_mb(); | 4893 | smp_mb(); |
4894 | synchronize_irq(tp->pdev->irq); | 4894 | synchronize_irq(tp->pdev->irq); |
4895 | 4895 | ||
4896 | /* do the reset */ | 4896 | /* do the reset */ |
4897 | val = GRC_MISC_CFG_CORECLK_RESET; | 4897 | val = GRC_MISC_CFG_CORECLK_RESET; |
4898 | 4898 | ||
4899 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 4899 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
4900 | if (tr32(0x7e2c) == 0x60) { | 4900 | if (tr32(0x7e2c) == 0x60) { |
4901 | tw32(0x7e2c, 0x20); | 4901 | tw32(0x7e2c, 0x20); |
4902 | } | 4902 | } |
4903 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { | 4903 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { |
4904 | tw32(GRC_MISC_CFG, (1 << 29)); | 4904 | tw32(GRC_MISC_CFG, (1 << 29)); |
4905 | val |= (1 << 29); | 4905 | val |= (1 << 29); |
4906 | } | 4906 | } |
4907 | } | 4907 | } |
4908 | 4908 | ||
4909 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 4909 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
4910 | tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); | 4910 | tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); |
4911 | tw32(GRC_VCPU_EXT_CTRL, | 4911 | tw32(GRC_VCPU_EXT_CTRL, |
4912 | tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); | 4912 | tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); |
4913 | } | 4913 | } |
4914 | 4914 | ||
4915 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 4915 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) |
4916 | val |= GRC_MISC_CFG_KEEP_GPHY_POWER; | 4916 | val |= GRC_MISC_CFG_KEEP_GPHY_POWER; |
4917 | tw32(GRC_MISC_CFG, val); | 4917 | tw32(GRC_MISC_CFG, val); |
4918 | 4918 | ||
4919 | /* restore 5701 hardware bug workaround write method */ | 4919 | /* restore 5701 hardware bug workaround write method */ |
4920 | tp->write32 = write_op; | 4920 | tp->write32 = write_op; |
4921 | 4921 | ||
4922 | /* Unfortunately, we have to delay before the PCI read back. | 4922 | /* Unfortunately, we have to delay before the PCI read back. |
4923 | * Some 575X chips will not even respond to a PCI cfg access | 4923 | * Some 575X chips will not even respond to a PCI cfg access |
4924 | * when the reset command is given to the chip. | 4924 | * when the reset command is given to the chip. |
4925 | * | 4925 | * |
4926 | * How do these hardware designers expect things to work | 4926 | * How do these hardware designers expect things to work |
4927 | * properly if the PCI write is posted for a long period | 4927 | * properly if the PCI write is posted for a long period |
4928 | * of time? It is always necessary to have some method by | 4928 | * of time? It is always necessary to have some method by |
4929 | * which a register read back can occur to push the write | 4929 | * which a register read back can occur to push the write |
4930 | * out which does the reset. | 4930 | * out which does the reset. |
4931 | * | 4931 | * |
4932 | * For most tg3 variants the trick below was working. | 4932 | * For most tg3 variants the trick below was working. |
4933 | * Ho hum... | 4933 | * Ho hum... |
4934 | */ | 4934 | */ |
4935 | udelay(120); | 4935 | udelay(120); |
4936 | 4936 | ||
4937 | /* Flush PCI posted writes. The normal MMIO registers | 4937 | /* Flush PCI posted writes. The normal MMIO registers |
4938 | * are inaccessible at this time so this is the only | 4938 | * are inaccessible at this time so this is the only |
4939 | * way to do this reliably (actually, this is no longer | 4939 | * way to do this reliably (actually, this is no longer |
4940 | * the case, see above). I tried to use indirect | 4940 | * the case, see above). I tried to use indirect |
4941 | * register read/write but this upset some 5701 variants. | 4941 | * register read/write but this upset some 5701 variants. |
4942 | */ | 4942 | */ |
4943 | pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); | 4943 | pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); |
4944 | 4944 | ||
4945 | udelay(120); | 4945 | udelay(120); |
4946 | 4946 | ||
4947 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 4947 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
4948 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { | 4948 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { |
4949 | int i; | 4949 | int i; |
4950 | u32 cfg_val; | 4950 | u32 cfg_val; |
4951 | 4951 | ||
4952 | /* Wait for link training to complete. */ | 4952 | /* Wait for link training to complete. */ |
4953 | for (i = 0; i < 5000; i++) | 4953 | for (i = 0; i < 5000; i++) |
4954 | udelay(100); | 4954 | udelay(100); |
4955 | 4955 | ||
4956 | pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); | 4956 | pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); |
4957 | pci_write_config_dword(tp->pdev, 0xc4, | 4957 | pci_write_config_dword(tp->pdev, 0xc4, |
4958 | cfg_val | (1 << 15)); | 4958 | cfg_val | (1 << 15)); |
4959 | } | 4959 | } |
4960 | /* Set PCIE max payload size and clear error status. */ | 4960 | /* Set PCIE max payload size and clear error status. */ |
4961 | pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); | 4961 | pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); |
4962 | } | 4962 | } |
4963 | 4963 | ||
4964 | /* Re-enable indirect register accesses. */ | 4964 | /* Re-enable indirect register accesses. */ |
4965 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 4965 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
4966 | tp->misc_host_ctrl); | 4966 | tp->misc_host_ctrl); |
4967 | 4967 | ||
4968 | /* Set MAX PCI retry to zero. */ | 4968 | /* Set MAX PCI retry to zero. */ |
4969 | val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); | 4969 | val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); |
4970 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 4970 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && |
4971 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) | 4971 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) |
4972 | val |= PCISTATE_RETRY_SAME_DMA; | 4972 | val |= PCISTATE_RETRY_SAME_DMA; |
4973 | pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); | 4973 | pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); |
4974 | 4974 | ||
4975 | pci_restore_state(tp->pdev); | 4975 | pci_restore_state(tp->pdev); |
4976 | 4976 | ||
4977 | tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; | 4977 | tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; |
4978 | 4978 | ||
4979 | /* Make sure PCI-X relaxed ordering bit is clear. */ | 4979 | /* Make sure PCI-X relaxed ordering bit is clear. */ |
4980 | pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); | 4980 | pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); |
4981 | val &= ~PCIX_CAPS_RELAXED_ORDERING; | 4981 | val &= ~PCIX_CAPS_RELAXED_ORDERING; |
4982 | pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); | 4982 | pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); |
4983 | 4983 | ||
4984 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 4984 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { |
4985 | u32 val; | 4985 | u32 val; |
4986 | 4986 | ||
4987 | /* Chip reset on 5780 will reset MSI enable bit, | 4987 | /* Chip reset on 5780 will reset MSI enable bit, |
4988 | * so need to restore it. | 4988 | * so need to restore it. |
4989 | */ | 4989 | */ |
4990 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 4990 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
4991 | u16 ctrl; | 4991 | u16 ctrl; |
4992 | 4992 | ||
4993 | pci_read_config_word(tp->pdev, | 4993 | pci_read_config_word(tp->pdev, |
4994 | tp->msi_cap + PCI_MSI_FLAGS, | 4994 | tp->msi_cap + PCI_MSI_FLAGS, |
4995 | &ctrl); | 4995 | &ctrl); |
4996 | pci_write_config_word(tp->pdev, | 4996 | pci_write_config_word(tp->pdev, |
4997 | tp->msi_cap + PCI_MSI_FLAGS, | 4997 | tp->msi_cap + PCI_MSI_FLAGS, |
4998 | ctrl | PCI_MSI_FLAGS_ENABLE); | 4998 | ctrl | PCI_MSI_FLAGS_ENABLE); |
4999 | val = tr32(MSGINT_MODE); | 4999 | val = tr32(MSGINT_MODE); |
5000 | tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); | 5000 | tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); |
5001 | } | 5001 | } |
5002 | 5002 | ||
5003 | val = tr32(MEMARB_MODE); | 5003 | val = tr32(MEMARB_MODE); |
5004 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 5004 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); |
5005 | 5005 | ||
5006 | } else | 5006 | } else |
5007 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); | 5007 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); |
5008 | 5008 | ||
5009 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { | 5009 | if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { |
5010 | tg3_stop_fw(tp); | 5010 | tg3_stop_fw(tp); |
5011 | tw32(0x5000, 0x400); | 5011 | tw32(0x5000, 0x400); |
5012 | } | 5012 | } |
5013 | 5013 | ||
5014 | tw32(GRC_MODE, tp->grc_mode); | 5014 | tw32(GRC_MODE, tp->grc_mode); |
5015 | 5015 | ||
5016 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { | 5016 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { |
5017 | u32 val = tr32(0xc4); | 5017 | u32 val = tr32(0xc4); |
5018 | 5018 | ||
5019 | tw32(0xc4, val | (1 << 15)); | 5019 | tw32(0xc4, val | (1 << 15)); |
5020 | } | 5020 | } |
5021 | 5021 | ||
5022 | if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && | 5022 | if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && |
5023 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 5023 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
5024 | tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; | 5024 | tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; |
5025 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) | 5025 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) |
5026 | tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; | 5026 | tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; |
5027 | tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 5027 | tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); |
5028 | } | 5028 | } |
5029 | 5029 | ||
5030 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 5030 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
5031 | tp->mac_mode = MAC_MODE_PORT_MODE_TBI; | 5031 | tp->mac_mode = MAC_MODE_PORT_MODE_TBI; |
5032 | tw32_f(MAC_MODE, tp->mac_mode); | 5032 | tw32_f(MAC_MODE, tp->mac_mode); |
5033 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 5033 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
5034 | tp->mac_mode = MAC_MODE_PORT_MODE_GMII; | 5034 | tp->mac_mode = MAC_MODE_PORT_MODE_GMII; |
5035 | tw32_f(MAC_MODE, tp->mac_mode); | 5035 | tw32_f(MAC_MODE, tp->mac_mode); |
5036 | } else | 5036 | } else |
5037 | tw32_f(MAC_MODE, 0); | 5037 | tw32_f(MAC_MODE, 0); |
5038 | udelay(40); | 5038 | udelay(40); |
5039 | 5039 | ||
5040 | err = tg3_poll_fw(tp); | 5040 | err = tg3_poll_fw(tp); |
5041 | if (err) | 5041 | if (err) |
5042 | return err; | 5042 | return err; |
5043 | 5043 | ||
5044 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 5044 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && |
5045 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { | 5045 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { |
5046 | u32 val = tr32(0x7c00); | 5046 | u32 val = tr32(0x7c00); |
5047 | 5047 | ||
5048 | tw32(0x7c00, val | (1 << 25)); | 5048 | tw32(0x7c00, val | (1 << 25)); |
5049 | } | 5049 | } |
5050 | 5050 | ||
5051 | /* Reprobe ASF enable state. */ | 5051 | /* Reprobe ASF enable state. */ |
5052 | tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; | 5052 | tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; |
5053 | tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; | 5053 | tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; |
5054 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 5054 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); |
5055 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 5055 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { |
5056 | u32 nic_cfg; | 5056 | u32 nic_cfg; |
5057 | 5057 | ||
5058 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 5058 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); |
5059 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 5059 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
5060 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 5060 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; |
5061 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 5061 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) |
5062 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 5062 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; |
5063 | } | 5063 | } |
5064 | } | 5064 | } |
5065 | 5065 | ||
5066 | return 0; | 5066 | return 0; |
5067 | } | 5067 | } |
5068 | 5068 | ||
5069 | /* tp->lock is held. */ | 5069 | /* tp->lock is held. */ |
5070 | static void tg3_stop_fw(struct tg3 *tp) | 5070 | static void tg3_stop_fw(struct tg3 *tp) |
5071 | { | 5071 | { |
5072 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 5072 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
5073 | u32 val; | 5073 | u32 val; |
5074 | int i; | 5074 | int i; |
5075 | 5075 | ||
5076 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); | 5076 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); |
5077 | val = tr32(GRC_RX_CPU_EVENT); | 5077 | val = tr32(GRC_RX_CPU_EVENT); |
5078 | val |= (1 << 14); | 5078 | val |= (1 << 14); |
5079 | tw32(GRC_RX_CPU_EVENT, val); | 5079 | tw32(GRC_RX_CPU_EVENT, val); |
5080 | 5080 | ||
5081 | /* Wait for RX cpu to ACK the event. */ | 5081 | /* Wait for RX cpu to ACK the event. */ |
5082 | for (i = 0; i < 100; i++) { | 5082 | for (i = 0; i < 100; i++) { |
5083 | if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14))) | 5083 | if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14))) |
5084 | break; | 5084 | break; |
5085 | udelay(1); | 5085 | udelay(1); |
5086 | } | 5086 | } |
5087 | } | 5087 | } |
5088 | } | 5088 | } |
5089 | 5089 | ||
5090 | /* tp->lock is held. */ | 5090 | /* tp->lock is held. */ |
5091 | static int tg3_halt(struct tg3 *tp, int kind, int silent) | 5091 | static int tg3_halt(struct tg3 *tp, int kind, int silent) |
5092 | { | 5092 | { |
5093 | int err; | 5093 | int err; |
5094 | 5094 | ||
5095 | tg3_stop_fw(tp); | 5095 | tg3_stop_fw(tp); |
5096 | 5096 | ||
5097 | tg3_write_sig_pre_reset(tp, kind); | 5097 | tg3_write_sig_pre_reset(tp, kind); |
5098 | 5098 | ||
5099 | tg3_abort_hw(tp, silent); | 5099 | tg3_abort_hw(tp, silent); |
5100 | err = tg3_chip_reset(tp); | 5100 | err = tg3_chip_reset(tp); |
5101 | 5101 | ||
5102 | tg3_write_sig_legacy(tp, kind); | 5102 | tg3_write_sig_legacy(tp, kind); |
5103 | tg3_write_sig_post_reset(tp, kind); | 5103 | tg3_write_sig_post_reset(tp, kind); |
5104 | 5104 | ||
5105 | if (err) | 5105 | if (err) |
5106 | return err; | 5106 | return err; |
5107 | 5107 | ||
5108 | return 0; | 5108 | return 0; |
5109 | } | 5109 | } |
5110 | 5110 | ||
5111 | #define TG3_FW_RELEASE_MAJOR 0x0 | 5111 | #define TG3_FW_RELEASE_MAJOR 0x0 |
5112 | #define TG3_FW_RELASE_MINOR 0x0 | 5112 | #define TG3_FW_RELASE_MINOR 0x0 |
5113 | #define TG3_FW_RELEASE_FIX 0x0 | 5113 | #define TG3_FW_RELEASE_FIX 0x0 |
5114 | #define TG3_FW_START_ADDR 0x08000000 | 5114 | #define TG3_FW_START_ADDR 0x08000000 |
5115 | #define TG3_FW_TEXT_ADDR 0x08000000 | 5115 | #define TG3_FW_TEXT_ADDR 0x08000000 |
5116 | #define TG3_FW_TEXT_LEN 0x9c0 | 5116 | #define TG3_FW_TEXT_LEN 0x9c0 |
5117 | #define TG3_FW_RODATA_ADDR 0x080009c0 | 5117 | #define TG3_FW_RODATA_ADDR 0x080009c0 |
5118 | #define TG3_FW_RODATA_LEN 0x60 | 5118 | #define TG3_FW_RODATA_LEN 0x60 |
5119 | #define TG3_FW_DATA_ADDR 0x08000a40 | 5119 | #define TG3_FW_DATA_ADDR 0x08000a40 |
5120 | #define TG3_FW_DATA_LEN 0x20 | 5120 | #define TG3_FW_DATA_LEN 0x20 |
5121 | #define TG3_FW_SBSS_ADDR 0x08000a60 | 5121 | #define TG3_FW_SBSS_ADDR 0x08000a60 |
5122 | #define TG3_FW_SBSS_LEN 0xc | 5122 | #define TG3_FW_SBSS_LEN 0xc |
5123 | #define TG3_FW_BSS_ADDR 0x08000a70 | 5123 | #define TG3_FW_BSS_ADDR 0x08000a70 |
5124 | #define TG3_FW_BSS_LEN 0x10 | 5124 | #define TG3_FW_BSS_LEN 0x10 |
5125 | 5125 | ||
5126 | static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { | 5126 | static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { |
5127 | 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, | 5127 | 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, |
5128 | 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, | 5128 | 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, |
5129 | 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, | 5129 | 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, |
5130 | 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000, | 5130 | 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000, |
5131 | 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105, | 5131 | 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105, |
5132 | 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0, | 5132 | 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0, |
5133 | 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010, | 5133 | 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010, |
5134 | 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01, | 5134 | 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01, |
5135 | 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c, | 5135 | 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c, |
5136 | 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000, | 5136 | 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000, |
5137 | 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400, | 5137 | 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400, |
5138 | 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c, | 5138 | 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c, |
5139 | 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000, | 5139 | 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000, |
5140 | 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64, | 5140 | 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64, |
5141 | 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000, | 5141 | 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000, |
5142 | 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, | 5142 | 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, |
5143 | 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68, | 5143 | 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68, |
5144 | 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003, | 5144 | 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003, |
5145 | 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800, | 5145 | 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800, |
5146 | 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, | 5146 | 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, |
5147 | 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60, | 5147 | 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60, |
5148 | 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008, | 5148 | 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008, |
5149 | 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, | 5149 | 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, |
5150 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 5150 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
5151 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 5151 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
5152 | 0, 0, 0, 0, 0, 0, | 5152 | 0, 0, 0, 0, 0, 0, |
5153 | 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002, | 5153 | 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002, |
5154 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, | 5154 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, |
5155 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, | 5155 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, |
5156 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, | 5156 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, |
5157 | 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009, | 5157 | 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009, |
5158 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b, | 5158 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b, |
5159 | 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000, | 5159 | 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000, |
5160 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000, | 5160 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000, |
5161 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, | 5161 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, |
5162 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, | 5162 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, |
5163 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014, | 5163 | 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014, |
5164 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 5164 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
5165 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 5165 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
5166 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 5166 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
5167 | 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010, | 5167 | 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010, |
5168 | 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74, | 5168 | 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74, |
5169 | 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c, | 5169 | 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c, |
5170 | 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800, | 5170 | 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800, |
5171 | 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001, | 5171 | 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001, |
5172 | 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028, | 5172 | 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028, |
5173 | 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800, | 5173 | 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800, |
5174 | 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0, | 5174 | 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0, |
5175 | 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, | 5175 | 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, |
5176 | 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001, | 5176 | 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001, |
5177 | 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810, | 5177 | 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810, |
5178 | 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018, | 5178 | 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018, |
5179 | 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec, | 5179 | 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec, |
5180 | 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c, | 5180 | 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c, |
5181 | 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74, | 5181 | 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74, |
5182 | 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000, | 5182 | 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000, |
5183 | 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c, | 5183 | 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c, |
5184 | 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c, | 5184 | 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c, |
5185 | 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df, | 5185 | 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df, |
5186 | 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000, | 5186 | 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000, |
5187 | 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800, | 5187 | 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800, |
5188 | 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402, | 5188 | 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402, |
5189 | 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00, | 5189 | 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00, |
5190 | 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010, | 5190 | 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010, |
5191 | 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df, | 5191 | 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df, |
5192 | 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001, | 5192 | 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001, |
5193 | 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008, | 5193 | 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008, |
5194 | 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021, | 5194 | 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021, |
5195 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018, | 5195 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018, |
5196 | 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b, | 5196 | 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b, |
5197 | 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000, | 5197 | 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000, |
5198 | 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008, | 5198 | 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008, |
5199 | 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b, | 5199 | 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b, |
5200 | 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001, | 5200 | 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001, |
5201 | 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821, | 5201 | 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821, |
5202 | 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000, | 5202 | 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000, |
5203 | 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000, | 5203 | 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000, |
5204 | 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821, | 5204 | 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821, |
5205 | 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff, | 5205 | 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff, |
5206 | 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008, | 5206 | 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008, |
5207 | 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010, | 5207 | 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010, |
5208 | 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000, | 5208 | 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000, |
5209 | 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428, | 5209 | 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428, |
5210 | 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c, | 5210 | 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c, |
5211 | 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e, | 5211 | 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e, |
5212 | 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010, | 5212 | 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010, |
5213 | 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000, | 5213 | 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000, |
5214 | 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001, | 5214 | 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001, |
5215 | 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000, | 5215 | 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000, |
5216 | 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824, | 5216 | 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824, |
5217 | 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 | 5217 | 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 |
5218 | }; | 5218 | }; |
5219 | 5219 | ||
5220 | static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { | 5220 | static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { |
5221 | 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, | 5221 | 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, |
5222 | 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, | 5222 | 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, |
5223 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, | 5223 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, |
5224 | 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000, | 5224 | 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000, |
5225 | 0x00000000 | 5225 | 0x00000000 |
5226 | }; | 5226 | }; |
5227 | 5227 | ||
5228 | #if 0 /* All zeros, don't eat up space with it. */ | 5228 | #if 0 /* All zeros, don't eat up space with it. */ |
5229 | u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = { | 5229 | u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = { |
5230 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 5230 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
5231 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 | 5231 | 0x00000000, 0x00000000, 0x00000000, 0x00000000 |
5232 | }; | 5232 | }; |
5233 | #endif | 5233 | #endif |
5234 | 5234 | ||
5235 | #define RX_CPU_SCRATCH_BASE 0x30000 | 5235 | #define RX_CPU_SCRATCH_BASE 0x30000 |
5236 | #define RX_CPU_SCRATCH_SIZE 0x04000 | 5236 | #define RX_CPU_SCRATCH_SIZE 0x04000 |
5237 | #define TX_CPU_SCRATCH_BASE 0x34000 | 5237 | #define TX_CPU_SCRATCH_BASE 0x34000 |
5238 | #define TX_CPU_SCRATCH_SIZE 0x04000 | 5238 | #define TX_CPU_SCRATCH_SIZE 0x04000 |
5239 | 5239 | ||
5240 | /* tp->lock is held. */ | 5240 | /* tp->lock is held. */ |
5241 | static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | 5241 | static int tg3_halt_cpu(struct tg3 *tp, u32 offset) |
5242 | { | 5242 | { |
5243 | int i; | 5243 | int i; |
5244 | 5244 | ||
5245 | BUG_ON(offset == TX_CPU_BASE && | 5245 | BUG_ON(offset == TX_CPU_BASE && |
5246 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); | 5246 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); |
5247 | 5247 | ||
5248 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 5248 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
5249 | u32 val = tr32(GRC_VCPU_EXT_CTRL); | 5249 | u32 val = tr32(GRC_VCPU_EXT_CTRL); |
5250 | 5250 | ||
5251 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); | 5251 | tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); |
5252 | return 0; | 5252 | return 0; |
5253 | } | 5253 | } |
5254 | if (offset == RX_CPU_BASE) { | 5254 | if (offset == RX_CPU_BASE) { |
5255 | for (i = 0; i < 10000; i++) { | 5255 | for (i = 0; i < 10000; i++) { |
5256 | tw32(offset + CPU_STATE, 0xffffffff); | 5256 | tw32(offset + CPU_STATE, 0xffffffff); |
5257 | tw32(offset + CPU_MODE, CPU_MODE_HALT); | 5257 | tw32(offset + CPU_MODE, CPU_MODE_HALT); |
5258 | if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) | 5258 | if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) |
5259 | break; | 5259 | break; |
5260 | } | 5260 | } |
5261 | 5261 | ||
5262 | tw32(offset + CPU_STATE, 0xffffffff); | 5262 | tw32(offset + CPU_STATE, 0xffffffff); |
5263 | tw32_f(offset + CPU_MODE, CPU_MODE_HALT); | 5263 | tw32_f(offset + CPU_MODE, CPU_MODE_HALT); |
5264 | udelay(10); | 5264 | udelay(10); |
5265 | } else { | 5265 | } else { |
5266 | for (i = 0; i < 10000; i++) { | 5266 | for (i = 0; i < 10000; i++) { |
5267 | tw32(offset + CPU_STATE, 0xffffffff); | 5267 | tw32(offset + CPU_STATE, 0xffffffff); |
5268 | tw32(offset + CPU_MODE, CPU_MODE_HALT); | 5268 | tw32(offset + CPU_MODE, CPU_MODE_HALT); |
5269 | if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) | 5269 | if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) |
5270 | break; | 5270 | break; |
5271 | } | 5271 | } |
5272 | } | 5272 | } |
5273 | 5273 | ||
5274 | if (i >= 10000) { | 5274 | if (i >= 10000) { |
5275 | printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " | 5275 | printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " |
5276 | "and %s CPU\n", | 5276 | "and %s CPU\n", |
5277 | tp->dev->name, | 5277 | tp->dev->name, |
5278 | (offset == RX_CPU_BASE ? "RX" : "TX")); | 5278 | (offset == RX_CPU_BASE ? "RX" : "TX")); |
5279 | return -ENODEV; | 5279 | return -ENODEV; |
5280 | } | 5280 | } |
5281 | 5281 | ||
5282 | /* Clear firmware's nvram arbitration. */ | 5282 | /* Clear firmware's nvram arbitration. */ |
5283 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | 5283 | if (tp->tg3_flags & TG3_FLAG_NVRAM) |
5284 | tw32(NVRAM_SWARB, SWARB_REQ_CLR0); | 5284 | tw32(NVRAM_SWARB, SWARB_REQ_CLR0); |
5285 | return 0; | 5285 | return 0; |
5286 | } | 5286 | } |
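
Editor's note: the halt path above keeps writing the halt request (CPU_STATE = 0xffffffff, CPU_MODE = CPU_MODE_HALT) and re-reading CPU_MODE until the HALT bit reads back, giving up after 10000 attempts; 5906 parts are halted through GRC_VCPU_EXT_CTRL instead. The following is a minimal user-space sketch of that request-and-poll shape against a mocked register pair; the mock_* helpers and MOCK_* values are inventions for the example, not the driver's tr32()/tw32() interface.

    /* Sketch of the request-and-poll halt pattern used in tg3_halt_cpu(),
     * against a trivial mocked register pair.  mock_w32()/mock_r32() and the
     * MOCK_* names are inventions for this example; the driver talks to real
     * CPU_STATE/CPU_MODE registers through tw32()/tr32(). */
    #include <stdio.h>
    #include <stdint.h>

    #define MOCK_CPU_STATE  0x00
    #define MOCK_CPU_MODE   0x04
    #define MOCK_MODE_HALT  0x00000400

    static int halt_requested;
    static int polls_until_halt = 3;   /* pretend the CPU stops after 3 polls */

    static void mock_w32(uint32_t off, uint32_t val)
    {
            if (off == MOCK_CPU_MODE && (val & MOCK_MODE_HALT))
                    halt_requested = 1;     /* request latched, not yet visible */
    }

    static uint32_t mock_r32(uint32_t off)
    {
            if (off == MOCK_CPU_MODE && halt_requested && --polls_until_halt <= 0)
                    return MOCK_MODE_HALT;  /* CPU now reports itself halted */
            return 0;
    }

    static int mock_halt_cpu(void)
    {
            int i;

            /* Same shape as the driver loop: keep asserting the halt request
             * until the mode register reads the HALT bit back; give up after
             * 10000 attempts. */
            for (i = 0; i < 10000; i++) {
                    mock_w32(MOCK_CPU_STATE, 0xffffffff);
                    mock_w32(MOCK_CPU_MODE, MOCK_MODE_HALT);
                    if (mock_r32(MOCK_CPU_MODE) & MOCK_MODE_HALT)
                            break;
            }
            return (i >= 10000) ? -1 : 0;   /* driver returns -ENODEV on timeout */
    }

    int main(void)
    {
            printf("halt %s\n", mock_halt_cpu() ? "timed out" : "acknowledged");
            return 0;
    }
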
5287 | 5287 | ||
5288 | struct fw_info { | 5288 | struct fw_info { |
5289 | unsigned int text_base; | 5289 | unsigned int text_base; |
5290 | unsigned int text_len; | 5290 | unsigned int text_len; |
5291 | const u32 *text_data; | 5291 | const u32 *text_data; |
5292 | unsigned int rodata_base; | 5292 | unsigned int rodata_base; |
5293 | unsigned int rodata_len; | 5293 | unsigned int rodata_len; |
5294 | const u32 *rodata_data; | 5294 | const u32 *rodata_data; |
5295 | unsigned int data_base; | 5295 | unsigned int data_base; |
5296 | unsigned int data_len; | 5296 | unsigned int data_len; |
5297 | const u32 *data_data; | 5297 | const u32 *data_data; |
5298 | }; | 5298 | }; |
5299 | 5299 | ||
5300 | /* tp->lock is held. */ | 5300 | /* tp->lock is held. */ |
5301 | static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, | 5301 | static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, |
5302 | int cpu_scratch_size, struct fw_info *info) | 5302 | int cpu_scratch_size, struct fw_info *info) |
5303 | { | 5303 | { |
5304 | int err, lock_err, i; | 5304 | int err, lock_err, i; |
5305 | void (*write_op)(struct tg3 *, u32, u32); | 5305 | void (*write_op)(struct tg3 *, u32, u32); |
5306 | 5306 | ||
5307 | if (cpu_base == TX_CPU_BASE && | 5307 | if (cpu_base == TX_CPU_BASE && |
5308 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 5308 | (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
5309 | printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " | 5309 | printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " |
5310 | "TX cpu firmware on %s which is 5705.\n", | 5310 | "TX cpu firmware on %s which is 5705.\n", |
5311 | tp->dev->name); | 5311 | tp->dev->name); |
5312 | return -EINVAL; | 5312 | return -EINVAL; |
5313 | } | 5313 | } |
5314 | 5314 | ||
5315 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 5315 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) |
5316 | write_op = tg3_write_mem; | 5316 | write_op = tg3_write_mem; |
5317 | else | 5317 | else |
5318 | write_op = tg3_write_indirect_reg32; | 5318 | write_op = tg3_write_indirect_reg32; |
5319 | 5319 | ||
5320 | /* It is possible that bootcode is still loading at this point. | 5320 | /* It is possible that bootcode is still loading at this point. |
5321 | * Get the nvram lock first before halting the cpu. | 5321 | * Get the nvram lock first before halting the cpu. |
5322 | */ | 5322 | */ |
5323 | lock_err = tg3_nvram_lock(tp); | 5323 | lock_err = tg3_nvram_lock(tp); |
5324 | err = tg3_halt_cpu(tp, cpu_base); | 5324 | err = tg3_halt_cpu(tp, cpu_base); |
5325 | if (!lock_err) | 5325 | if (!lock_err) |
5326 | tg3_nvram_unlock(tp); | 5326 | tg3_nvram_unlock(tp); |
5327 | if (err) | 5327 | if (err) |
5328 | goto out; | 5328 | goto out; |
5329 | 5329 | ||
5330 | for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) | 5330 | for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) |
5331 | write_op(tp, cpu_scratch_base + i, 0); | 5331 | write_op(tp, cpu_scratch_base + i, 0); |
5332 | tw32(cpu_base + CPU_STATE, 0xffffffff); | 5332 | tw32(cpu_base + CPU_STATE, 0xffffffff); |
5333 | tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); | 5333 | tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); |
5334 | for (i = 0; i < (info->text_len / sizeof(u32)); i++) | 5334 | for (i = 0; i < (info->text_len / sizeof(u32)); i++) |
5335 | write_op(tp, (cpu_scratch_base + | 5335 | write_op(tp, (cpu_scratch_base + |
5336 | (info->text_base & 0xffff) + | 5336 | (info->text_base & 0xffff) + |
5337 | (i * sizeof(u32))), | 5337 | (i * sizeof(u32))), |
5338 | (info->text_data ? | 5338 | (info->text_data ? |
5339 | info->text_data[i] : 0)); | 5339 | info->text_data[i] : 0)); |
5340 | for (i = 0; i < (info->rodata_len / sizeof(u32)); i++) | 5340 | for (i = 0; i < (info->rodata_len / sizeof(u32)); i++) |
5341 | write_op(tp, (cpu_scratch_base + | 5341 | write_op(tp, (cpu_scratch_base + |
5342 | (info->rodata_base & 0xffff) + | 5342 | (info->rodata_base & 0xffff) + |
5343 | (i * sizeof(u32))), | 5343 | (i * sizeof(u32))), |
5344 | (info->rodata_data ? | 5344 | (info->rodata_data ? |
5345 | info->rodata_data[i] : 0)); | 5345 | info->rodata_data[i] : 0)); |
5346 | for (i = 0; i < (info->data_len / sizeof(u32)); i++) | 5346 | for (i = 0; i < (info->data_len / sizeof(u32)); i++) |
5347 | write_op(tp, (cpu_scratch_base + | 5347 | write_op(tp, (cpu_scratch_base + |
5348 | (info->data_base & 0xffff) + | 5348 | (info->data_base & 0xffff) + |
5349 | (i * sizeof(u32))), | 5349 | (i * sizeof(u32))), |
5350 | (info->data_data ? | 5350 | (info->data_data ? |
5351 | info->data_data[i] : 0)); | 5351 | info->data_data[i] : 0)); |
5352 | 5352 | ||
5353 | err = 0; | 5353 | err = 0; |
5354 | 5354 | ||
5355 | out: | 5355 | out: |
5356 | return err; | 5356 | return err; |
5357 | } | 5357 | } |
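
Editor's note: each firmware image is described by a struct fw_info (link address, length and data pointer for the text, rodata and data sections). The loader above zero-fills the CPU's scratch RAM, then copies each section one 32-bit word at a time; the destination offset is the section's link address masked to its low 16 bits plus the scratch base, and a NULL data pointer means "write zeros". A minimal user-space sketch of that copy loop follows, with scratch RAM modeled as a plain array and invented section contents; only the offset arithmetic mirrors the driver.

    /* Minimal sketch of the section-copy loop in tg3_load_firmware_cpu().
     * The scratch[] array stands in for the device's CPU scratch RAM and the
     * section below is invented for illustration. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define SCRATCH_WORDS   (0x4000 / 4)    /* 16 KB scratch, as in the driver */

    static uint32_t scratch[SCRATCH_WORDS];

    struct fw_section {
            uint32_t base;          /* link address of the section */
            uint32_t len;           /* length in bytes */
            const uint32_t *data;   /* NULL means the section is all zeros */
    };

    static void load_section(const struct fw_section *s)
    {
            uint32_t i;

            for (i = 0; i < s->len / 4; i++) {
                    /* Same mapping as the driver: keep only the low 16 bits of
                     * the link address as an offset into scratch RAM. */
                    uint32_t off = ((s->base & 0xffff) / 4) + i;

                    scratch[off] = s->data ? s->data[i] : 0;
            }
    }

    int main(void)
    {
            static const uint32_t text[] = { 0x10000003, 0x00000000, 0x0000000d };
            struct fw_section sec = { 0x08000000, sizeof(text), text };

            memset(scratch, 0, sizeof(scratch));    /* driver zeroes scratch first */
            load_section(&sec);
            printf("scratch[0] = 0x%08x\n", (unsigned)scratch[0]);
            return 0;
    }
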
5358 | 5358 | ||
5359 | /* tp->lock is held. */ | 5359 | /* tp->lock is held. */ |
5360 | static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) | 5360 | static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) |
5361 | { | 5361 | { |
5362 | struct fw_info info; | 5362 | struct fw_info info; |
5363 | int err, i; | 5363 | int err, i; |
5364 | 5364 | ||
5365 | info.text_base = TG3_FW_TEXT_ADDR; | 5365 | info.text_base = TG3_FW_TEXT_ADDR; |
5366 | info.text_len = TG3_FW_TEXT_LEN; | 5366 | info.text_len = TG3_FW_TEXT_LEN; |
5367 | info.text_data = &tg3FwText[0]; | 5367 | info.text_data = &tg3FwText[0]; |
5368 | info.rodata_base = TG3_FW_RODATA_ADDR; | 5368 | info.rodata_base = TG3_FW_RODATA_ADDR; |
5369 | info.rodata_len = TG3_FW_RODATA_LEN; | 5369 | info.rodata_len = TG3_FW_RODATA_LEN; |
5370 | info.rodata_data = &tg3FwRodata[0]; | 5370 | info.rodata_data = &tg3FwRodata[0]; |
5371 | info.data_base = TG3_FW_DATA_ADDR; | 5371 | info.data_base = TG3_FW_DATA_ADDR; |
5372 | info.data_len = TG3_FW_DATA_LEN; | 5372 | info.data_len = TG3_FW_DATA_LEN; |
5373 | info.data_data = NULL; | 5373 | info.data_data = NULL; |
5374 | 5374 | ||
5375 | err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, | 5375 | err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, |
5376 | RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, | 5376 | RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, |
5377 | &info); | 5377 | &info); |
5378 | if (err) | 5378 | if (err) |
5379 | return err; | 5379 | return err; |
5380 | 5380 | ||
5381 | err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, | 5381 | err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, |
5382 | TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, | 5382 | TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, |
5383 | &info); | 5383 | &info); |
5384 | if (err) | 5384 | if (err) |
5385 | return err; | 5385 | return err; |
5386 | 5386 | ||
5387 | /* Now startup only the RX cpu. */ | 5387 | /* Now startup only the RX cpu. */ |
5388 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 5388 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); |
5389 | tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); | 5389 | tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); |
5390 | 5390 | ||
5391 | for (i = 0; i < 5; i++) { | 5391 | for (i = 0; i < 5; i++) { |
5392 | if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR) | 5392 | if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR) |
5393 | break; | 5393 | break; |
5394 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 5394 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); |
5395 | tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); | 5395 | tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); |
5396 | tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); | 5396 | tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); |
5397 | udelay(1000); | 5397 | udelay(1000); |
5398 | } | 5398 | } |
5399 | if (i >= 5) { | 5399 | if (i >= 5) { |
5400 | printk(KERN_ERR PFX "tg3_load_firmware fails for %s " | 5400 | printk(KERN_ERR PFX "tg3_load_firmware fails for %s " |
5401 | "to set RX CPU PC, is %08x should be %08x\n", | 5401 | "to set RX CPU PC, is %08x should be %08x\n", |
5402 | tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), | 5402 | tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), |
5403 | TG3_FW_TEXT_ADDR); | 5403 | TG3_FW_TEXT_ADDR); |
5404 | return -ENODEV; | 5404 | return -ENODEV; |
5405 | } | 5405 | } |
5406 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 5406 | tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); |
5407 | tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); | 5407 | tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); |
5408 | 5408 | ||
5409 | return 0; | 5409 | return 0; |
5410 | } | 5410 | } |
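
Editor's note: after both images are written, only the RX CPU is started: its program counter is pointed at TG3_FW_TEXT_ADDR and read back, and if the write did not take, the halt plus PC write is retried up to five times with a 1 ms delay before giving up with -ENODEV. Below is a hedged sketch of that retry-and-verify loop with a mocked PC register; the mock_set_pc()/mock_get_pc() helpers and the lost-write behaviour are assumptions made only to exercise the retry path.

    /* Sketch of the "set PC, read it back, retry" start sequence used for the
     * RX CPU in tg3_load_5701_a0_firmware_fix().  The mock below pretends the
     * first two PC writes are lost so the retry loop actually runs. */
    #include <stdio.h>
    #include <stdint.h>

    #define FW_TEXT_ADDR    0x08000000u

    static uint32_t mock_pc;
    static int writes_ignored = 2;          /* pretend the first writes are lost */

    static void mock_set_pc(uint32_t val)
    {
            if (writes_ignored > 0)
                    writes_ignored--;
            else
                    mock_pc = val;
    }

    static uint32_t mock_get_pc(void) { return mock_pc; }

    static int start_rx_cpu(void)
    {
            int i;

            mock_set_pc(FW_TEXT_ADDR);
            for (i = 0; i < 5; i++) {
                    if (mock_get_pc() == FW_TEXT_ADDR)
                            break;          /* PC took hold, CPU can run */
                    /* the driver re-halts the CPU and waits 1 ms here */
                    mock_set_pc(FW_TEXT_ADDR);
            }
            return (i >= 5) ? -1 : 0;       /* -ENODEV in the driver */
    }

    int main(void)
    {
            printf("RX CPU start: %s\n", start_rx_cpu() ? "failed" : "ok");
            return 0;
    }
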
5411 | 5411 | ||
5412 | 5412 | ||
5413 | #define TG3_TSO_FW_RELEASE_MAJOR 0x1 | 5413 | #define TG3_TSO_FW_RELEASE_MAJOR 0x1 |
5414 | #define TG3_TSO_FW_RELASE_MINOR 0x6 | 5414 | #define TG3_TSO_FW_RELASE_MINOR 0x6 |
5415 | #define TG3_TSO_FW_RELEASE_FIX 0x0 | 5415 | #define TG3_TSO_FW_RELEASE_FIX 0x0 |
5416 | #define TG3_TSO_FW_START_ADDR 0x08000000 | 5416 | #define TG3_TSO_FW_START_ADDR 0x08000000 |
5417 | #define TG3_TSO_FW_TEXT_ADDR 0x08000000 | 5417 | #define TG3_TSO_FW_TEXT_ADDR 0x08000000 |
5418 | #define TG3_TSO_FW_TEXT_LEN 0x1aa0 | 5418 | #define TG3_TSO_FW_TEXT_LEN 0x1aa0 |
5419 | #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0 | 5419 | #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0 |
5420 | #define TG3_TSO_FW_RODATA_LEN 0x60 | 5420 | #define TG3_TSO_FW_RODATA_LEN 0x60 |
5421 | #define TG3_TSO_FW_DATA_ADDR 0x08001b20 | 5421 | #define TG3_TSO_FW_DATA_ADDR 0x08001b20 |
5422 | #define TG3_TSO_FW_DATA_LEN 0x30 | 5422 | #define TG3_TSO_FW_DATA_LEN 0x30 |
5423 | #define TG3_TSO_FW_SBSS_ADDR 0x08001b50 | 5423 | #define TG3_TSO_FW_SBSS_ADDR 0x08001b50 |
5424 | #define TG3_TSO_FW_SBSS_LEN 0x2c | 5424 | #define TG3_TSO_FW_SBSS_LEN 0x2c |
5425 | #define TG3_TSO_FW_BSS_ADDR 0x08001b80 | 5425 | #define TG3_TSO_FW_BSS_ADDR 0x08001b80 |
5426 | #define TG3_TSO_FW_BSS_LEN 0x894 | 5426 | #define TG3_TSO_FW_BSS_LEN 0x894 |
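
Editor's note: the TG3_TSO_FW_* constants above describe a section layout starting at 0x08000000: text, then rodata, data, sbss and bss. The short sketch below recomputes each section's end address from its base and length and prints it next to the start of the following section, so the constants can be sanity-checked; the small gaps before the data and bss sections are presumably alignment padding in the original firmware image, which is an inference, not something the driver documents.

    /* Prints the computed end of each TSO firmware section next to the start
     * of the following one, using the TG3_TSO_FW_*_ADDR/LEN values above. */
    #include <stdio.h>

    struct section { const char *name; unsigned addr, len; };

    int main(void)
    {
            static const struct section s[] = {
                    { "text",   0x08000000, 0x1aa0 },
                    { "rodata", 0x08001aa0, 0x60   },
                    { "data",   0x08001b20, 0x30   },
                    { "sbss",   0x08001b50, 0x2c   },
                    { "bss",    0x08001b80, 0x894  },
            };
            int i;

            for (i = 0; i < 4; i++)
                    printf("%-6s ends at 0x%08x, %-6s starts at 0x%08x (gap %u bytes)\n",
                           s[i].name, s[i].addr + s[i].len,
                           s[i + 1].name, s[i + 1].addr,
                           s[i + 1].addr - (s[i].addr + s[i].len));
            return 0;
    }
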
5427 | 5427 | ||
5428 | static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { | 5428 | static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { |
5429 | 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, | 5429 | 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, |
5430 | 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, | 5430 | 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, |
5431 | 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, | 5431 | 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, |
5432 | 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800, | 5432 | 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800, |
5433 | 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001, | 5433 | 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001, |
5434 | 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c, | 5434 | 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c, |
5435 | 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001, | 5435 | 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001, |
5436 | 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008, | 5436 | 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008, |
5437 | 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, | 5437 | 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, |
5438 | 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001, | 5438 | 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001, |
5439 | 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000, | 5439 | 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000, |
5440 | 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001, | 5440 | 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001, |
5441 | 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800, | 5441 | 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800, |
5442 | 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c, | 5442 | 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c, |
5443 | 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, | 5443 | 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, |
5444 | 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021, | 5444 | 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021, |
5445 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800, | 5445 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800, |
5446 | 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c, | 5446 | 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c, |
5447 | 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, | 5447 | 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, |
5448 | 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800, | 5448 | 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800, |
5449 | 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8, | 5449 | 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8, |
5450 | 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8, | 5450 | 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8, |
5451 | 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90, | 5451 | 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90, |
5452 | 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068, | 5452 | 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068, |
5453 | 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c, | 5453 | 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c, |
5454 | 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021, | 5454 | 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021, |
5455 | 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008, | 5455 | 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008, |
5456 | 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021, | 5456 | 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021, |
5457 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b, | 5457 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b, |
5458 | 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, | 5458 | 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, |
5459 | 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, | 5459 | 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, |
5460 | 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020, | 5460 | 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020, |
5461 | 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800, | 5461 | 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800, |
5462 | 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98, | 5462 | 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98, |
5463 | 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902, | 5463 | 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902, |
5464 | 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602, | 5464 | 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602, |
5465 | 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001, | 5465 | 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001, |
5466 | 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c, | 5466 | 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c, |
5467 | 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, | 5467 | 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, |
5468 | 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4, | 5468 | 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4, |
5469 | 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410, | 5469 | 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410, |
5470 | 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800, | 5470 | 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800, |
5471 | 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4, | 5471 | 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4, |
5472 | 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800, | 5472 | 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800, |
5473 | 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800, | 5473 | 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800, |
5474 | 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800, | 5474 | 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800, |
5475 | 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800, | 5475 | 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800, |
5476 | 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, | 5476 | 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, |
5477 | 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800, | 5477 | 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800, |
5478 | 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, | 5478 | 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, |
5479 | 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800, | 5479 | 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800, |
5480 | 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14, | 5480 | 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14, |
5481 | 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800, | 5481 | 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800, |
5482 | 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, | 5482 | 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, |
5483 | 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002, | 5483 | 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002, |
5484 | 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80, | 5484 | 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80, |
5485 | 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001, | 5485 | 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001, |
5486 | 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003, | 5486 | 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003, |
5487 | 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000, | 5487 | 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000, |
5488 | 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656, | 5488 | 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656, |
5489 | 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078, | 5489 | 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078, |
5490 | 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800, | 5490 | 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800, |
5491 | 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c, | 5491 | 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c, |
5492 | 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c, | 5492 | 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c, |
5493 | 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100, | 5493 | 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100, |
5494 | 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054, | 5494 | 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054, |
5495 | 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c, | 5495 | 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c, |
5496 | 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0, | 5496 | 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0, |
5497 | 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825, | 5497 | 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825, |
5498 | 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, | 5498 | 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, |
5499 | 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, | 5499 | 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, |
5500 | 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, | 5500 | 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, |
5501 | 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, | 5501 | 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, |
5502 | 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, | 5502 | 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, |
5503 | 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, | 5503 | 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, |
5504 | 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, | 5504 | 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, |
5505 | 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003, | 5505 | 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003, |
5506 | 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, | 5506 | 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, |
5507 | 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b, | 5507 | 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b, |
5508 | 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98, | 5508 | 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98, |
5509 | 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000, | 5509 | 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000, |
5510 | 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, | 5510 | 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, |
5511 | 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, | 5511 | 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, |
5512 | 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, | 5512 | 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, |
5513 | 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000, | 5513 | 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000, |
5514 | 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, | 5514 | 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, |
5515 | 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, | 5515 | 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, |
5516 | 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, | 5516 | 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, |
5517 | 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014, | 5517 | 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014, |
5518 | 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, | 5518 | 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, |
5519 | 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a, | 5519 | 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a, |
5520 | 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400, | 5520 | 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400, |
5521 | 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010, | 5521 | 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010, |
5522 | 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e, | 5522 | 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e, |
5523 | 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800, | 5523 | 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800, |
5524 | 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000, | 5524 | 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000, |
5525 | 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000, | 5525 | 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000, |
5526 | 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246, | 5526 | 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246, |
5527 | 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff, | 5527 | 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff, |
5528 | 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821, | 5528 | 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821, |
5529 | 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000, | 5529 | 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000, |
5530 | 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9, | 5530 | 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9, |
5531 | 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc, | 5531 | 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc, |
5532 | 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000, | 5532 | 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000, |
5533 | 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a, | 5533 | 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a, |
5534 | 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286, | 5534 | 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286, |
5535 | 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023, | 5535 | 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023, |
5536 | 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c, | 5536 | 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c, |
5537 | 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010, | 5537 | 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010, |
5538 | 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400, | 5538 | 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400, |
5539 | 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024, | 5539 | 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024, |
5540 | 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800, | 5540 | 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800, |
5541 | 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800, | 5541 | 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800, |
5542 | 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021, | 5542 | 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021, |
5543 | 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8, | 5543 | 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8, |
5544 | 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021, | 5544 | 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021, |
5545 | 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8, | 5545 | 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8, |
5546 | 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60, | 5546 | 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60, |
5547 | 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, | 5547 | 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, |
5548 | 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000, | 5548 | 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000, |
5549 | 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800, | 5549 | 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800, |
5550 | 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021, | 5550 | 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021, |
5551 | 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021, | 5551 | 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021, |
5552 | 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002, | 5552 | 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002, |
5553 | 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000, | 5553 | 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000, |
5554 | 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800, | 5554 | 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800, |
5555 | 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc, | 5555 | 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc, |
5556 | 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50, | 5556 | 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50, |
5557 | 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025, | 5557 | 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025, |
5558 | 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800, | 5558 | 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800, |
5559 | 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f, | 5559 | 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f, |
5560 | 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40, | 5560 | 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40, |
5561 | 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, | 5561 | 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, |
5562 | 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, | 5562 | 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, |
5563 | 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000, | 5563 | 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000, |
5564 | 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008, | 5564 | 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008, |
5565 | 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02, | 5565 | 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02, |
5566 | 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, | 5566 | 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, |
5567 | 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, | 5567 | 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, |
5568 | 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, | 5568 | 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, |
5569 | 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000, | 5569 | 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000, |
5570 | 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008, | 5570 | 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008, |
5571 | 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2, | 5571 | 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2, |
5572 | 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402, | 5572 | 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402, |
5573 | 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4, | 5573 | 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4, |
5574 | 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023, | 5574 | 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023, |
5575 | 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a, | 5575 | 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a, |
5576 | 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004, | 5576 | 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004, |
5577 | 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400, | 5577 | 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400, |
5578 | 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4, | 5578 | 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4, |
5579 | 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800, | 5579 | 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800, |
5580 | 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4, | 5580 | 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4, |
5581 | 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800, | 5581 | 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800, |
5582 | 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4, | 5582 | 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4, |
5583 | 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821, | 5583 | 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821, |
5584 | 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800, | 5584 | 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800, |
5585 | 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6, | 5585 | 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6, |
5586 | 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800, | 5586 | 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800, |
5587 | 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021, | 5587 | 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021, |
5588 | 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008, | 5588 | 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008, |
5589 | 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a, | 5589 | 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a, |
5590 | 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, | 5590 | 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, |
5591 | 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c, | 5591 | 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c, |
5592 | 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb, | 5592 | 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb, |
5593 | 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821, | 5593 | 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821, |
5594 | 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021, | 5594 | 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021, |
5595 | 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, | 5595 | 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, |
5596 | 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, | 5596 | 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, |
5597 | 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02, | 5597 | 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02, |
5598 | 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021, | 5598 | 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021, |
5599 | 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081, | 5599 | 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081, |
5600 | 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800, | 5600 | 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800, |
5601 | 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800, | 5601 | 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800, |
5602 | 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a, | 5602 | 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a, |
5603 | 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02, | 5603 | 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02, |
5604 | 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821, | 5604 | 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821, |
5605 | 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023, | 5605 | 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023, |
5606 | 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff, | 5606 | 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff, |
5607 | 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042, | 5607 | 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042, |
5608 | 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, | 5608 | 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, |
5609 | 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, | 5609 | 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, |
5610 | 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, | 5610 | 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, |
5611 | 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, | 5611 | 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, |
5612 | 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, | 5612 | 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, |
5613 | 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821, | 5613 | 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821, |
5614 | 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800, | 5614 | 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800, |
5615 | 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043, | 5615 | 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043, |
5616 | 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021, | 5616 | 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021, |
5617 | 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, | 5617 | 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, |
5618 | 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800, | 5618 | 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800, |
5619 | 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff, | 5619 | 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff, |
5620 | 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, | 5620 | 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, |
5621 | 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007, | 5621 | 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007, |
5622 | 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402, | 5622 | 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402, |
5623 | 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff, | 5623 | 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff, |
5624 | 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021, | 5624 | 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021, |
5625 | 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff, | 5625 | 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff, |
5626 | 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005, | 5626 | 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005, |
5627 | 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800, | 5627 | 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800, |
5628 | 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4, | 5628 | 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4, |
5629 | 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b, | 5629 | 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b, |
5630 | 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4, | 5630 | 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4, |
5631 | 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, | 5631 | 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, |
5632 | 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, | 5632 | 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, |
5633 | 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, | 5633 | 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, |
5634 | 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac, | 5634 | 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac, |
5635 | 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022, | 5635 | 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022, |
5636 | 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, | 5636 | 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, |
5637 | 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0, | 5637 | 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0, |
5638 | 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021, | 5638 | 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021, |
5639 | 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, | 5639 | 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, |
5640 | 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc, | 5640 | 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc, |
5641 | 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005, | 5641 | 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005, |
5642 | 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080, | 5642 | 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080, |
5643 | 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800, | 5643 | 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800, |
5644 | 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014, | 5644 | 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014, |
5645 | 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823, | 5645 | 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823, |
5646 | 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, | 5646 | 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, |
5647 | 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, | 5647 | 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, |
5648 | 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5, | 5648 | 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5, |
5649 | 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, | 5649 | 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, |
5650 | 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, | 5650 | 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, |
5651 | 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, | 5651 | 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, |
5652 | 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005, | 5652 | 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005, |
5653 | 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800, | 5653 | 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800, |
5654 | 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500, | 5654 | 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500, |
5655 | 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023, | 5655 | 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023, |
5656 | 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821, | 5656 | 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821, |
5657 | 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000, | 5657 | 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000, |
5658 | 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021, | 5658 | 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021, |
5659 | 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006, | 5659 | 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006, |
5660 | 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0, | 5660 | 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0, |
5661 | 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006, | 5661 | 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006, |
5662 | 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905, | 5662 | 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905, |
5663 | 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860, | 5663 | 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860, |
5664 | 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab, | 5664 | 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab, |
5665 | 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff, | 5665 | 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff, |
5666 | 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, | 5666 | 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, |
5667 | 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038, | 5667 | 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038, |
5668 | 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020, | 5668 | 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020, |
5669 | 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450, | 5669 | 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450, |
5670 | 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003, | 5670 | 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003, |
5671 | 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff, | 5671 | 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff, |
5672 | 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, | 5672 | 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, |
5673 | 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f, | 5673 | 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f, |
5674 | 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000, | 5674 | 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000, |
5675 | 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820, | 5675 | 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820, |
5676 | 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4, | 5676 | 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4, |
5677 | 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, | 5677 | 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, |
5678 | 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, | 5678 | 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, |
5679 | 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, | 5679 | 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, |
5680 | 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002, | 5680 | 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002, |
5681 | 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff, | 5681 | 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff, |
5682 | 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8, | 5682 | 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8, |
5683 | 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438, | 5683 | 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438, |
5684 | 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800, | 5684 | 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800, |
5685 | 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800, | 5685 | 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800, |
5686 | 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000, | 5686 | 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000, |
5687 | 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000, | 5687 | 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000, |
5688 | 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021, | 5688 | 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021, |
5689 | 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, | 5689 | 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, |
5690 | 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, | 5690 | 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, |
5691 | 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b, | 5691 | 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b, |
5692 | 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02, | 5692 | 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02, |
5693 | 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, | 5693 | 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, |
5694 | 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, | 5694 | 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, |
5695 | 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff, | 5695 | 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff, |
5696 | 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, | 5696 | 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, |
5697 | 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651, | 5697 | 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651, |
5698 | 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, | 5698 | 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, |
5699 | 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0, | 5699 | 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0, |
5700 | 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, | 5700 | 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, |
5701 | 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, | 5701 | 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, |
5702 | 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000, | 5702 | 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000, |
5703 | 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800, | 5703 | 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800, |
5704 | 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b, | 5704 | 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b, |
5705 | 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, | 5705 | 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, |
5706 | 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001, | 5706 | 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001, |
5707 | 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800, | 5707 | 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800, |
5708 | 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000, | 5708 | 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000, |
5709 | 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008, | 5709 | 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008, |
5710 | 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, | 5710 | 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, |
5711 | 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010, | 5711 | 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010, |
5712 | 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, | 5712 | 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, |
5713 | }; | 5713 | }; |
5714 | 5714 | ||
5715 | static const u32 tg3TsoFwRodata[] = { | 5715 | static const u32 tg3TsoFwRodata[] = { |
5716 | 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, | 5716 | 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, |
5717 | 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, | 5717 | 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, |
5718 | 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, | 5718 | 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, |
5719 | 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000, | 5719 | 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000, |
5720 | 0x00000000, | 5720 | 0x00000000, |
5721 | }; | 5721 | }; |
5722 | 5722 | ||
5723 | static const u32 tg3TsoFwData[] = { | 5723 | static const u32 tg3TsoFwData[] = { |
5724 | 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, | 5724 | 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, |
5725 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 5725 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
5726 | 0x00000000, | 5726 | 0x00000000, |
5727 | }; | 5727 | }; |
5728 | 5728 | ||
5729 | /* 5705 needs a special version of the TSO firmware. */ | 5729 | /* 5705 needs a special version of the TSO firmware. */ |
5730 | #define TG3_TSO5_FW_RELEASE_MAJOR 0x1 | 5730 | #define TG3_TSO5_FW_RELEASE_MAJOR 0x1 |
5731 | #define TG3_TSO5_FW_RELASE_MINOR 0x2 | 5731 | #define TG3_TSO5_FW_RELASE_MINOR 0x2 |
5732 | #define TG3_TSO5_FW_RELEASE_FIX 0x0 | 5732 | #define TG3_TSO5_FW_RELEASE_FIX 0x0 |
5733 | #define TG3_TSO5_FW_START_ADDR 0x00010000 | 5733 | #define TG3_TSO5_FW_START_ADDR 0x00010000 |
5734 | #define TG3_TSO5_FW_TEXT_ADDR 0x00010000 | 5734 | #define TG3_TSO5_FW_TEXT_ADDR 0x00010000 |
5735 | #define TG3_TSO5_FW_TEXT_LEN 0xe90 | 5735 | #define TG3_TSO5_FW_TEXT_LEN 0xe90 |
5736 | #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90 | 5736 | #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90 |
5737 | #define TG3_TSO5_FW_RODATA_LEN 0x50 | 5737 | #define TG3_TSO5_FW_RODATA_LEN 0x50 |
5738 | #define TG3_TSO5_FW_DATA_ADDR 0x00010f00 | 5738 | #define TG3_TSO5_FW_DATA_ADDR 0x00010f00 |
5739 | #define TG3_TSO5_FW_DATA_LEN 0x20 | 5739 | #define TG3_TSO5_FW_DATA_LEN 0x20 |
5740 | #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20 | 5740 | #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20 |
5741 | #define TG3_TSO5_FW_SBSS_LEN 0x28 | 5741 | #define TG3_TSO5_FW_SBSS_LEN 0x28 |
5742 | #define TG3_TSO5_FW_BSS_ADDR 0x00010f50 | 5742 | #define TG3_TSO5_FW_BSS_ADDR 0x00010f50 |
5743 | #define TG3_TSO5_FW_BSS_LEN 0x88 | 5743 | #define TG3_TSO5_FW_BSS_LEN 0x88 |
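The section lengths defined above are exactly what tg3_load_tso_firmware() adds up later to size the 5705 CPU scratch area. A minimal sketch of that arithmetic, using only the defines above (the helper name itself is hypothetical):

/* Illustrative only: total scratch space the 5705 TSO image needs,
 * mirroring the sum computed in tg3_load_tso_firmware() below.
 */
static unsigned long tg3_tso5_fw_scratch_size(void)
{
	return TG3_TSO5_FW_TEXT_LEN +	/* 0xe90 */
	       TG3_TSO5_FW_RODATA_LEN +	/* 0x50  */
	       TG3_TSO5_FW_DATA_LEN +	/* 0x20  */
	       TG3_TSO5_FW_SBSS_LEN +	/* 0x28  */
	       TG3_TSO5_FW_BSS_LEN;	/* 0x88  -> 0xfb0 bytes total */
}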
5744 | 5744 | ||
5745 | static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { | 5745 | static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { |
5746 | 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, | 5746 | 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, |
5747 | 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, | 5747 | 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, |
5748 | 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, | 5748 | 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, |
5749 | 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001, | 5749 | 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001, |
5750 | 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001, | 5750 | 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001, |
5751 | 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378, | 5751 | 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378, |
5752 | 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, | 5752 | 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, |
5753 | 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, | 5753 | 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, |
5754 | 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400, | 5754 | 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400, |
5755 | 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000, | 5755 | 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000, |
5756 | 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200, | 5756 | 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200, |
5757 | 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000, | 5757 | 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000, |
5758 | 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, | 5758 | 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, |
5759 | 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821, | 5759 | 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821, |
5760 | 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, | 5760 | 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, |
5761 | 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, | 5761 | 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, |
5762 | 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60, | 5762 | 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60, |
5763 | 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821, | 5763 | 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821, |
5764 | 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000, | 5764 | 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000, |
5765 | 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028, | 5765 | 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028, |
5766 | 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402, | 5766 | 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402, |
5767 | 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014, | 5767 | 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014, |
5768 | 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff, | 5768 | 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff, |
5769 | 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b, | 5769 | 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b, |
5770 | 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004, | 5770 | 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004, |
5771 | 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8, | 5771 | 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8, |
5772 | 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001, | 5772 | 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001, |
5773 | 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021, | 5773 | 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021, |
5774 | 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2, | 5774 | 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2, |
5775 | 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a, | 5775 | 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a, |
5776 | 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, | 5776 | 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, |
5777 | 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001, | 5777 | 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001, |
5778 | 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001, | 5778 | 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001, |
5779 | 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021, | 5779 | 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021, |
5780 | 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000, | 5780 | 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000, |
5781 | 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c, | 5781 | 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c, |
5782 | 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005, | 5782 | 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005, |
5783 | 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006, | 5783 | 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006, |
5784 | 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c, | 5784 | 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c, |
5785 | 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c, | 5785 | 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c, |
5786 | 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021, | 5786 | 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021, |
5787 | 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001, | 5787 | 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001, |
5788 | 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b, | 5788 | 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b, |
5789 | 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c, | 5789 | 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c, |
5790 | 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76, | 5790 | 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76, |
5791 | 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c, | 5791 | 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c, |
5792 | 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70, | 5792 | 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70, |
5793 | 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c, | 5793 | 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c, |
5794 | 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72, | 5794 | 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72, |
5795 | 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff, | 5795 | 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff, |
5796 | 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78, | 5796 | 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78, |
5797 | 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78, | 5797 | 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78, |
5798 | 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005, | 5798 | 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005, |
5799 | 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d, | 5799 | 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d, |
5800 | 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005, | 5800 | 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005, |
5801 | 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027, | 5801 | 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027, |
5802 | 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d, | 5802 | 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d, |
5803 | 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff, | 5803 | 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff, |
5804 | 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001, | 5804 | 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001, |
5805 | 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000, | 5805 | 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000, |
5806 | 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a, | 5806 | 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a, |
5807 | 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff, | 5807 | 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff, |
5808 | 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001, | 5808 | 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001, |
5809 | 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200, | 5809 | 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200, |
5810 | 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001, | 5810 | 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001, |
5811 | 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021, | 5811 | 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021, |
5812 | 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, | 5812 | 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, |
5813 | 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00, | 5813 | 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00, |
5814 | 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001, | 5814 | 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001, |
5815 | 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000, | 5815 | 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000, |
5816 | 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003, | 5816 | 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003, |
5817 | 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001, | 5817 | 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001, |
5818 | 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56, | 5818 | 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56, |
5819 | 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4, | 5819 | 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4, |
5820 | 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64, | 5820 | 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64, |
5821 | 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088, | 5821 | 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088, |
5822 | 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001, | 5822 | 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001, |
5823 | 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57, | 5823 | 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57, |
5824 | 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001, | 5824 | 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001, |
5825 | 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001, | 5825 | 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001, |
5826 | 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000, | 5826 | 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000, |
5827 | 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001, | 5827 | 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001, |
5828 | 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823, | 5828 | 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823, |
5829 | 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001, | 5829 | 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001, |
5830 | 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001, | 5830 | 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001, |
5831 | 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001, | 5831 | 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001, |
5832 | 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021, | 5832 | 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021, |
5833 | 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, | 5833 | 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, |
5834 | 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, | 5834 | 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, |
5835 | 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001, | 5835 | 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001, |
5836 | 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001, | 5836 | 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001, |
5837 | 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec, | 5837 | 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec, |
5838 | 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000, | 5838 | 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000, |
5839 | 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024, | 5839 | 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024, |
5840 | 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, | 5840 | 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, |
5841 | 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000, | 5841 | 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000, |
5842 | 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, | 5842 | 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, |
5843 | 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, | 5843 | 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, |
5844 | 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001, | 5844 | 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001, |
5845 | 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001, | 5845 | 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001, |
5846 | 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff, | 5846 | 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff, |
5847 | 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c, | 5847 | 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c, |
5848 | 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54, | 5848 | 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54, |
5849 | 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001, | 5849 | 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001, |
5850 | 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, | 5850 | 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, |
5851 | 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624, | 5851 | 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624, |
5852 | 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, | 5852 | 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, |
5853 | 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, | 5853 | 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, |
5854 | 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283, | 5854 | 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283, |
5855 | 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825, | 5855 | 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825, |
5856 | 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003, | 5856 | 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003, |
5857 | 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, | 5857 | 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, |
5858 | 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c, | 5858 | 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c, |
5859 | 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009, | 5859 | 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009, |
5860 | 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025, | 5860 | 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025, |
5861 | 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008, | 5861 | 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008, |
5862 | 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021, | 5862 | 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021, |
5863 | 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, | 5863 | 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, |
5864 | 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, | 5864 | 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, |
5865 | 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014, | 5865 | 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014, |
5866 | 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001, | 5866 | 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001, |
5867 | 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, | 5867 | 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, |
5868 | 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, | 5868 | 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, |
5869 | 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020, | 5869 | 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020, |
5870 | 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, | 5870 | 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, |
5871 | 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20, | 5871 | 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20, |
5872 | 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315, | 5872 | 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315, |
5873 | 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005, | 5873 | 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005, |
5874 | 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001, | 5874 | 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001, |
5875 | 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001, | 5875 | 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001, |
5876 | 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014, | 5876 | 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014, |
5877 | 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8, | 5877 | 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8, |
5878 | 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000, | 5878 | 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000, |
5879 | 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, | 5879 | 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, |
5880 | 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008, | 5880 | 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008, |
5881 | 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b, | 5881 | 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b, |
5882 | 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd, | 5882 | 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd, |
5883 | 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000, | 5883 | 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000, |
5884 | 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025, | 5884 | 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025, |
5885 | 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008, | 5885 | 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008, |
5886 | 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff, | 5886 | 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff, |
5887 | 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008, | 5887 | 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008, |
5888 | 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021, | 5888 | 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021, |
5889 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f, | 5889 | 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f, |
5890 | 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600, | 5890 | 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600, |
5891 | 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40, | 5891 | 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40, |
5892 | 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000, | 5892 | 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000, |
5893 | 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, | 5893 | 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, |
5894 | 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44, | 5894 | 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44, |
5895 | 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003, | 5895 | 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003, |
5896 | 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001, | 5896 | 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001, |
5897 | 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, | 5897 | 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, |
5898 | 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c, | 5898 | 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c, |
5899 | 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, | 5899 | 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, |
5900 | 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, | 5900 | 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, |
5901 | 0x00000000, 0x00000000, 0x00000000, | 5901 | 0x00000000, 0x00000000, 0x00000000, |
5902 | }; | 5902 | }; |
5903 | 5903 | ||
5904 | static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { | 5904 | static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { |
5905 | 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, | 5905 | 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, |
5906 | 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, | 5906 | 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, |
5907 | 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, | 5907 | 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, |
5908 | 0x00000000, 0x00000000, 0x00000000, | 5908 | 0x00000000, 0x00000000, 0x00000000, |
5909 | }; | 5909 | }; |
5910 | 5910 | ||
5911 | static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { | 5911 | static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { |
5912 | 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, | 5912 | 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, |
5913 | 0x00000000, 0x00000000, 0x00000000, | 5913 | 0x00000000, 0x00000000, 0x00000000, |
5914 | }; | 5914 | }; |
5915 | 5915 | ||
5916 | /* tp->lock is held. */ | 5916 | /* tp->lock is held. */ |
5917 | static int tg3_load_tso_firmware(struct tg3 *tp) | 5917 | static int tg3_load_tso_firmware(struct tg3 *tp) |
5918 | { | 5918 | { |
5919 | struct fw_info info; | 5919 | struct fw_info info; |
5920 | unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; | 5920 | unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; |
5921 | int err, i; | 5921 | int err, i; |
5922 | 5922 | ||
5923 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 5923 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
5924 | return 0; | 5924 | return 0; |
5925 | 5925 | ||
5926 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 5926 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
5927 | info.text_base = TG3_TSO5_FW_TEXT_ADDR; | 5927 | info.text_base = TG3_TSO5_FW_TEXT_ADDR; |
5928 | info.text_len = TG3_TSO5_FW_TEXT_LEN; | 5928 | info.text_len = TG3_TSO5_FW_TEXT_LEN; |
5929 | info.text_data = &tg3Tso5FwText[0]; | 5929 | info.text_data = &tg3Tso5FwText[0]; |
5930 | info.rodata_base = TG3_TSO5_FW_RODATA_ADDR; | 5930 | info.rodata_base = TG3_TSO5_FW_RODATA_ADDR; |
5931 | info.rodata_len = TG3_TSO5_FW_RODATA_LEN; | 5931 | info.rodata_len = TG3_TSO5_FW_RODATA_LEN; |
5932 | info.rodata_data = &tg3Tso5FwRodata[0]; | 5932 | info.rodata_data = &tg3Tso5FwRodata[0]; |
5933 | info.data_base = TG3_TSO5_FW_DATA_ADDR; | 5933 | info.data_base = TG3_TSO5_FW_DATA_ADDR; |
5934 | info.data_len = TG3_TSO5_FW_DATA_LEN; | 5934 | info.data_len = TG3_TSO5_FW_DATA_LEN; |
5935 | info.data_data = &tg3Tso5FwData[0]; | 5935 | info.data_data = &tg3Tso5FwData[0]; |
5936 | cpu_base = RX_CPU_BASE; | 5936 | cpu_base = RX_CPU_BASE; |
5937 | cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; | 5937 | cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; |
5938 | cpu_scratch_size = (info.text_len + | 5938 | cpu_scratch_size = (info.text_len + |
5939 | info.rodata_len + | 5939 | info.rodata_len + |
5940 | info.data_len + | 5940 | info.data_len + |
5941 | TG3_TSO5_FW_SBSS_LEN + | 5941 | TG3_TSO5_FW_SBSS_LEN + |
5942 | TG3_TSO5_FW_BSS_LEN); | 5942 | TG3_TSO5_FW_BSS_LEN); |
5943 | } else { | 5943 | } else { |
5944 | info.text_base = TG3_TSO_FW_TEXT_ADDR; | 5944 | info.text_base = TG3_TSO_FW_TEXT_ADDR; |
5945 | info.text_len = TG3_TSO_FW_TEXT_LEN; | 5945 | info.text_len = TG3_TSO_FW_TEXT_LEN; |
5946 | info.text_data = &tg3TsoFwText[0]; | 5946 | info.text_data = &tg3TsoFwText[0]; |
5947 | info.rodata_base = TG3_TSO_FW_RODATA_ADDR; | 5947 | info.rodata_base = TG3_TSO_FW_RODATA_ADDR; |
5948 | info.rodata_len = TG3_TSO_FW_RODATA_LEN; | 5948 | info.rodata_len = TG3_TSO_FW_RODATA_LEN; |
5949 | info.rodata_data = &tg3TsoFwRodata[0]; | 5949 | info.rodata_data = &tg3TsoFwRodata[0]; |
5950 | info.data_base = TG3_TSO_FW_DATA_ADDR; | 5950 | info.data_base = TG3_TSO_FW_DATA_ADDR; |
5951 | info.data_len = TG3_TSO_FW_DATA_LEN; | 5951 | info.data_len = TG3_TSO_FW_DATA_LEN; |
5952 | info.data_data = &tg3TsoFwData[0]; | 5952 | info.data_data = &tg3TsoFwData[0]; |
5953 | cpu_base = TX_CPU_BASE; | 5953 | cpu_base = TX_CPU_BASE; |
5954 | cpu_scratch_base = TX_CPU_SCRATCH_BASE; | 5954 | cpu_scratch_base = TX_CPU_SCRATCH_BASE; |
5955 | cpu_scratch_size = TX_CPU_SCRATCH_SIZE; | 5955 | cpu_scratch_size = TX_CPU_SCRATCH_SIZE; |
5956 | } | 5956 | } |
5957 | 5957 | ||
5958 | err = tg3_load_firmware_cpu(tp, cpu_base, | 5958 | err = tg3_load_firmware_cpu(tp, cpu_base, |
5959 | cpu_scratch_base, cpu_scratch_size, | 5959 | cpu_scratch_base, cpu_scratch_size, |
5960 | &info); | 5960 | &info); |
5961 | if (err) | 5961 | if (err) |
5962 | return err; | 5962 | return err; |
5963 | 5963 | ||
5964 | /* Now startup the cpu. */ | 5964 | /* Now startup the cpu. */ |
5965 | tw32(cpu_base + CPU_STATE, 0xffffffff); | 5965 | tw32(cpu_base + CPU_STATE, 0xffffffff); |
5966 | tw32_f(cpu_base + CPU_PC, info.text_base); | 5966 | tw32_f(cpu_base + CPU_PC, info.text_base); |
5967 | 5967 | ||
5968 | for (i = 0; i < 5; i++) { | 5968 | for (i = 0; i < 5; i++) { |
5969 | if (tr32(cpu_base + CPU_PC) == info.text_base) | 5969 | if (tr32(cpu_base + CPU_PC) == info.text_base) |
5970 | break; | 5970 | break; |
5971 | tw32(cpu_base + CPU_STATE, 0xffffffff); | 5971 | tw32(cpu_base + CPU_STATE, 0xffffffff); |
5972 | tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); | 5972 | tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); |
5973 | tw32_f(cpu_base + CPU_PC, info.text_base); | 5973 | tw32_f(cpu_base + CPU_PC, info.text_base); |
5974 | udelay(1000); | 5974 | udelay(1000); |
5975 | } | 5975 | } |
5976 | if (i >= 5) { | 5976 | if (i >= 5) { |
5977 | printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " | 5977 | printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " |
5978 | "to set CPU PC, is %08x should be %08x\n", | 5978 | "to set CPU PC, is %08x should be %08x\n", |
5979 | tp->dev->name, tr32(cpu_base + CPU_PC), | 5979 | tp->dev->name, tr32(cpu_base + CPU_PC), |
5980 | info.text_base); | 5980 | info.text_base); |
5981 | return -ENODEV; | 5981 | return -ENODEV; |
5982 | } | 5982 | } |
5983 | tw32(cpu_base + CPU_STATE, 0xffffffff); | 5983 | tw32(cpu_base + CPU_STATE, 0xffffffff); |
5984 | tw32_f(cpu_base + CPU_MODE, 0x00000000); | 5984 | tw32_f(cpu_base + CPU_MODE, 0x00000000); |
5985 | return 0; | 5985 | return 0; |
5986 | } | 5986 | } |
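The tail of tg3_load_tso_firmware() above follows a start-and-verify pattern: write the text base into the CPU's program counter, poll it back up to five times, halting and re-writing on each miss, then release the CPU from halt. A hedged sketch of that loop pulled out on its own (the helper name is hypothetical; tw32/tw32_f/tr32 and the register offsets are the ones used above):

/* Sketch only: start an on-chip firmware CPU at text_base and verify
 * that CPU_PC actually took the value, as done in tg3_load_tso_firmware().
 * Caller holds tp->lock.
 */
static int tg3_start_fw_cpu(struct tg3 *tp, unsigned long cpu_base,
			    u32 text_base)
{
	int i;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, text_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == text_base)
			break;
		/* PC did not stick: halt the CPU and try again. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, text_base);
		udelay(1000);
	}
	if (i >= 5)
		return -ENODEV;

	/* Clear any pending state and let the CPU run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}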
5987 | 5987 | ||
5988 | 5988 | ||
5989 | /* tp->lock is held. */ | 5989 | /* tp->lock is held. */ |
5990 | static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) | 5990 | static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) |
5991 | { | 5991 | { |
5992 | u32 addr_high, addr_low; | 5992 | u32 addr_high, addr_low; |
5993 | int i; | 5993 | int i; |
5994 | 5994 | ||
5995 | addr_high = ((tp->dev->dev_addr[0] << 8) | | 5995 | addr_high = ((tp->dev->dev_addr[0] << 8) | |
5996 | tp->dev->dev_addr[1]); | 5996 | tp->dev->dev_addr[1]); |
5997 | addr_low = ((tp->dev->dev_addr[2] << 24) | | 5997 | addr_low = ((tp->dev->dev_addr[2] << 24) | |
5998 | (tp->dev->dev_addr[3] << 16) | | 5998 | (tp->dev->dev_addr[3] << 16) | |
5999 | (tp->dev->dev_addr[4] << 8) | | 5999 | (tp->dev->dev_addr[4] << 8) | |
6000 | (tp->dev->dev_addr[5] << 0)); | 6000 | (tp->dev->dev_addr[5] << 0)); |
6001 | for (i = 0; i < 4; i++) { | 6001 | for (i = 0; i < 4; i++) { |
6002 | if (i == 1 && skip_mac_1) | 6002 | if (i == 1 && skip_mac_1) |
6003 | continue; | 6003 | continue; |
6004 | tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); | 6004 | tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); |
6005 | tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); | 6005 | tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); |
6006 | } | 6006 | } |
6007 | 6007 | ||
6008 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 6008 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
6009 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 6009 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { |
6010 | for (i = 0; i < 12; i++) { | 6010 | for (i = 0; i < 12; i++) { |
6011 | tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); | 6011 | tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); |
6012 | tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); | 6012 | tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); |
6013 | } | 6013 | } |
6014 | } | 6014 | } |
6015 | 6015 | ||
6016 | addr_high = (tp->dev->dev_addr[0] + | 6016 | addr_high = (tp->dev->dev_addr[0] + |
6017 | tp->dev->dev_addr[1] + | 6017 | tp->dev->dev_addr[1] + |
6018 | tp->dev->dev_addr[2] + | 6018 | tp->dev->dev_addr[2] + |
6019 | tp->dev->dev_addr[3] + | 6019 | tp->dev->dev_addr[3] + |
6020 | tp->dev->dev_addr[4] + | 6020 | tp->dev->dev_addr[4] + |
6021 | tp->dev->dev_addr[5]) & | 6021 | tp->dev->dev_addr[5]) & |
6022 | TX_BACKOFF_SEED_MASK; | 6022 | TX_BACKOFF_SEED_MASK; |
6023 | tw32(MAC_TX_BACKOFF_SEED, addr_high); | 6023 | tw32(MAC_TX_BACKOFF_SEED, addr_high); |
6024 | } | 6024 | } |
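The register writes above are just the 6-byte station address split into a 2-byte high word and a 4-byte low word. A minimal sketch of that packing (the helper is hypothetical; the byte order matches the expressions in __tg3_set_mac_addr()):

/* Illustrative helper: pack a MAC address into the MAC_ADDR_*_HIGH/LOW
 * register layout used above.
 */
static void tg3_mac_to_regs(const u8 *mac, u32 *high, u32 *low)
{
	*high = (mac[0] << 8) | mac[1];
	*low  = (mac[2] << 24) | (mac[3] << 16) |
		(mac[4] << 8)  |  mac[5];
}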
6025 | 6025 | ||
6026 | static int tg3_set_mac_addr(struct net_device *dev, void *p) | 6026 | static int tg3_set_mac_addr(struct net_device *dev, void *p) |
6027 | { | 6027 | { |
6028 | struct tg3 *tp = netdev_priv(dev); | 6028 | struct tg3 *tp = netdev_priv(dev); |
6029 | struct sockaddr *addr = p; | 6029 | struct sockaddr *addr = p; |
6030 | int err = 0, skip_mac_1 = 0; | 6030 | int err = 0, skip_mac_1 = 0; |
6031 | 6031 | ||
6032 | if (!is_valid_ether_addr(addr->sa_data)) | 6032 | if (!is_valid_ether_addr(addr->sa_data)) |
6033 | return -EINVAL; | 6033 | return -EINVAL; |
6034 | 6034 | ||
6035 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 6035 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
6036 | 6036 | ||
6037 | if (!netif_running(dev)) | 6037 | if (!netif_running(dev)) |
6038 | return 0; | 6038 | return 0; |
6039 | 6039 | ||
6040 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 6040 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
6041 | u32 addr0_high, addr0_low, addr1_high, addr1_low; | 6041 | u32 addr0_high, addr0_low, addr1_high, addr1_low; |
6042 | 6042 | ||
6043 | addr0_high = tr32(MAC_ADDR_0_HIGH); | 6043 | addr0_high = tr32(MAC_ADDR_0_HIGH); |
6044 | addr0_low = tr32(MAC_ADDR_0_LOW); | 6044 | addr0_low = tr32(MAC_ADDR_0_LOW); |
6045 | addr1_high = tr32(MAC_ADDR_1_HIGH); | 6045 | addr1_high = tr32(MAC_ADDR_1_HIGH); |
6046 | addr1_low = tr32(MAC_ADDR_1_LOW); | 6046 | addr1_low = tr32(MAC_ADDR_1_LOW); |
6047 | 6047 | ||
6048 | /* Skip MAC addr 1 if ASF is using it. */ | 6048 | /* Skip MAC addr 1 if ASF is using it. */ |
6049 | if ((addr0_high != addr1_high || addr0_low != addr1_low) && | 6049 | if ((addr0_high != addr1_high || addr0_low != addr1_low) && |
6050 | !(addr1_high == 0 && addr1_low == 0)) | 6050 | !(addr1_high == 0 && addr1_low == 0)) |
6051 | skip_mac_1 = 1; | 6051 | skip_mac_1 = 1; |
6052 | } | 6052 | } |
6053 | spin_lock_bh(&tp->lock); | 6053 | spin_lock_bh(&tp->lock); |
6054 | __tg3_set_mac_addr(tp, skip_mac_1); | 6054 | __tg3_set_mac_addr(tp, skip_mac_1); |
6055 | spin_unlock_bh(&tp->lock); | 6055 | spin_unlock_bh(&tp->lock); |
6056 | 6056 | ||
6057 | return err; | 6057 | return err; |
6058 | } | 6058 | } |
6059 | 6059 | ||
6060 | /* tp->lock is held. */ | 6060 | /* tp->lock is held. */ |
6061 | static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, | 6061 | static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, |
6062 | dma_addr_t mapping, u32 maxlen_flags, | 6062 | dma_addr_t mapping, u32 maxlen_flags, |
6063 | u32 nic_addr) | 6063 | u32 nic_addr) |
6064 | { | 6064 | { |
6065 | tg3_write_mem(tp, | 6065 | tg3_write_mem(tp, |
6066 | (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), | 6066 | (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), |
6067 | ((u64) mapping >> 32)); | 6067 | ((u64) mapping >> 32)); |
6068 | tg3_write_mem(tp, | 6068 | tg3_write_mem(tp, |
6069 | (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), | 6069 | (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), |
6070 | ((u64) mapping & 0xffffffff)); | 6070 | ((u64) mapping & 0xffffffff)); |
6071 | tg3_write_mem(tp, | 6071 | tg3_write_mem(tp, |
6072 | (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), | 6072 | (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), |
6073 | maxlen_flags); | 6073 | maxlen_flags); |
6074 | 6074 | ||
6075 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 6075 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
6076 | tg3_write_mem(tp, | 6076 | tg3_write_mem(tp, |
6077 | (bdinfo_addr + TG3_BDINFO_NIC_ADDR), | 6077 | (bdinfo_addr + TG3_BDINFO_NIC_ADDR), |
6078 | nic_addr); | 6078 | nic_addr); |
6079 | } | 6079 | } |
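tg3_set_bdinfo() writes one BDINFO block in NIC SRAM: the 64-bit host ring address as two 32-bit words, the packed maxlen/flags word, and (on pre-5705 parts) the NIC-side descriptor address. A hedged usage sketch, with the SRAM offset, buffer size and flags left as caller-supplied placeholders rather than real driver values:

/* Sketch only: program one ring's BDINFO block.  bdinfo_sram_addr,
 * max_buf_len and the zero nic_addr are illustrative placeholders.
 */
static void tg3_example_program_ring(struct tg3 *tp, u32 bdinfo_sram_addr,
				     dma_addr_t ring_mapping, u32 max_buf_len)
{
	tg3_set_bdinfo(tp, bdinfo_sram_addr, ring_mapping,
		       max_buf_len << BDINFO_FLAGS_MAXLEN_SHIFT, 0);
}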
6080 | 6080 | ||
6081 | static void __tg3_set_rx_mode(struct net_device *); | 6081 | static void __tg3_set_rx_mode(struct net_device *); |
6082 | static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | 6082 | static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) |
6083 | { | 6083 | { |
6084 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | 6084 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); |
6085 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); | 6085 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); |
6086 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | 6086 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); |
6087 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); | 6087 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); |
6088 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6088 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6089 | tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); | 6089 | tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); |
6090 | tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); | 6090 | tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); |
6091 | } | 6091 | } |
6092 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | 6092 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); |
6093 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); | 6093 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); |
6094 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6094 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6095 | u32 val = ec->stats_block_coalesce_usecs; | 6095 | u32 val = ec->stats_block_coalesce_usecs; |
6096 | 6096 | ||
6097 | if (!netif_carrier_ok(tp->dev)) | 6097 | if (!netif_carrier_ok(tp->dev)) |
6098 | val = 0; | 6098 | val = 0; |
6099 | 6099 | ||
6100 | tw32(HOSTCC_STAT_COAL_TICKS, val); | 6100 | tw32(HOSTCC_STAT_COAL_TICKS, val); |
6101 | } | 6101 | } |
6102 | } | 6102 | } |
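__tg3_set_coalesce() simply copies the generic ethtool coalescing parameters into the host coalescing engine registers, skipping the per-IRQ tick registers and the statistics tick on 5705-plus parts. A hedged caller sketch (the numbers are arbitrary examples, not the driver's defaults; in the real driver this is reached with tp->coal from paths that hold tp->lock):

/* Sketch only: apply a hand-built coalescing configuration. */
static void tg3_example_apply_coalesce(struct tg3 *tp)
{
	struct ethtool_coalesce ec = {
		.rx_coalesce_usecs	 = 20,
		.tx_coalesce_usecs	 = 72,
		.rx_max_coalesced_frames = 5,
		.tx_max_coalesced_frames = 53,
	};

	/* Assumed to run under tp->lock, like the real callers. */
	__tg3_set_coalesce(tp, &ec);
}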
6103 | 6103 | ||
6104 | /* tp->lock is held. */ | 6104 | /* tp->lock is held. */ |
6105 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | 6105 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) |
6106 | { | 6106 | { |
6107 | u32 val, rdmac_mode; | 6107 | u32 val, rdmac_mode; |
6108 | int i, err, limit; | 6108 | int i, err, limit; |
6109 | 6109 | ||
6110 | tg3_disable_ints(tp); | 6110 | tg3_disable_ints(tp); |
6111 | 6111 | ||
6112 | tg3_stop_fw(tp); | 6112 | tg3_stop_fw(tp); |
6113 | 6113 | ||
6114 | tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); | 6114 | tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); |
6115 | 6115 | ||
6116 | if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { | 6116 | if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { |
6117 | tg3_abort_hw(tp, 1); | 6117 | tg3_abort_hw(tp, 1); |
6118 | } | 6118 | } |
6119 | 6119 | ||
6120 | if (reset_phy) | 6120 | if (reset_phy) |
6121 | tg3_phy_reset(tp); | 6121 | tg3_phy_reset(tp); |
6122 | 6122 | ||
6123 | err = tg3_chip_reset(tp); | 6123 | err = tg3_chip_reset(tp); |
6124 | if (err) | 6124 | if (err) |
6125 | return err; | 6125 | return err; |
6126 | 6126 | ||
6127 | tg3_write_sig_legacy(tp, RESET_KIND_INIT); | 6127 | tg3_write_sig_legacy(tp, RESET_KIND_INIT); |
6128 | 6128 | ||
6129 | /* This works around an issue with Athlon chipsets on | 6129 | /* This works around an issue with Athlon chipsets on |
6130 | * B3 tigon3 silicon. This bit has no effect on any | 6130 | * B3 tigon3 silicon. This bit has no effect on any |
6131 | * other revision. But do not set this on PCI Express | 6131 | * other revision. But do not set this on PCI Express |
6132 | * chips. | 6132 | * chips. |
6133 | */ | 6133 | */ |
6134 | if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 6134 | if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) |
6135 | tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; | 6135 | tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; |
6136 | tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 6136 | tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); |
6137 | 6137 | ||
6138 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 6138 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && |
6139 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 6139 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { |
6140 | val = tr32(TG3PCI_PCISTATE); | 6140 | val = tr32(TG3PCI_PCISTATE); |
6141 | val |= PCISTATE_RETRY_SAME_DMA; | 6141 | val |= PCISTATE_RETRY_SAME_DMA; |
6142 | tw32(TG3PCI_PCISTATE, val); | 6142 | tw32(TG3PCI_PCISTATE, val); |
6143 | } | 6143 | } |
6144 | 6144 | ||
6145 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { | 6145 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { |
6146 | /* Enable some hw fixes. */ | 6146 | /* Enable some hw fixes. */ |
6147 | val = tr32(TG3PCI_MSI_DATA); | 6147 | val = tr32(TG3PCI_MSI_DATA); |
6148 | val |= (1 << 26) | (1 << 28) | (1 << 29); | 6148 | val |= (1 << 26) | (1 << 28) | (1 << 29); |
6149 | tw32(TG3PCI_MSI_DATA, val); | 6149 | tw32(TG3PCI_MSI_DATA, val); |
6150 | } | 6150 | } |
6151 | 6151 | ||
6152 | /* Descriptor ring init may make accesses to the | 6152 | /* Descriptor ring init may make accesses to the |
6153 | * NIC SRAM area to setup the TX descriptors, so we | 6153 | * NIC SRAM area to setup the TX descriptors, so we |
6154 | * can only do this after the hardware has been | 6154 | * can only do this after the hardware has been |
6155 | * successfully reset. | 6155 | * successfully reset. |
6156 | */ | 6156 | */ |
6157 | err = tg3_init_rings(tp); | 6157 | err = tg3_init_rings(tp); |
6158 | if (err) | 6158 | if (err) |
6159 | return err; | 6159 | return err; |
6160 | 6160 | ||
6161 | /* This value is determined during the probe time DMA | 6161 | /* This value is determined during the probe time DMA |
6162 | * engine test, tg3_test_dma. | 6162 | * engine test, tg3_test_dma. |
6163 | */ | 6163 | */ |
6164 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 6164 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
6165 | 6165 | ||
6166 | tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | | 6166 | tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | |
6167 | GRC_MODE_4X_NIC_SEND_RINGS | | 6167 | GRC_MODE_4X_NIC_SEND_RINGS | |
6168 | GRC_MODE_NO_TX_PHDR_CSUM | | 6168 | GRC_MODE_NO_TX_PHDR_CSUM | |
6169 | GRC_MODE_NO_RX_PHDR_CSUM); | 6169 | GRC_MODE_NO_RX_PHDR_CSUM); |
6170 | tp->grc_mode |= GRC_MODE_HOST_SENDBDS; | 6170 | tp->grc_mode |= GRC_MODE_HOST_SENDBDS; |
6171 | 6171 | ||
6172 | /* Pseudo-header checksum is done by hardware logic and not | 6172 | /* Pseudo-header checksum is done by hardware logic and not |
6173 | * the offload processors, so make the chip do the pseudo- | 6173 | * the offload processors, so make the chip do the pseudo- |
6174 | * header checksums on receive. For transmit it is more | 6174 | * header checksums on receive. For transmit it is more |
6175 | * convenient to do the pseudo-header checksum in software | 6175 | * convenient to do the pseudo-header checksum in software |
6176 | * as Linux does that on transmit for us in all cases. | 6176 | * as Linux does that on transmit for us in all cases. |
6177 | */ | 6177 | */ |
6178 | tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; | 6178 | tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; |
6179 | 6179 | ||
6180 | tw32(GRC_MODE, | 6180 | tw32(GRC_MODE, |
6181 | tp->grc_mode | | 6181 | tp->grc_mode | |
6182 | (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); | 6182 | (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); |
6183 | 6183 | ||
6184 | /* Setup the timer prescalar register. Clock is always 66Mhz. */ | 6184 | /* Setup the timer prescalar register. Clock is always 66Mhz. */ |
6185 | val = tr32(GRC_MISC_CFG); | 6185 | val = tr32(GRC_MISC_CFG); |
6186 | val &= ~0xff; | 6186 | val &= ~0xff; |
6187 | val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); | 6187 | val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); |
6188 | tw32(GRC_MISC_CFG, val); | 6188 | tw32(GRC_MISC_CFG, val); |
6189 | 6189 | ||
6190 | /* Initialize MBUF/DESC pool. */ | 6190 | /* Initialize MBUF/DESC pool. */ |
6191 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 6191 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
6192 | /* Do nothing. */ | 6192 | /* Do nothing. */ |
6193 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { | 6193 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { |
6194 | tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); | 6194 | tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); |
6195 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 6195 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) |
6196 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); | 6196 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); |
6197 | else | 6197 | else |
6198 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); | 6198 | tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); |
6199 | tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); | 6199 | tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); |
6200 | tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); | 6200 | tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); |
6201 | } | 6201 | } |
6202 | else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 6202 | else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { |
6203 | int fw_len; | 6203 | int fw_len; |
6204 | 6204 | ||
6205 | fw_len = (TG3_TSO5_FW_TEXT_LEN + | 6205 | fw_len = (TG3_TSO5_FW_TEXT_LEN + |
6206 | TG3_TSO5_FW_RODATA_LEN + | 6206 | TG3_TSO5_FW_RODATA_LEN + |
6207 | TG3_TSO5_FW_DATA_LEN + | 6207 | TG3_TSO5_FW_DATA_LEN + |
6208 | TG3_TSO5_FW_SBSS_LEN + | 6208 | TG3_TSO5_FW_SBSS_LEN + |
6209 | TG3_TSO5_FW_BSS_LEN); | 6209 | TG3_TSO5_FW_BSS_LEN); |
6210 | fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); | 6210 | fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); |
6211 | tw32(BUFMGR_MB_POOL_ADDR, | 6211 | tw32(BUFMGR_MB_POOL_ADDR, |
6212 | NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); | 6212 | NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); |
6213 | tw32(BUFMGR_MB_POOL_SIZE, | 6213 | tw32(BUFMGR_MB_POOL_SIZE, |
6214 | NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); | 6214 | NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); |
6215 | } | 6215 | } |
6216 | 6216 | ||
6217 | if (tp->dev->mtu <= ETH_DATA_LEN) { | 6217 | if (tp->dev->mtu <= ETH_DATA_LEN) { |
6218 | tw32(BUFMGR_MB_RDMA_LOW_WATER, | 6218 | tw32(BUFMGR_MB_RDMA_LOW_WATER, |
6219 | tp->bufmgr_config.mbuf_read_dma_low_water); | 6219 | tp->bufmgr_config.mbuf_read_dma_low_water); |
6220 | tw32(BUFMGR_MB_MACRX_LOW_WATER, | 6220 | tw32(BUFMGR_MB_MACRX_LOW_WATER, |
6221 | tp->bufmgr_config.mbuf_mac_rx_low_water); | 6221 | tp->bufmgr_config.mbuf_mac_rx_low_water); |
6222 | tw32(BUFMGR_MB_HIGH_WATER, | 6222 | tw32(BUFMGR_MB_HIGH_WATER, |
6223 | tp->bufmgr_config.mbuf_high_water); | 6223 | tp->bufmgr_config.mbuf_high_water); |
6224 | } else { | 6224 | } else { |
6225 | tw32(BUFMGR_MB_RDMA_LOW_WATER, | 6225 | tw32(BUFMGR_MB_RDMA_LOW_WATER, |
6226 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); | 6226 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); |
6227 | tw32(BUFMGR_MB_MACRX_LOW_WATER, | 6227 | tw32(BUFMGR_MB_MACRX_LOW_WATER, |
6228 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); | 6228 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); |
6229 | tw32(BUFMGR_MB_HIGH_WATER, | 6229 | tw32(BUFMGR_MB_HIGH_WATER, |
6230 | tp->bufmgr_config.mbuf_high_water_jumbo); | 6230 | tp->bufmgr_config.mbuf_high_water_jumbo); |
6231 | } | 6231 | } |
6232 | tw32(BUFMGR_DMA_LOW_WATER, | 6232 | tw32(BUFMGR_DMA_LOW_WATER, |
6233 | tp->bufmgr_config.dma_low_water); | 6233 | tp->bufmgr_config.dma_low_water); |
6234 | tw32(BUFMGR_DMA_HIGH_WATER, | 6234 | tw32(BUFMGR_DMA_HIGH_WATER, |
6235 | tp->bufmgr_config.dma_high_water); | 6235 | tp->bufmgr_config.dma_high_water); |
6236 | 6236 | ||
6237 | tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); | 6237 | tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); |
6238 | for (i = 0; i < 2000; i++) { | 6238 | for (i = 0; i < 2000; i++) { |
6239 | if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) | 6239 | if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) |
6240 | break; | 6240 | break; |
6241 | udelay(10); | 6241 | udelay(10); |
6242 | } | 6242 | } |
6243 | if (i >= 2000) { | 6243 | if (i >= 2000) { |
6244 | printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", | 6244 | printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", |
6245 | tp->dev->name); | 6245 | tp->dev->name); |
6246 | return -ENODEV; | 6246 | return -ENODEV; |
6247 | } | 6247 | } |
6248 | 6248 | ||
6249 | /* Setup replenish threshold. */ | 6249 | /* Setup replenish threshold. */ |
6250 | val = tp->rx_pending / 8; | 6250 | val = tp->rx_pending / 8; |
6251 | if (val == 0) | 6251 | if (val == 0) |
6252 | val = 1; | 6252 | val = 1; |
6253 | else if (val > tp->rx_std_max_post) | 6253 | else if (val > tp->rx_std_max_post) |
6254 | val = tp->rx_std_max_post; | 6254 | val = tp->rx_std_max_post; |
6255 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 6255 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
6256 | if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) | 6256 | if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) |
6257 | tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); | 6257 | tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); |
6258 | 6258 | ||
6259 | if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) | 6259 | if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) |
6260 | val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; | 6260 | val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; |
6261 | } | 6261 | } |
6262 | 6262 | ||
6263 | tw32(RCVBDI_STD_THRESH, val); | 6263 | tw32(RCVBDI_STD_THRESH, val); |
6264 | 6264 | ||
6265 | /* Initialize TG3_BDINFO's at: | 6265 | /* Initialize TG3_BDINFO's at: |
6266 | * RCVDBDI_STD_BD: standard eth size rx ring | 6266 | * RCVDBDI_STD_BD: standard eth size rx ring |
6267 | * RCVDBDI_JUMBO_BD: jumbo frame rx ring | 6267 | * RCVDBDI_JUMBO_BD: jumbo frame rx ring |
6268 | * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) | 6268 | * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) |
6269 | * | 6269 | * |
6270 | * like so: | 6270 | * like so: |
6271 | * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring | 6271 | * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring |
6272 | * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | | 6272 | * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | |
6273 | * ring attribute flags | 6273 | * ring attribute flags |
6274 | * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM | 6274 | * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM |
6275 | * | 6275 | * |
6276 | * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. | 6276 | * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. |
6277 | * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. | 6277 | * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. |
6278 | * | 6278 | * |
6279 | * The size of each ring is fixed in the firmware, but the location is | 6279 | * The size of each ring is fixed in the firmware, but the location is |
6280 | * configurable. | 6280 | * configurable. |
6281 | */ | 6281 | */ |
6282 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, | 6282 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, |
6283 | ((u64) tp->rx_std_mapping >> 32)); | 6283 | ((u64) tp->rx_std_mapping >> 32)); |
6284 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 6284 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
6285 | ((u64) tp->rx_std_mapping & 0xffffffff)); | 6285 | ((u64) tp->rx_std_mapping & 0xffffffff)); |
6286 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, | 6286 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, |
6287 | NIC_SRAM_RX_BUFFER_DESC); | 6287 | NIC_SRAM_RX_BUFFER_DESC); |
6288 | 6288 | ||
6289 | /* Don't even try to program the JUMBO/MINI buffer descriptor | 6289 | /* Don't even try to program the JUMBO/MINI buffer descriptor |
6290 | * configs on 5705. | 6290 | * configs on 5705. |
6291 | */ | 6291 | */ |
6292 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 6292 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
6293 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, | 6293 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, |
6294 | RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT); | 6294 | RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT); |
6295 | } else { | 6295 | } else { |
6296 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, | 6296 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, |
6297 | RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); | 6297 | RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); |
6298 | 6298 | ||
6299 | tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, | 6299 | tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, |
6300 | BDINFO_FLAGS_DISABLED); | 6300 | BDINFO_FLAGS_DISABLED); |
6301 | 6301 | ||
6302 | /* Setup replenish threshold. */ | 6302 | /* Setup replenish threshold. */ |
6303 | tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); | 6303 | tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); |
6304 | 6304 | ||
6305 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 6305 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
6306 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, | 6306 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, |
6307 | ((u64) tp->rx_jumbo_mapping >> 32)); | 6307 | ((u64) tp->rx_jumbo_mapping >> 32)); |
6308 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 6308 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
6309 | ((u64) tp->rx_jumbo_mapping & 0xffffffff)); | 6309 | ((u64) tp->rx_jumbo_mapping & 0xffffffff)); |
6310 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 6310 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
6311 | RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); | 6311 | RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); |
6312 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, | 6312 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, |
6313 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | 6313 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); |
6314 | } else { | 6314 | } else { |
6315 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 6315 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
6316 | BDINFO_FLAGS_DISABLED); | 6316 | BDINFO_FLAGS_DISABLED); |
6317 | } | 6317 | } |
6318 | 6318 | ||
6319 | } | 6319 | } |
6320 | 6320 | ||
6321 | /* There is only one send ring on 5705/5750, no need to explicitly | 6321 | /* There is only one send ring on 5705/5750, no need to explicitly |
6322 | * disable the others. | 6322 | * disable the others. |
6323 | */ | 6323 | */ |
6324 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6324 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6325 | /* Clear out send RCB ring in SRAM. */ | 6325 | /* Clear out send RCB ring in SRAM. */ |
6326 | for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE) | 6326 | for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE) |
6327 | tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, | 6327 | tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, |
6328 | BDINFO_FLAGS_DISABLED); | 6328 | BDINFO_FLAGS_DISABLED); |
6329 | } | 6329 | } |
6330 | 6330 | ||
6331 | tp->tx_prod = 0; | 6331 | tp->tx_prod = 0; |
6332 | tp->tx_cons = 0; | 6332 | tp->tx_cons = 0; |
6333 | tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); | 6333 | tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); |
6334 | tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); | 6334 | tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); |
6335 | 6335 | ||
6336 | tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, | 6336 | tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, |
6337 | tp->tx_desc_mapping, | 6337 | tp->tx_desc_mapping, |
6338 | (TG3_TX_RING_SIZE << | 6338 | (TG3_TX_RING_SIZE << |
6339 | BDINFO_FLAGS_MAXLEN_SHIFT), | 6339 | BDINFO_FLAGS_MAXLEN_SHIFT), |
6340 | NIC_SRAM_TX_BUFFER_DESC); | 6340 | NIC_SRAM_TX_BUFFER_DESC); |
6341 | 6341 | ||
6342 | /* There is only one receive return ring on 5705/5750, no need | 6342 | /* There is only one receive return ring on 5705/5750, no need |
6343 | * to explicitly disable the others. | 6343 | * to explicitly disable the others. |
6344 | */ | 6344 | */ |
6345 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6345 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6346 | for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; | 6346 | for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; |
6347 | i += TG3_BDINFO_SIZE) { | 6347 | i += TG3_BDINFO_SIZE) { |
6348 | tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, | 6348 | tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, |
6349 | BDINFO_FLAGS_DISABLED); | 6349 | BDINFO_FLAGS_DISABLED); |
6350 | } | 6350 | } |
6351 | } | 6351 | } |
6352 | 6352 | ||
6353 | tp->rx_rcb_ptr = 0; | 6353 | tp->rx_rcb_ptr = 0; |
6354 | tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0); | 6354 | tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0); |
6355 | 6355 | ||
6356 | tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, | 6356 | tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, |
6357 | tp->rx_rcb_mapping, | 6357 | tp->rx_rcb_mapping, |
6358 | (TG3_RX_RCB_RING_SIZE(tp) << | 6358 | (TG3_RX_RCB_RING_SIZE(tp) << |
6359 | BDINFO_FLAGS_MAXLEN_SHIFT), | 6359 | BDINFO_FLAGS_MAXLEN_SHIFT), |
6360 | 0); | 6360 | 0); |
6361 | 6361 | ||
6362 | tp->rx_std_ptr = tp->rx_pending; | 6362 | tp->rx_std_ptr = tp->rx_pending; |
6363 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 6363 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, |
6364 | tp->rx_std_ptr); | 6364 | tp->rx_std_ptr); |
6365 | 6365 | ||
6366 | tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? | 6366 | tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? |
6367 | tp->rx_jumbo_pending : 0; | 6367 | tp->rx_jumbo_pending : 0; |
6368 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 6368 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, |
6369 | tp->rx_jumbo_ptr); | 6369 | tp->rx_jumbo_ptr); |
6370 | 6370 | ||
6371 | /* Initialize MAC address and backoff seed. */ | 6371 | /* Initialize MAC address and backoff seed. */ |
6372 | __tg3_set_mac_addr(tp, 0); | 6372 | __tg3_set_mac_addr(tp, 0); |
6373 | 6373 | ||
6374 | /* MTU + ethernet header + FCS + optional VLAN tag */ | 6374 | /* MTU + ethernet header + FCS + optional VLAN tag */ |
6375 | tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8); | 6375 | tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8); |
6376 | 6376 | ||
6377 | /* The slot time is changed by tg3_setup_phy if we | 6377 | /* The slot time is changed by tg3_setup_phy if we |
6378 | * run at gigabit with half duplex. | 6378 | * run at gigabit with half duplex. |
6379 | */ | 6379 | */ |
6380 | tw32(MAC_TX_LENGTHS, | 6380 | tw32(MAC_TX_LENGTHS, |
6381 | (2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 6381 | (2 << TX_LENGTHS_IPG_CRS_SHIFT) | |
6382 | (6 << TX_LENGTHS_IPG_SHIFT) | | 6382 | (6 << TX_LENGTHS_IPG_SHIFT) | |
6383 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); | 6383 | (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); |
6384 | 6384 | ||
6385 | /* Receive rules. */ | 6385 | /* Receive rules. */ |
6386 | tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); | 6386 | tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); |
6387 | tw32(RCVLPC_CONFIG, 0x0181); | 6387 | tw32(RCVLPC_CONFIG, 0x0181); |
6388 | 6388 | ||
6389 | /* Calculate RDMAC_MODE setting early, we need it to determine | 6389 | /* Calculate RDMAC_MODE setting early, we need it to determine |
6390 | * the RCVLPC_STATE_ENABLE mask. | 6390 | * the RCVLPC_STATE_ENABLE mask. |
6391 | */ | 6391 | */ |
6392 | rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | | 6392 | rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | |
6393 | RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | | 6393 | RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | |
6394 | RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | | 6394 | RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | |
6395 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | | 6395 | RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | |
6396 | RDMAC_MODE_LNGREAD_ENAB); | 6396 | RDMAC_MODE_LNGREAD_ENAB); |
6397 | 6397 | ||
6398 | /* If statement applies to 5705 and 5750 PCI devices only */ | 6398 | /* If statement applies to 5705 and 5750 PCI devices only */ |
6399 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 6399 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
6400 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 6400 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || |
6401 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { | 6401 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { |
6402 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && | 6402 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && |
6403 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 6403 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { |
6404 | rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; | 6404 | rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; |
6405 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 6405 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && |
6406 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { | 6406 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { |
6407 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 6407 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; |
6408 | } | 6408 | } |
6409 | } | 6409 | } |
6410 | 6410 | ||
6411 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) | 6411 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) |
6412 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 6412 | rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; |
6413 | 6413 | ||
6414 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 6414 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
6415 | rdmac_mode |= (1 << 27); | 6415 | rdmac_mode |= (1 << 27); |
6416 | 6416 | ||
6417 | /* Receive/send statistics. */ | 6417 | /* Receive/send statistics. */ |
6418 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 6418 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
6419 | val = tr32(RCVLPC_STATS_ENABLE); | 6419 | val = tr32(RCVLPC_STATS_ENABLE); |
6420 | val &= ~RCVLPC_STATSENAB_DACK_FIX; | 6420 | val &= ~RCVLPC_STATSENAB_DACK_FIX; |
6421 | tw32(RCVLPC_STATS_ENABLE, val); | 6421 | tw32(RCVLPC_STATS_ENABLE, val); |
6422 | } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && | 6422 | } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && |
6423 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 6423 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { |
6424 | val = tr32(RCVLPC_STATS_ENABLE); | 6424 | val = tr32(RCVLPC_STATS_ENABLE); |
6425 | val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; | 6425 | val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; |
6426 | tw32(RCVLPC_STATS_ENABLE, val); | 6426 | tw32(RCVLPC_STATS_ENABLE, val); |
6427 | } else { | 6427 | } else { |
6428 | tw32(RCVLPC_STATS_ENABLE, 0xffffff); | 6428 | tw32(RCVLPC_STATS_ENABLE, 0xffffff); |
6429 | } | 6429 | } |
6430 | tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); | 6430 | tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); |
6431 | tw32(SNDDATAI_STATSENAB, 0xffffff); | 6431 | tw32(SNDDATAI_STATSENAB, 0xffffff); |
6432 | tw32(SNDDATAI_STATSCTRL, | 6432 | tw32(SNDDATAI_STATSCTRL, |
6433 | (SNDDATAI_SCTRL_ENABLE | | 6433 | (SNDDATAI_SCTRL_ENABLE | |
6434 | SNDDATAI_SCTRL_FASTUPD)); | 6434 | SNDDATAI_SCTRL_FASTUPD)); |
6435 | 6435 | ||
6436 | /* Setup host coalescing engine. */ | 6436 | /* Setup host coalescing engine. */ |
6437 | tw32(HOSTCC_MODE, 0); | 6437 | tw32(HOSTCC_MODE, 0); |
6438 | for (i = 0; i < 2000; i++) { | 6438 | for (i = 0; i < 2000; i++) { |
6439 | if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) | 6439 | if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) |
6440 | break; | 6440 | break; |
6441 | udelay(10); | 6441 | udelay(10); |
6442 | } | 6442 | } |
6443 | 6443 | ||
6444 | __tg3_set_coalesce(tp, &tp->coal); | 6444 | __tg3_set_coalesce(tp, &tp->coal); |
6445 | 6445 | ||
6446 | /* set status block DMA address */ | 6446 | /* set status block DMA address */ |
6447 | tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, | 6447 | tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, |
6448 | ((u64) tp->status_mapping >> 32)); | 6448 | ((u64) tp->status_mapping >> 32)); |
6449 | tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, | 6449 | tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, |
6450 | ((u64) tp->status_mapping & 0xffffffff)); | 6450 | ((u64) tp->status_mapping & 0xffffffff)); |
6451 | 6451 | ||
6452 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6452 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6453 | /* Status/statistics block address. See tg3_timer, | 6453 | /* Status/statistics block address. See tg3_timer, |
6454 | * the tg3_periodic_fetch_stats call there, and | 6454 | * the tg3_periodic_fetch_stats call there, and |
6455 | * tg3_get_stats to see how this works for 5705/5750 chips. | 6455 | * tg3_get_stats to see how this works for 5705/5750 chips. |
6456 | */ | 6456 | */ |
6457 | tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, | 6457 | tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, |
6458 | ((u64) tp->stats_mapping >> 32)); | 6458 | ((u64) tp->stats_mapping >> 32)); |
6459 | tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, | 6459 | tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, |
6460 | ((u64) tp->stats_mapping & 0xffffffff)); | 6460 | ((u64) tp->stats_mapping & 0xffffffff)); |
6461 | tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); | 6461 | tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); |
6462 | tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); | 6462 | tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); |
6463 | } | 6463 | } |
6464 | 6464 | ||
6465 | tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); | 6465 | tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); |
6466 | 6466 | ||
6467 | tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); | 6467 | tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); |
6468 | tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); | 6468 | tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); |
6469 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 6469 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
6470 | tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); | 6470 | tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); |
6471 | 6471 | ||
6472 | /* Clear statistics/status block in chip, and status block in ram. */ | 6472 | /* Clear statistics/status block in chip, and status block in ram. */ |
6473 | for (i = NIC_SRAM_STATS_BLK; | 6473 | for (i = NIC_SRAM_STATS_BLK; |
6474 | i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; | 6474 | i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; |
6475 | i += sizeof(u32)) { | 6475 | i += sizeof(u32)) { |
6476 | tg3_write_mem(tp, i, 0); | 6476 | tg3_write_mem(tp, i, 0); |
6477 | udelay(40); | 6477 | udelay(40); |
6478 | } | 6478 | } |
6479 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); | 6479 | memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); |
6480 | 6480 | ||
6481 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 6481 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
6482 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 6482 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
6483 | /* reset to prevent losing 1st rx packet intermittently */ | 6483 | /* reset to prevent losing 1st rx packet intermittently */ |
6484 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 6484 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); |
6485 | udelay(10); | 6485 | udelay(10); |
6486 | } | 6486 | } |
6487 | 6487 | ||
6488 | tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | | 6488 | tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | |
6489 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; | 6489 | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; |
6490 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 6490 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && |
6491 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 6491 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && |
6492 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) | 6492 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) |
6493 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; | 6493 | tp->mac_mode |= MAC_MODE_LINK_POLARITY; |
6494 | tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); | 6494 | tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); |
6495 | udelay(40); | 6495 | udelay(40); |
6496 | 6496 | ||
6497 | /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). | 6497 | /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). |
6498 | * If TG3_FLG2_IS_NIC is zero, we should read the | 6498 | * If TG3_FLG2_IS_NIC is zero, we should read the |
6499 | * register to preserve the GPIO settings for LOMs. The GPIOs, | 6499 | * register to preserve the GPIO settings for LOMs. The GPIOs, |
6500 | * whether used as inputs or outputs, are set by boot code after | 6500 | * whether used as inputs or outputs, are set by boot code after |
6501 | * reset. | 6501 | * reset. |
6502 | */ | 6502 | */ |
6503 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { | 6503 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { |
6504 | u32 gpio_mask; | 6504 | u32 gpio_mask; |
6505 | 6505 | ||
6506 | gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | | 6506 | gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | |
6507 | GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | | 6507 | GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | |
6508 | GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; | 6508 | GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; |
6509 | 6509 | ||
6510 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 6510 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) |
6511 | gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | | 6511 | gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | |
6512 | GRC_LCLCTRL_GPIO_OUTPUT3; | 6512 | GRC_LCLCTRL_GPIO_OUTPUT3; |
6513 | 6513 | ||
6514 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 6514 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
6515 | gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; | 6515 | gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; |
6516 | 6516 | ||
6517 | tp->grc_local_ctrl &= ~gpio_mask; | 6517 | tp->grc_local_ctrl &= ~gpio_mask; |
6518 | tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; | 6518 | tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; |
6519 | 6519 | ||
6520 | /* GPIO1 must be driven high for eeprom write protect */ | 6520 | /* GPIO1 must be driven high for eeprom write protect */ |
6521 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) | 6521 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) |
6522 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 6522 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | |
6523 | GRC_LCLCTRL_GPIO_OUTPUT1); | 6523 | GRC_LCLCTRL_GPIO_OUTPUT1); |
6524 | } | 6524 | } |
6525 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 6525 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
6526 | udelay(100); | 6526 | udelay(100); |
6527 | 6527 | ||
6528 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); | 6528 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); |
6529 | tp->last_tag = 0; | 6529 | tp->last_tag = 0; |
6530 | 6530 | ||
6531 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 6531 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
6532 | tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); | 6532 | tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); |
6533 | udelay(40); | 6533 | udelay(40); |
6534 | } | 6534 | } |
6535 | 6535 | ||
6536 | val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | | 6536 | val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | |
6537 | WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | | 6537 | WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | |
6538 | WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | | 6538 | WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | |
6539 | WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | | 6539 | WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | |
6540 | WDMAC_MODE_LNGREAD_ENAB); | 6540 | WDMAC_MODE_LNGREAD_ENAB); |
6541 | 6541 | ||
6542 | /* If statement applies to 5705 and 5750 PCI devices only */ | 6542 | /* If statement applies to 5705 and 5750 PCI devices only */ |
6543 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 6543 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
6544 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 6544 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || |
6545 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { | 6545 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { |
6546 | if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 6546 | if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && |
6547 | (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || | 6547 | (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || |
6548 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { | 6548 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { |
6549 | /* nothing */ | 6549 | /* nothing */ |
6550 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 6550 | } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && |
6551 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 6551 | !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && |
6552 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | 6552 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { |
6553 | val |= WDMAC_MODE_RX_ACCEL; | 6553 | val |= WDMAC_MODE_RX_ACCEL; |
6554 | } | 6554 | } |
6555 | } | 6555 | } |
6556 | 6556 | ||
6557 | /* Enable host coalescing bug fix */ | 6557 | /* Enable host coalescing bug fix */ |
6558 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || | 6558 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || |
6559 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) | 6559 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) |
6560 | val |= (1 << 29); | 6560 | val |= (1 << 29); |
6561 | 6561 | ||
6562 | tw32_f(WDMAC_MODE, val); | 6562 | tw32_f(WDMAC_MODE, val); |
6563 | udelay(40); | 6563 | udelay(40); |
6564 | 6564 | ||
6565 | if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { | 6565 | if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { |
6566 | val = tr32(TG3PCI_X_CAPS); | 6566 | val = tr32(TG3PCI_X_CAPS); |
6567 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { | 6567 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { |
6568 | val &= ~PCIX_CAPS_BURST_MASK; | 6568 | val &= ~PCIX_CAPS_BURST_MASK; |
6569 | val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); | 6569 | val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); |
6570 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 6570 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { |
6571 | val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK); | 6571 | val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK); |
6572 | val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); | 6572 | val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); |
6573 | } | 6573 | } |
6574 | tw32(TG3PCI_X_CAPS, val); | 6574 | tw32(TG3PCI_X_CAPS, val); |
6575 | } | 6575 | } |
6576 | 6576 | ||
6577 | tw32_f(RDMAC_MODE, rdmac_mode); | 6577 | tw32_f(RDMAC_MODE, rdmac_mode); |
6578 | udelay(40); | 6578 | udelay(40); |
6579 | 6579 | ||
6580 | tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); | 6580 | tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); |
6581 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 6581 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
6582 | tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); | 6582 | tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); |
6583 | tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); | 6583 | tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); |
6584 | tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); | 6584 | tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); |
6585 | tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); | 6585 | tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); |
6586 | tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); | 6586 | tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); |
6587 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); | 6587 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); |
6588 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 6588 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
6589 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); | 6589 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); |
6590 | tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); | 6590 | tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); |
6591 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); | 6591 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); |
6592 | 6592 | ||
6593 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { | 6593 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { |
6594 | err = tg3_load_5701_a0_firmware_fix(tp); | 6594 | err = tg3_load_5701_a0_firmware_fix(tp); |
6595 | if (err) | 6595 | if (err) |
6596 | return err; | 6596 | return err; |
6597 | } | 6597 | } |
6598 | 6598 | ||
6599 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 6599 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { |
6600 | err = tg3_load_tso_firmware(tp); | 6600 | err = tg3_load_tso_firmware(tp); |
6601 | if (err) | 6601 | if (err) |
6602 | return err; | 6602 | return err; |
6603 | } | 6603 | } |
6604 | 6604 | ||
6605 | tp->tx_mode = TX_MODE_ENABLE; | 6605 | tp->tx_mode = TX_MODE_ENABLE; |
6606 | tw32_f(MAC_TX_MODE, tp->tx_mode); | 6606 | tw32_f(MAC_TX_MODE, tp->tx_mode); |
6607 | udelay(100); | 6607 | udelay(100); |
6608 | 6608 | ||
6609 | tp->rx_mode = RX_MODE_ENABLE; | 6609 | tp->rx_mode = RX_MODE_ENABLE; |
6610 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 6610 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
6611 | tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; | 6611 | tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; |
6612 | 6612 | ||
6613 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 6613 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
6614 | udelay(10); | 6614 | udelay(10); |
6615 | 6615 | ||
6616 | if (tp->link_config.phy_is_low_power) { | 6616 | if (tp->link_config.phy_is_low_power) { |
6617 | tp->link_config.phy_is_low_power = 0; | 6617 | tp->link_config.phy_is_low_power = 0; |
6618 | tp->link_config.speed = tp->link_config.orig_speed; | 6618 | tp->link_config.speed = tp->link_config.orig_speed; |
6619 | tp->link_config.duplex = tp->link_config.orig_duplex; | 6619 | tp->link_config.duplex = tp->link_config.orig_duplex; |
6620 | tp->link_config.autoneg = tp->link_config.orig_autoneg; | 6620 | tp->link_config.autoneg = tp->link_config.orig_autoneg; |
6621 | } | 6621 | } |
6622 | 6622 | ||
6623 | tp->mi_mode = MAC_MI_MODE_BASE; | 6623 | tp->mi_mode = MAC_MI_MODE_BASE; |
6624 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 6624 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
6625 | udelay(80); | 6625 | udelay(80); |
6626 | 6626 | ||
6627 | tw32(MAC_LED_CTRL, tp->led_ctrl); | 6627 | tw32(MAC_LED_CTRL, tp->led_ctrl); |
6628 | 6628 | ||
6629 | tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); | 6629 | tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); |
6630 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 6630 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
6631 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 6631 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); |
6632 | udelay(10); | 6632 | udelay(10); |
6633 | } | 6633 | } |
6634 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 6634 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
6635 | udelay(10); | 6635 | udelay(10); |
6636 | 6636 | ||
6637 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 6637 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
6638 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && | 6638 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && |
6639 | !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { | 6639 | !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { |
6640 | /* Set drive transmission level to 1.2V */ | 6640 | /* Set drive transmission level to 1.2V */ |
6641 | /* only if the signal pre-emphasis bit is not set */ | 6641 | /* only if the signal pre-emphasis bit is not set */ |
6642 | val = tr32(MAC_SERDES_CFG); | 6642 | val = tr32(MAC_SERDES_CFG); |
6643 | val &= 0xfffff000; | 6643 | val &= 0xfffff000; |
6644 | val |= 0x880; | 6644 | val |= 0x880; |
6645 | tw32(MAC_SERDES_CFG, val); | 6645 | tw32(MAC_SERDES_CFG, val); |
6646 | } | 6646 | } |
6647 | if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) | 6647 | if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) |
6648 | tw32(MAC_SERDES_CFG, 0x616000); | 6648 | tw32(MAC_SERDES_CFG, 0x616000); |
6649 | } | 6649 | } |
6650 | 6650 | ||
6651 | /* Prevent chip from dropping frames when flow control | 6651 | /* Prevent chip from dropping frames when flow control |
6652 | * is enabled. | 6652 | * is enabled. |
6653 | */ | 6653 | */ |
6654 | tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); | 6654 | tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); |
6655 | 6655 | ||
6656 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && | 6656 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && |
6657 | (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 6657 | (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { |
6658 | /* Use hardware link auto-negotiation */ | 6658 | /* Use hardware link auto-negotiation */ |
6659 | tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; | 6659 | tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; |
6660 | } | 6660 | } |
6661 | 6661 | ||
6662 | if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && | 6662 | if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && |
6663 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { | 6663 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { |
6664 | u32 tmp; | 6664 | u32 tmp; |
6665 | 6665 | ||
6666 | tmp = tr32(SERDES_RX_CTRL); | 6666 | tmp = tr32(SERDES_RX_CTRL); |
6667 | tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); | 6667 | tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); |
6668 | tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; | 6668 | tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; |
6669 | tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; | 6669 | tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; |
6670 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 6670 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
6671 | } | 6671 | } |
6672 | 6672 | ||
6673 | err = tg3_setup_phy(tp, 0); | 6673 | err = tg3_setup_phy(tp, 0); |
6674 | if (err) | 6674 | if (err) |
6675 | return err; | 6675 | return err; |
6676 | 6676 | ||
6677 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 6677 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && |
6678 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { | 6678 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { |
6679 | u32 tmp; | 6679 | u32 tmp; |
6680 | 6680 | ||
6681 | /* Clear CRC stats. */ | 6681 | /* Clear CRC stats. */ |
6682 | if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { | 6682 | if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { |
6683 | tg3_writephy(tp, MII_TG3_TEST1, | 6683 | tg3_writephy(tp, MII_TG3_TEST1, |
6684 | tmp | MII_TG3_TEST1_CRC_EN); | 6684 | tmp | MII_TG3_TEST1_CRC_EN); |
6685 | tg3_readphy(tp, 0x14, &tmp); | 6685 | tg3_readphy(tp, 0x14, &tmp); |
6686 | } | 6686 | } |
6687 | } | 6687 | } |
6688 | 6688 | ||
6689 | __tg3_set_rx_mode(tp->dev); | 6689 | __tg3_set_rx_mode(tp->dev); |
6690 | 6690 | ||
6691 | /* Initialize receive rules. */ | 6691 | /* Initialize receive rules. */ |
6692 | tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); | 6692 | tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); |
6693 | tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); | 6693 | tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); |
6694 | tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); | 6694 | tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); |
6695 | tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); | 6695 | tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); |
6696 | 6696 | ||
6697 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 6697 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && |
6698 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 6698 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) |
6699 | limit = 8; | 6699 | limit = 8; |
6700 | else | 6700 | else |
6701 | limit = 16; | 6701 | limit = 16; |
6702 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) | 6702 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) |
6703 | limit -= 4; | 6703 | limit -= 4; |
6704 | switch (limit) { | 6704 | switch (limit) { |
6705 | case 16: | 6705 | case 16: |
6706 | tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); | 6706 | tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); |
6707 | case 15: | 6707 | case 15: |
6708 | tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); | 6708 | tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); |
6709 | case 14: | 6709 | case 14: |
6710 | tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); | 6710 | tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); |
6711 | case 13: | 6711 | case 13: |
6712 | tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); | 6712 | tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); |
6713 | case 12: | 6713 | case 12: |
6714 | tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); | 6714 | tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); |
6715 | case 11: | 6715 | case 11: |
6716 | tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); | 6716 | tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); |
6717 | case 10: | 6717 | case 10: |
6718 | tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); | 6718 | tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); |
6719 | case 9: | 6719 | case 9: |
6720 | tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); | 6720 | tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); |
6721 | case 8: | 6721 | case 8: |
6722 | tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); | 6722 | tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); |
6723 | case 7: | 6723 | case 7: |
6724 | tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); | 6724 | tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); |
6725 | case 6: | 6725 | case 6: |
6726 | tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); | 6726 | tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); |
6727 | case 5: | 6727 | case 5: |
6728 | tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); | 6728 | tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); |
6729 | case 4: | 6729 | case 4: |
6730 | /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ | 6730 | /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ |
6731 | case 3: | 6731 | case 3: |
6732 | /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ | 6732 | /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ |
6733 | case 2: | 6733 | case 2: |
6734 | case 1: | 6734 | case 1: |
6735 | 6735 | ||
6736 | default: | 6736 | default: |
6737 | break; | 6737 | break; |
6738 | } | 6738 | } |
6739 | 6739 | ||
6740 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); | 6740 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); |
6741 | 6741 | ||
6742 | return 0; | 6742 | return 0; |
6743 | } | 6743 | } |
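The TG3_BDINFO comment earlier in tg3_reset_hw describes each ring control block as a 64-bit host DMA address written as separate HIGH/LOW words, a MAXLEN_FLAGS word that packs the maximum buffer size into the upper 16 bits above the attribute flags, and a NIC SRAM address. A minimal sketch of that split-and-pack arithmetic, using stand-in values and a locally defined shift constant rather than the driver's register map:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in matching the "(rx max buffer size << 16) | flags"
     * packing the comment describes; not the driver's header constant. */
    #define MAXLEN_SHIFT 16

    int main(void)
    {
        uint64_t ring_dma   = 0x000000012345a000ull; /* example bus address */
        uint32_t max_len    = 512;                   /* example max buffer size */
        uint32_t ring_flags = 0;                     /* example attribute flags */

        uint32_t addr_high    = (uint32_t)(ring_dma >> 32);
        uint32_t addr_low     = (uint32_t)(ring_dma & 0xffffffffu);
        uint32_t maxlen_flags = (max_len << MAXLEN_SHIFT) | ring_flags;

        printf("HOST_ADDR HIGH=0x%08x LOW=0x%08x MAXLEN_FLAGS=0x%08x\n",
               (unsigned)addr_high, (unsigned)addr_low, (unsigned)maxlen_flags);
        return 0;
    }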
6744 | 6744 | ||
6745 | /* Called at device open time to get the chip ready for | 6745 | /* Called at device open time to get the chip ready for |
6746 | * packet processing. Invoked with tp->lock held. | 6746 | * packet processing. Invoked with tp->lock held. |
6747 | */ | 6747 | */ |
6748 | static int tg3_init_hw(struct tg3 *tp, int reset_phy) | 6748 | static int tg3_init_hw(struct tg3 *tp, int reset_phy) |
6749 | { | 6749 | { |
6750 | int err; | 6750 | int err; |
6751 | 6751 | ||
6752 | /* Force the chip into D0. */ | 6752 | /* Force the chip into D0. */ |
6753 | err = tg3_set_power_state(tp, PCI_D0); | 6753 | err = tg3_set_power_state(tp, PCI_D0); |
6754 | if (err) | 6754 | if (err) |
6755 | goto out; | 6755 | goto out; |
6756 | 6756 | ||
6757 | tg3_switch_clocks(tp); | 6757 | tg3_switch_clocks(tp); |
6758 | 6758 | ||
6759 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 6759 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
6760 | 6760 | ||
6761 | err = tg3_reset_hw(tp, reset_phy); | 6761 | err = tg3_reset_hw(tp, reset_phy); |
6762 | 6762 | ||
6763 | out: | 6763 | out: |
6764 | return err; | 6764 | return err; |
6765 | } | 6765 | } |
6766 | 6766 | ||
6767 | #define TG3_STAT_ADD32(PSTAT, REG) \ | 6767 | #define TG3_STAT_ADD32(PSTAT, REG) \ |
6768 | do { u32 __val = tr32(REG); \ | 6768 | do { u32 __val = tr32(REG); \ |
6769 | (PSTAT)->low += __val; \ | 6769 | (PSTAT)->low += __val; \ |
6770 | if ((PSTAT)->low < __val) \ | 6770 | if ((PSTAT)->low < __val) \ |
6771 | (PSTAT)->high += 1; \ | 6771 | (PSTAT)->high += 1; \ |
6772 | } while (0) | 6772 | } while (0) |
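TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software accumulator kept as separate low/high words: after adding the sample, a low word that is now smaller than the value just added means the 32-bit sum wrapped, so the high word takes the carry. A minimal standalone sketch of the same carry-on-wrap idiom (the struct and function names below are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 64-bit accumulator split into low/high 32-bit words,
     * mirroring the low/high fields TG3_STAT_ADD32 updates. */
    struct stat64 {
        uint32_t low;
        uint32_t high;
    };

    static void stat_add32(struct stat64 *p, uint32_t val)
    {
        p->low += val;
        if (p->low < val)   /* unsigned wraparound => carry into high */
            p->high += 1;
    }

    int main(void)
    {
        struct stat64 s = { .low = 0xffffff00u, .high = 0 };

        stat_add32(&s, 0x200); /* wraps: low ends up 0x100, high becomes 1 */
        printf("high=%u low=0x%x\n", (unsigned)s.high, (unsigned)s.low);
        return 0;
    }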
6773 | 6773 | ||
6774 | static void tg3_periodic_fetch_stats(struct tg3 *tp) | 6774 | static void tg3_periodic_fetch_stats(struct tg3 *tp) |
6775 | { | 6775 | { |
6776 | struct tg3_hw_stats *sp = tp->hw_stats; | 6776 | struct tg3_hw_stats *sp = tp->hw_stats; |
6777 | 6777 | ||
6778 | if (!netif_carrier_ok(tp->dev)) | 6778 | if (!netif_carrier_ok(tp->dev)) |
6779 | return; | 6779 | return; |
6780 | 6780 | ||
6781 | TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); | 6781 | TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); |
6782 | TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); | 6782 | TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); |
6783 | TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); | 6783 | TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); |
6784 | TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); | 6784 | TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); |
6785 | TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); | 6785 | TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); |
6786 | TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); | 6786 | TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); |
6787 | TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); | 6787 | TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); |
6788 | TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); | 6788 | TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); |
6789 | TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); | 6789 | TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); |
6790 | TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); | 6790 | TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); |
6791 | TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); | 6791 | TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); |
6792 | TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); | 6792 | TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); |
6793 | TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); | 6793 | TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); |
6794 | 6794 | ||
6795 | TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); | 6795 | TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); |
6796 | TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); | 6796 | TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); |
6797 | TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); | 6797 | TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); |
6798 | TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); | 6798 | TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); |
6799 | TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); | 6799 | TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); |
6800 | TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); | 6800 | TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); |
6801 | TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); | 6801 | TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); |
6802 | TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); | 6802 | TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); |
6803 | TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); | 6803 | TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); |
6804 | TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); | 6804 | TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); |
6805 | TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); | 6805 | TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); |
6806 | TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); | 6806 | TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); |
6807 | TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); | 6807 | TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); |
6808 | TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); | 6808 | TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); |
6809 | 6809 | ||
6810 | TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); | 6810 | TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); |
6811 | TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); | 6811 | TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); |
6812 | TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); | 6812 | TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); |
6813 | } | 6813 | } |
6814 | 6814 | ||
6815 | static void tg3_timer(unsigned long __opaque) | 6815 | static void tg3_timer(unsigned long __opaque) |
6816 | { | 6816 | { |
6817 | struct tg3 *tp = (struct tg3 *) __opaque; | 6817 | struct tg3 *tp = (struct tg3 *) __opaque; |
6818 | 6818 | ||
6819 | if (tp->irq_sync) | 6819 | if (tp->irq_sync) |
6820 | goto restart_timer; | 6820 | goto restart_timer; |
6821 | 6821 | ||
6822 | spin_lock(&tp->lock); | 6822 | spin_lock(&tp->lock); |
6823 | 6823 | ||
6824 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 6824 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { |
6825 | /* All of this garbage is because, when using non-tagged | 6825 | /* All of this garbage is because, when using non-tagged |
6826 | * IRQ status, the mailbox/status_block protocol the chip | 6826 | * IRQ status, the mailbox/status_block protocol the chip |
6827 | * uses with the cpu is race prone. | 6827 | * uses with the cpu is race prone. |
6828 | */ | 6828 | */ |
6829 | if (tp->hw_status->status & SD_STATUS_UPDATED) { | 6829 | if (tp->hw_status->status & SD_STATUS_UPDATED) { |
6830 | tw32(GRC_LOCAL_CTRL, | 6830 | tw32(GRC_LOCAL_CTRL, |
6831 | tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); | 6831 | tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); |
6832 | } else { | 6832 | } else { |
6833 | tw32(HOSTCC_MODE, tp->coalesce_mode | | 6833 | tw32(HOSTCC_MODE, tp->coalesce_mode | |
6834 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); | 6834 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); |
6835 | } | 6835 | } |
6836 | 6836 | ||
6837 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 6837 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { |
6838 | tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; | 6838 | tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; |
6839 | spin_unlock(&tp->lock); | 6839 | spin_unlock(&tp->lock); |
6840 | schedule_work(&tp->reset_task); | 6840 | schedule_work(&tp->reset_task); |
6841 | return; | 6841 | return; |
6842 | } | 6842 | } |
6843 | } | 6843 | } |
6844 | 6844 | ||
6845 | /* This part only runs once per second. */ | 6845 | /* This part only runs once per second. */ |
6846 | if (!--tp->timer_counter) { | 6846 | if (!--tp->timer_counter) { |
6847 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 6847 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) |
6848 | tg3_periodic_fetch_stats(tp); | 6848 | tg3_periodic_fetch_stats(tp); |
6849 | 6849 | ||
6850 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 6850 | if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { |
6851 | u32 mac_stat; | 6851 | u32 mac_stat; |
6852 | int phy_event; | 6852 | int phy_event; |
6853 | 6853 | ||
6854 | mac_stat = tr32(MAC_STATUS); | 6854 | mac_stat = tr32(MAC_STATUS); |
6855 | 6855 | ||
6856 | phy_event = 0; | 6856 | phy_event = 0; |
6857 | if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { | 6857 | if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { |
6858 | if (mac_stat & MAC_STATUS_MI_INTERRUPT) | 6858 | if (mac_stat & MAC_STATUS_MI_INTERRUPT) |
6859 | phy_event = 1; | 6859 | phy_event = 1; |
6860 | } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) | 6860 | } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) |
6861 | phy_event = 1; | 6861 | phy_event = 1; |
6862 | 6862 | ||
6863 | if (phy_event) | 6863 | if (phy_event) |
6864 | tg3_setup_phy(tp, 0); | 6864 | tg3_setup_phy(tp, 0); |
6865 | } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { | 6865 | } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { |
6866 | u32 mac_stat = tr32(MAC_STATUS); | 6866 | u32 mac_stat = tr32(MAC_STATUS); |
6867 | int need_setup = 0; | 6867 | int need_setup = 0; |
6868 | 6868 | ||
6869 | if (netif_carrier_ok(tp->dev) && | 6869 | if (netif_carrier_ok(tp->dev) && |
6870 | (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { | 6870 | (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { |
6871 | need_setup = 1; | 6871 | need_setup = 1; |
6872 | } | 6872 | } |
6873 | if (!netif_carrier_ok(tp->dev) && | 6873 | if (!netif_carrier_ok(tp->dev) && |
6874 | (mac_stat & (MAC_STATUS_PCS_SYNCED | | 6874 | (mac_stat & (MAC_STATUS_PCS_SYNCED | |
6875 | MAC_STATUS_SIGNAL_DET))) { | 6875 | MAC_STATUS_SIGNAL_DET))) { |
6876 | need_setup = 1; | 6876 | need_setup = 1; |
6877 | } | 6877 | } |
6878 | if (need_setup) { | 6878 | if (need_setup) { |
6879 | if (!tp->serdes_counter) { | 6879 | if (!tp->serdes_counter) { |
6880 | tw32_f(MAC_MODE, | 6880 | tw32_f(MAC_MODE, |
6881 | (tp->mac_mode & | 6881 | (tp->mac_mode & |
6882 | ~MAC_MODE_PORT_MODE_MASK)); | 6882 | ~MAC_MODE_PORT_MODE_MASK)); |
6883 | udelay(40); | 6883 | udelay(40); |
6884 | tw32_f(MAC_MODE, tp->mac_mode); | 6884 | tw32_f(MAC_MODE, tp->mac_mode); |
6885 | udelay(40); | 6885 | udelay(40); |
6886 | } | 6886 | } |
6887 | tg3_setup_phy(tp, 0); | 6887 | tg3_setup_phy(tp, 0); |
6888 | } | 6888 | } |
6889 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 6889 | } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) |
6890 | tg3_serdes_parallel_detect(tp); | 6890 | tg3_serdes_parallel_detect(tp); |
6891 | 6891 | ||
6892 | tp->timer_counter = tp->timer_multiplier; | 6892 | tp->timer_counter = tp->timer_multiplier; |
6893 | } | 6893 | } |
6894 | 6894 | ||
6895 | /* Heartbeat is only sent once every 2 seconds. | 6895 | /* Heartbeat is only sent once every 2 seconds. |
6896 | * | 6896 | * |
6897 | * The heartbeat is to tell the ASF firmware that the host | 6897 | * The heartbeat is to tell the ASF firmware that the host |
6898 | * driver is still alive. In the event that the OS crashes, | 6898 | * driver is still alive. In the event that the OS crashes, |
6899 | * ASF needs to reset the hardware to free up the FIFO space | 6899 | * ASF needs to reset the hardware to free up the FIFO space |
6900 | * that may be filled with rx packets destined for the host. | 6900 | * that may be filled with rx packets destined for the host. |
6901 | * If the FIFO is full, ASF will no longer function properly. | 6901 | * If the FIFO is full, ASF will no longer function properly. |
6902 | * | 6902 | * |
6903 | * Unintended resets have been reported on real time kernels | 6903 | * Unintended resets have been reported on real time kernels |
6904 | * where the timer doesn't run on time. Netpoll will also have | 6904 | * where the timer doesn't run on time. Netpoll will also have |
6905 | * the same problem. | 6905 | * the same problem. |
6906 | * | 6906 | * |
6907 | * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware | 6907 | * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware |
6908 | * to check the ring condition when the heartbeat is expiring | 6908 | * to check the ring condition when the heartbeat is expiring |
6909 | * before doing the reset. This will prevent most unintended | 6909 | * before doing the reset. This will prevent most unintended |
6910 | * resets. | 6910 | * resets. |
6911 | */ | 6911 | */ |
6912 | if (!--tp->asf_counter) { | 6912 | if (!--tp->asf_counter) { |
6913 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 6913 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
6914 | u32 val; | 6914 | u32 val; |
6915 | 6915 | ||
6916 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, | 6916 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, |
6917 | FWCMD_NICDRV_ALIVE3); | 6917 | FWCMD_NICDRV_ALIVE3); |
6918 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); | 6918 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); |
6919 | /* 5 seconds timeout */ | 6919 | /* 5 seconds timeout */ |
6920 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); | 6920 | tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); |
6921 | val = tr32(GRC_RX_CPU_EVENT); | 6921 | val = tr32(GRC_RX_CPU_EVENT); |
6922 | val |= (1 << 14); | 6922 | val |= (1 << 14); |
6923 | tw32(GRC_RX_CPU_EVENT, val); | 6923 | tw32(GRC_RX_CPU_EVENT, val); |
6924 | } | 6924 | } |
6925 | tp->asf_counter = tp->asf_multiplier; | 6925 | tp->asf_counter = tp->asf_multiplier; |
6926 | } | 6926 | } |
6927 | 6927 | ||
6928 | spin_unlock(&tp->lock); | 6928 | spin_unlock(&tp->lock); |
6929 | 6929 | ||
6930 | restart_timer: | 6930 | restart_timer: |
6931 | tp->timer.expires = jiffies + tp->timer_offset; | 6931 | tp->timer.expires = jiffies + tp->timer_offset; |
6932 | add_timer(&tp->timer); | 6932 | add_timer(&tp->timer); |
6933 | } | 6933 | } |
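tg3_timer fires on a fast periodic tick, but the once-per-second link polling and the two-second ASF heartbeat above are rate limited by countdown counters (timer_counter, asf_counter) that are decremented on every tick and reloaded from their multipliers when they hit zero. A small sketch of that divider pattern; the tick period and reload values here are illustrative, not the driver's actual timer_offset/multiplier settings:

    #include <stdio.h>

    /* Pretend the base timer fires every 100 ms. */
    #define TICKS_PER_SECOND 10

    int main(void)
    {
        unsigned int timer_counter = TICKS_PER_SECOND;     /* 1 s work */
        unsigned int asf_counter   = 2 * TICKS_PER_SECOND; /* 2 s work */

        for (int tick = 1; tick <= 40; tick++) {
            if (!--timer_counter) {
                printf("tick %2d: once-per-second work\n", tick);
                timer_counter = TICKS_PER_SECOND;
            }
            if (!--asf_counter) {
                printf("tick %2d: heartbeat, every 2 seconds\n", tick);
                asf_counter = 2 * TICKS_PER_SECOND;
            }
        }
        return 0;
    }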
6934 | 6934 | ||
6935 | static int tg3_request_irq(struct tg3 *tp) | 6935 | static int tg3_request_irq(struct tg3 *tp) |
6936 | { | 6936 | { |
6937 | irq_handler_t fn; | 6937 | irq_handler_t fn; |
6938 | unsigned long flags; | 6938 | unsigned long flags; |
6939 | struct net_device *dev = tp->dev; | 6939 | struct net_device *dev = tp->dev; |
6940 | 6940 | ||
6941 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 6941 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
6942 | fn = tg3_msi; | 6942 | fn = tg3_msi; |
6943 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | 6943 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) |
6944 | fn = tg3_msi_1shot; | 6944 | fn = tg3_msi_1shot; |
6945 | flags = IRQF_SAMPLE_RANDOM; | 6945 | flags = IRQF_SAMPLE_RANDOM; |
6946 | } else { | 6946 | } else { |
6947 | fn = tg3_interrupt; | 6947 | fn = tg3_interrupt; |
6948 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 6948 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) |
6949 | fn = tg3_interrupt_tagged; | 6949 | fn = tg3_interrupt_tagged; |
6950 | flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; | 6950 | flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; |
6951 | } | 6951 | } |
6952 | return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); | 6952 | return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); |
6953 | } | 6953 | } |
6954 | 6954 | ||
6955 | static int tg3_test_interrupt(struct tg3 *tp) | 6955 | static int tg3_test_interrupt(struct tg3 *tp) |
6956 | { | 6956 | { |
6957 | struct net_device *dev = tp->dev; | 6957 | struct net_device *dev = tp->dev; |
6958 | int err, i, intr_ok = 0; | 6958 | int err, i, intr_ok = 0; |
6959 | 6959 | ||
6960 | if (!netif_running(dev)) | 6960 | if (!netif_running(dev)) |
6961 | return -ENODEV; | 6961 | return -ENODEV; |
6962 | 6962 | ||
6963 | tg3_disable_ints(tp); | 6963 | tg3_disable_ints(tp); |
6964 | 6964 | ||
6965 | free_irq(tp->pdev->irq, dev); | 6965 | free_irq(tp->pdev->irq, dev); |
6966 | 6966 | ||
6967 | err = request_irq(tp->pdev->irq, tg3_test_isr, | 6967 | err = request_irq(tp->pdev->irq, tg3_test_isr, |
6968 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); | 6968 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); |
6969 | if (err) | 6969 | if (err) |
6970 | return err; | 6970 | return err; |
6971 | 6971 | ||
6972 | tp->hw_status->status &= ~SD_STATUS_UPDATED; | 6972 | tp->hw_status->status &= ~SD_STATUS_UPDATED; |
6973 | tg3_enable_ints(tp); | 6973 | tg3_enable_ints(tp); |
6974 | 6974 | ||
6975 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 6975 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
6976 | HOSTCC_MODE_NOW); | 6976 | HOSTCC_MODE_NOW); |
6977 | 6977 | ||
6978 | for (i = 0; i < 5; i++) { | 6978 | for (i = 0; i < 5; i++) { |
6979 | u32 int_mbox, misc_host_ctrl; | 6979 | u32 int_mbox, misc_host_ctrl; |
6980 | 6980 | ||
6981 | int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + | 6981 | int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + |
6982 | TG3_64BIT_REG_LOW); | 6982 | TG3_64BIT_REG_LOW); |
6983 | misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); | 6983 | misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); |
6984 | 6984 | ||
6985 | if ((int_mbox != 0) || | 6985 | if ((int_mbox != 0) || |
6986 | (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { | 6986 | (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { |
6987 | intr_ok = 1; | 6987 | intr_ok = 1; |
6988 | break; | 6988 | break; |
6989 | } | 6989 | } |
6990 | 6990 | ||
6991 | msleep(10); | 6991 | msleep(10); |
6992 | } | 6992 | } |
6993 | 6993 | ||
6994 | tg3_disable_ints(tp); | 6994 | tg3_disable_ints(tp); |
6995 | 6995 | ||
6996 | free_irq(tp->pdev->irq, dev); | 6996 | free_irq(tp->pdev->irq, dev); |
6997 | 6997 | ||
6998 | err = tg3_request_irq(tp); | 6998 | err = tg3_request_irq(tp); |
6999 | 6999 | ||
7000 | if (err) | 7000 | if (err) |
7001 | return err; | 7001 | return err; |
7002 | 7002 | ||
7003 | if (intr_ok) | 7003 | if (intr_ok) |
7004 | return 0; | 7004 | return 0; |
7005 | 7005 | ||
7006 | return -EIO; | 7006 | return -EIO; |
7007 | } | 7007 | } |
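tg3_test_interrupt is a bounded poll: it forces a host-coalescing "now" event, checks the interrupt mailbox up to five times with a 10 ms sleep between attempts, and reports -EIO only if nothing ever arrives. A generic sketch of that poll-with-timeout shape; event_seen is a placeholder rather than a driver call, and the kernel's msleep is replaced by a comment:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder for "did the interrupt show up?"; in the driver this is
     * the interrupt mailbox / MISC_HOST_CTRL check. */
    static bool event_seen(int attempt)
    {
        return attempt == 3; /* pretend it appears on the fourth poll */
    }

    static int poll_for_event(void)
    {
        for (int i = 0; i < 5; i++) {
            if (event_seen(i))
                return 0;   /* success */
            /* msleep(10) between attempts in the driver */
        }
        return -1;          /* the driver returns -EIO here */
    }

    int main(void)
    {
        printf("poll_for_event() = %d\n", poll_for_event());
        return 0;
    }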
7008 | 7008 | ||
7009 | /* Returns 0 if the MSI test succeeds, or if the MSI test fails but | 7009 | /* Returns 0 if the MSI test succeeds, or if the MSI test fails but |
7010 | * INTx mode is successfully restored. | 7010 | * INTx mode is successfully restored. |
7011 | */ | 7011 | */ |
7012 | static int tg3_test_msi(struct tg3 *tp) | 7012 | static int tg3_test_msi(struct tg3 *tp) |
7013 | { | 7013 | { |
7014 | struct net_device *dev = tp->dev; | 7014 | struct net_device *dev = tp->dev; |
7015 | int err; | 7015 | int err; |
7016 | u16 pci_cmd; | 7016 | u16 pci_cmd; |
7017 | 7017 | ||
7018 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) | 7018 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) |
7019 | return 0; | 7019 | return 0; |
7020 | 7020 | ||
7021 | /* Turn off SERR reporting in case MSI terminates with Master | 7021 | /* Turn off SERR reporting in case MSI terminates with Master |
7022 | * Abort. | 7022 | * Abort. |
7023 | */ | 7023 | */ |
7024 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 7024 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); |
7025 | pci_write_config_word(tp->pdev, PCI_COMMAND, | 7025 | pci_write_config_word(tp->pdev, PCI_COMMAND, |
7026 | pci_cmd & ~PCI_COMMAND_SERR); | 7026 | pci_cmd & ~PCI_COMMAND_SERR); |
7027 | 7027 | ||
7028 | err = tg3_test_interrupt(tp); | 7028 | err = tg3_test_interrupt(tp); |
7029 | 7029 | ||
7030 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 7030 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); |
7031 | 7031 | ||
7032 | if (!err) | 7032 | if (!err) |
7033 | return 0; | 7033 | return 0; |
7034 | 7034 | ||
7035 | /* other failures */ | 7035 | /* other failures */ |
7036 | if (err != -EIO) | 7036 | if (err != -EIO) |
7037 | return err; | 7037 | return err; |
7038 | 7038 | ||
7039 | /* MSI test failed, go back to INTx mode */ | 7039 | /* MSI test failed, go back to INTx mode */ |
7040 | printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " | 7040 | printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " |
7041 | "switching to INTx mode. Please report this failure to " | 7041 | "switching to INTx mode. Please report this failure to " |
7042 | "the PCI maintainer and include system chipset information.\n", | 7042 | "the PCI maintainer and include system chipset information.\n", |
7043 | tp->dev->name); | 7043 | tp->dev->name); |
7044 | 7044 | ||
7045 | free_irq(tp->pdev->irq, dev); | 7045 | free_irq(tp->pdev->irq, dev); |
7046 | pci_disable_msi(tp->pdev); | 7046 | pci_disable_msi(tp->pdev); |
7047 | 7047 | ||
7048 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 7048 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
7049 | 7049 | ||
7050 | err = tg3_request_irq(tp); | 7050 | err = tg3_request_irq(tp); |
7051 | if (err) | 7051 | if (err) |
7052 | return err; | 7052 | return err; |
7053 | 7053 | ||
7054 | /* Need to reset the chip because the MSI cycle may have terminated | 7054 | /* Need to reset the chip because the MSI cycle may have terminated |
7055 | * with Master Abort. | 7055 | * with Master Abort. |
7056 | */ | 7056 | */ |
7057 | tg3_full_lock(tp, 1); | 7057 | tg3_full_lock(tp, 1); |
7058 | 7058 | ||
7059 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7059 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7060 | err = tg3_init_hw(tp, 1); | 7060 | err = tg3_init_hw(tp, 1); |
7061 | 7061 | ||
7062 | tg3_full_unlock(tp); | 7062 | tg3_full_unlock(tp); |
7063 | 7063 | ||
7064 | if (err) | 7064 | if (err) |
7065 | free_irq(tp->pdev->irq, dev); | 7065 | free_irq(tp->pdev->irq, dev); |
7066 | 7066 | ||
7067 | return err; | 7067 | return err; |
7068 | } | 7068 | } |
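
tg3_test_msi() above only trusts MSI after tg3_test_interrupt() has shown that a message-signalled interrupt actually reaches the host; if it does not, the driver drops back to legacy INTx and re-initializes the chip because the failed MSI write may have ended in a Master Abort (hence the temporary SERR masking). A minimal, hypothetical sketch of the same pattern follows; my_setup_irq(), my_isr() and my_verify_one_interrupt() are invented names for illustration, not driver or kernel API:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    /* Hypothetical sketch of the probe-then-fall-back pattern tg3 uses.
     * my_isr() and my_verify_one_interrupt() are invented stand-ins for
     * the driver's ISR and for a check like tg3_test_interrupt(); this
     * is not the driver's code. */
    static irqreturn_t my_isr(int irq, void *dev_id);
    static bool my_verify_one_interrupt(void *dev);

    static int my_setup_irq(struct pci_dev *pdev, void *dev)
    {
            int err;

            if (pci_enable_msi(pdev) == 0) {
                    err = request_irq(pdev->irq, my_isr, 0, "mydev", dev);
                    if (!err && my_verify_one_interrupt(dev))
                            return 0;       /* MSI delivery confirmed */

                    /* MSI was granted but never arrived: undo and fall
                     * back, much as tg3_test_msi() does (tg3 also resets
                     * the chip in case the MSI cycle ended in a Master
                     * Abort). */
                    if (!err)
                            free_irq(pdev->irq, dev);
                    pci_disable_msi(pdev);
            }

            /* Legacy INTx is usually shared */
            return request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydev", dev);
    }
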
7069 | 7069 | ||
7070 | static int tg3_open(struct net_device *dev) | 7070 | static int tg3_open(struct net_device *dev) |
7071 | { | 7071 | { |
7072 | struct tg3 *tp = netdev_priv(dev); | 7072 | struct tg3 *tp = netdev_priv(dev); |
7073 | int err; | 7073 | int err; |
7074 | 7074 | ||
7075 | netif_carrier_off(tp->dev); | 7075 | netif_carrier_off(tp->dev); |
7076 | 7076 | ||
7077 | tg3_full_lock(tp, 0); | 7077 | tg3_full_lock(tp, 0); |
7078 | 7078 | ||
7079 | err = tg3_set_power_state(tp, PCI_D0); | 7079 | err = tg3_set_power_state(tp, PCI_D0); |
7080 | if (err) { | 7080 | if (err) { |
7081 | tg3_full_unlock(tp); | 7081 | tg3_full_unlock(tp); |
7082 | return err; | 7082 | return err; |
7083 | } | 7083 | } |
7084 | 7084 | ||
7085 | tg3_disable_ints(tp); | 7085 | tg3_disable_ints(tp); |
7086 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 7086 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; |
7087 | 7087 | ||
7088 | tg3_full_unlock(tp); | 7088 | tg3_full_unlock(tp); |
7089 | 7089 | ||
7090 | /* The placement of this call is tied | 7090 | /* The placement of this call is tied |
7091 | * to the setup and use of Host TX descriptors. | 7091 | * to the setup and use of Host TX descriptors. |
7092 | */ | 7092 | */ |
7093 | err = tg3_alloc_consistent(tp); | 7093 | err = tg3_alloc_consistent(tp); |
7094 | if (err) | 7094 | if (err) |
7095 | return err; | 7095 | return err; |
7096 | 7096 | ||
7097 | if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) { | 7097 | if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) { |
7098 | /* All MSI supporting chips should support tagged | 7098 | /* All MSI supporting chips should support tagged |
7099 | * status. Assert that this is the case. | 7099 | * status. Assert that this is the case. |
7100 | */ | 7100 | */ |
7101 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 7101 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { |
7102 | printk(KERN_WARNING PFX "%s: MSI without TAGGED? " | 7102 | printk(KERN_WARNING PFX "%s: MSI without TAGGED? " |
7103 | "Not using MSI.\n", tp->dev->name); | 7103 | "Not using MSI.\n", tp->dev->name); |
7104 | } else if (pci_enable_msi(tp->pdev) == 0) { | 7104 | } else if (pci_enable_msi(tp->pdev) == 0) { |
7105 | u32 msi_mode; | 7105 | u32 msi_mode; |
7106 | 7106 | ||
7107 | msi_mode = tr32(MSGINT_MODE); | 7107 | msi_mode = tr32(MSGINT_MODE); |
7108 | tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); | 7108 | tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); |
7109 | tp->tg3_flags2 |= TG3_FLG2_USING_MSI; | 7109 | tp->tg3_flags2 |= TG3_FLG2_USING_MSI; |
7110 | } | 7110 | } |
7111 | } | 7111 | } |
7112 | err = tg3_request_irq(tp); | 7112 | err = tg3_request_irq(tp); |
7113 | 7113 | ||
7114 | if (err) { | 7114 | if (err) { |
7115 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7115 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7116 | pci_disable_msi(tp->pdev); | 7116 | pci_disable_msi(tp->pdev); |
7117 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 7117 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
7118 | } | 7118 | } |
7119 | tg3_free_consistent(tp); | 7119 | tg3_free_consistent(tp); |
7120 | return err; | 7120 | return err; |
7121 | } | 7121 | } |
7122 | 7122 | ||
7123 | tg3_full_lock(tp, 0); | 7123 | tg3_full_lock(tp, 0); |
7124 | 7124 | ||
7125 | err = tg3_init_hw(tp, 1); | 7125 | err = tg3_init_hw(tp, 1); |
7126 | if (err) { | 7126 | if (err) { |
7127 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7127 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7128 | tg3_free_rings(tp); | 7128 | tg3_free_rings(tp); |
7129 | } else { | 7129 | } else { |
7130 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 7130 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) |
7131 | tp->timer_offset = HZ; | 7131 | tp->timer_offset = HZ; |
7132 | else | 7132 | else |
7133 | tp->timer_offset = HZ / 10; | 7133 | tp->timer_offset = HZ / 10; |
7134 | 7134 | ||
7135 | BUG_ON(tp->timer_offset > HZ); | 7135 | BUG_ON(tp->timer_offset > HZ); |
7136 | tp->timer_counter = tp->timer_multiplier = | 7136 | tp->timer_counter = tp->timer_multiplier = |
7137 | (HZ / tp->timer_offset); | 7137 | (HZ / tp->timer_offset); |
7138 | tp->asf_counter = tp->asf_multiplier = | 7138 | tp->asf_counter = tp->asf_multiplier = |
7139 | ((HZ / tp->timer_offset) * 2); | 7139 | ((HZ / tp->timer_offset) * 2); |
7140 | 7140 | ||
7141 | init_timer(&tp->timer); | 7141 | init_timer(&tp->timer); |
7142 | tp->timer.expires = jiffies + tp->timer_offset; | 7142 | tp->timer.expires = jiffies + tp->timer_offset; |
7143 | tp->timer.data = (unsigned long) tp; | 7143 | tp->timer.data = (unsigned long) tp; |
7144 | tp->timer.function = tg3_timer; | 7144 | tp->timer.function = tg3_timer; |
7145 | } | 7145 | } |
7146 | 7146 | ||
7147 | tg3_full_unlock(tp); | 7147 | tg3_full_unlock(tp); |
7148 | 7148 | ||
7149 | if (err) { | 7149 | if (err) { |
7150 | free_irq(tp->pdev->irq, dev); | 7150 | free_irq(tp->pdev->irq, dev); |
7151 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7151 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7152 | pci_disable_msi(tp->pdev); | 7152 | pci_disable_msi(tp->pdev); |
7153 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 7153 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
7154 | } | 7154 | } |
7155 | tg3_free_consistent(tp); | 7155 | tg3_free_consistent(tp); |
7156 | return err; | 7156 | return err; |
7157 | } | 7157 | } |
7158 | 7158 | ||
7159 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7159 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7160 | err = tg3_test_msi(tp); | 7160 | err = tg3_test_msi(tp); |
7161 | 7161 | ||
7162 | if (err) { | 7162 | if (err) { |
7163 | tg3_full_lock(tp, 0); | 7163 | tg3_full_lock(tp, 0); |
7164 | 7164 | ||
7165 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7165 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7166 | pci_disable_msi(tp->pdev); | 7166 | pci_disable_msi(tp->pdev); |
7167 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 7167 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
7168 | } | 7168 | } |
7169 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7169 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7170 | tg3_free_rings(tp); | 7170 | tg3_free_rings(tp); |
7171 | tg3_free_consistent(tp); | 7171 | tg3_free_consistent(tp); |
7172 | 7172 | ||
7173 | tg3_full_unlock(tp); | 7173 | tg3_full_unlock(tp); |
7174 | 7174 | ||
7175 | return err; | 7175 | return err; |
7176 | } | 7176 | } |
7177 | 7177 | ||
7178 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7178 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7179 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { | 7179 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { |
7180 | u32 val = tr32(PCIE_TRANSACTION_CFG); | 7180 | u32 val = tr32(PCIE_TRANSACTION_CFG); |
7181 | 7181 | ||
7182 | tw32(PCIE_TRANSACTION_CFG, | 7182 | tw32(PCIE_TRANSACTION_CFG, |
7183 | val | PCIE_TRANS_CFG_1SHOT_MSI); | 7183 | val | PCIE_TRANS_CFG_1SHOT_MSI); |
7184 | } | 7184 | } |
7185 | } | 7185 | } |
7186 | } | 7186 | } |
7187 | 7187 | ||
7188 | tg3_full_lock(tp, 0); | 7188 | tg3_full_lock(tp, 0); |
7189 | 7189 | ||
7190 | add_timer(&tp->timer); | 7190 | add_timer(&tp->timer); |
7191 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 7191 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
7192 | tg3_enable_ints(tp); | 7192 | tg3_enable_ints(tp); |
7193 | 7193 | ||
7194 | tg3_full_unlock(tp); | 7194 | tg3_full_unlock(tp); |
7195 | 7195 | ||
7196 | netif_start_queue(dev); | 7196 | netif_start_queue(dev); |
7197 | 7197 | ||
7198 | return 0; | 7198 | return 0; |
7199 | } | 7199 | } |
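
For the timer setup in tg3_open() above: the driver polls once per second when the chip supports tagged status and every 100 ms otherwise, and timer_counter/asf_counter are derived so the periodic work still runs about once per second and the ASF heartbeat about every two seconds regardless of HZ. A small userspace sketch, written only to make the arithmetic concrete:

    #include <stdio.h>

    /* Mirrors the arithmetic in tg3_open(): the timer fires every
     * `offset' jiffies (1 s with tagged status, 100 ms without);
     * timer_counter is the number of firings per second of periodic
     * work, asf_counter the number per two-second ASF heartbeat. */
    static void show(unsigned int hz, int tagged)
    {
            unsigned int offset  = tagged ? hz : hz / 10;
            unsigned int counter = hz / offset;
            unsigned int asf     = counter * 2;

            printf("HZ=%u tagged=%d -> offset=%u jiffies, "
                   "timer_counter=%u, asf_counter=%u\n",
                   hz, tagged, offset, counter, asf);
    }

    int main(void)
    {
            show(1000, 1);  /* offset=1000, counter=1,  asf=2  */
            show(1000, 0);  /* offset=100,  counter=10, asf=20 */
            show(250,  0);  /* offset=25,   counter=10, asf=20 */
            return 0;
    }
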
7200 | 7200 | ||
7201 | #if 0 | 7201 | #if 0 |
7202 | /*static*/ void tg3_dump_state(struct tg3 *tp) | 7202 | /*static*/ void tg3_dump_state(struct tg3 *tp) |
7203 | { | 7203 | { |
7204 | u32 val32, val32_2, val32_3, val32_4, val32_5; | 7204 | u32 val32, val32_2, val32_3, val32_4, val32_5; |
7205 | u16 val16; | 7205 | u16 val16; |
7206 | int i; | 7206 | int i; |
7207 | 7207 | ||
7208 | pci_read_config_word(tp->pdev, PCI_STATUS, &val16); | 7208 | pci_read_config_word(tp->pdev, PCI_STATUS, &val16); |
7209 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); | 7209 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); |
7210 | printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n", | 7210 | printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n", |
7211 | val16, val32); | 7211 | val16, val32); |
7212 | 7212 | ||
7213 | /* MAC block */ | 7213 | /* MAC block */ |
7214 | printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n", | 7214 | printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n", |
7215 | tr32(MAC_MODE), tr32(MAC_STATUS)); | 7215 | tr32(MAC_MODE), tr32(MAC_STATUS)); |
7216 | printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n", | 7216 | printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n", |
7217 | tr32(MAC_EVENT), tr32(MAC_LED_CTRL)); | 7217 | tr32(MAC_EVENT), tr32(MAC_LED_CTRL)); |
7218 | printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n", | 7218 | printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n", |
7219 | tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS)); | 7219 | tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS)); |
7220 | printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n", | 7220 | printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n", |
7221 | tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS)); | 7221 | tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS)); |
7222 | 7222 | ||
7223 | /* Send data initiator control block */ | 7223 | /* Send data initiator control block */ |
7224 | printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n", | 7224 | printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n", |
7225 | tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS)); | 7225 | tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS)); |
7226 | printk(" SNDDATAI_STATSCTRL[%08x]\n", | 7226 | printk(" SNDDATAI_STATSCTRL[%08x]\n", |
7227 | tr32(SNDDATAI_STATSCTRL)); | 7227 | tr32(SNDDATAI_STATSCTRL)); |
7228 | 7228 | ||
7229 | /* Send data completion control block */ | 7229 | /* Send data completion control block */ |
7230 | printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE)); | 7230 | printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE)); |
7231 | 7231 | ||
7232 | /* Send BD ring selector block */ | 7232 | /* Send BD ring selector block */ |
7233 | printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n", | 7233 | printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n", |
7234 | tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS)); | 7234 | tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS)); |
7235 | 7235 | ||
7236 | /* Send BD initiator control block */ | 7236 | /* Send BD initiator control block */ |
7237 | printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n", | 7237 | printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n", |
7238 | tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS)); | 7238 | tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS)); |
7239 | 7239 | ||
7240 | /* Send BD completion control block */ | 7240 | /* Send BD completion control block */ |
7241 | printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE)); | 7241 | printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE)); |
7242 | 7242 | ||
7243 | /* Receive list placement control block */ | 7243 | /* Receive list placement control block */ |
7244 | printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n", | 7244 | printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n", |
7245 | tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS)); | 7245 | tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS)); |
7246 | printk(" RCVLPC_STATSCTRL[%08x]\n", | 7246 | printk(" RCVLPC_STATSCTRL[%08x]\n", |
7247 | tr32(RCVLPC_STATSCTRL)); | 7247 | tr32(RCVLPC_STATSCTRL)); |
7248 | 7248 | ||
7249 | /* Receive data and receive BD initiator control block */ | 7249 | /* Receive data and receive BD initiator control block */ |
7250 | printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n", | 7250 | printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n", |
7251 | tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS)); | 7251 | tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS)); |
7252 | 7252 | ||
7253 | /* Receive data completion control block */ | 7253 | /* Receive data completion control block */ |
7254 | printk("DEBUG: RCVDCC_MODE[%08x]\n", | 7254 | printk("DEBUG: RCVDCC_MODE[%08x]\n", |
7255 | tr32(RCVDCC_MODE)); | 7255 | tr32(RCVDCC_MODE)); |
7256 | 7256 | ||
7257 | /* Receive BD initiator control block */ | 7257 | /* Receive BD initiator control block */ |
7258 | printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n", | 7258 | printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n", |
7259 | tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS)); | 7259 | tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS)); |
7260 | 7260 | ||
7261 | /* Receive BD completion control block */ | 7261 | /* Receive BD completion control block */ |
7262 | printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n", | 7262 | printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n", |
7263 | tr32(RCVCC_MODE), tr32(RCVCC_STATUS)); | 7263 | tr32(RCVCC_MODE), tr32(RCVCC_STATUS)); |
7264 | 7264 | ||
7265 | /* Receive list selector control block */ | 7265 | /* Receive list selector control block */ |
7266 | printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n", | 7266 | printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n", |
7267 | tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS)); | 7267 | tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS)); |
7268 | 7268 | ||
7269 | /* Mbuf cluster free block */ | 7269 | /* Mbuf cluster free block */ |
7270 | printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n", | 7270 | printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n", |
7271 | tr32(MBFREE_MODE), tr32(MBFREE_STATUS)); | 7271 | tr32(MBFREE_MODE), tr32(MBFREE_STATUS)); |
7272 | 7272 | ||
7273 | /* Host coalescing control block */ | 7273 | /* Host coalescing control block */ |
7274 | printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n", | 7274 | printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n", |
7275 | tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS)); | 7275 | tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS)); |
7276 | printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n", | 7276 | printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n", |
7277 | tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), | 7277 | tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), |
7278 | tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); | 7278 | tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); |
7279 | printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n", | 7279 | printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n", |
7280 | tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), | 7280 | tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), |
7281 | tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); | 7281 | tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); |
7282 | printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n", | 7282 | printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n", |
7283 | tr32(HOSTCC_STATS_BLK_NIC_ADDR)); | 7283 | tr32(HOSTCC_STATS_BLK_NIC_ADDR)); |
7284 | printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n", | 7284 | printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n", |
7285 | tr32(HOSTCC_STATUS_BLK_NIC_ADDR)); | 7285 | tr32(HOSTCC_STATUS_BLK_NIC_ADDR)); |
7286 | 7286 | ||
7287 | /* Memory arbiter control block */ | 7287 | /* Memory arbiter control block */ |
7288 | printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n", | 7288 | printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n", |
7289 | tr32(MEMARB_MODE), tr32(MEMARB_STATUS)); | 7289 | tr32(MEMARB_MODE), tr32(MEMARB_STATUS)); |
7290 | 7290 | ||
7291 | /* Buffer manager control block */ | 7291 | /* Buffer manager control block */ |
7292 | printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n", | 7292 | printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n", |
7293 | tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS)); | 7293 | tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS)); |
7294 | printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n", | 7294 | printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n", |
7295 | tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE)); | 7295 | tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE)); |
7296 | printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] " | 7296 | printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] " |
7297 | "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n", | 7297 | "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n", |
7298 | tr32(BUFMGR_DMA_DESC_POOL_ADDR), | 7298 | tr32(BUFMGR_DMA_DESC_POOL_ADDR), |
7299 | tr32(BUFMGR_DMA_DESC_POOL_SIZE)); | 7299 | tr32(BUFMGR_DMA_DESC_POOL_SIZE)); |
7300 | 7300 | ||
7301 | /* Read DMA control block */ | 7301 | /* Read DMA control block */ |
7302 | printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n", | 7302 | printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n", |
7303 | tr32(RDMAC_MODE), tr32(RDMAC_STATUS)); | 7303 | tr32(RDMAC_MODE), tr32(RDMAC_STATUS)); |
7304 | 7304 | ||
7305 | /* Write DMA control block */ | 7305 | /* Write DMA control block */ |
7306 | printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n", | 7306 | printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n", |
7307 | tr32(WDMAC_MODE), tr32(WDMAC_STATUS)); | 7307 | tr32(WDMAC_MODE), tr32(WDMAC_STATUS)); |
7308 | 7308 | ||
7309 | /* DMA completion block */ | 7309 | /* DMA completion block */ |
7310 | printk("DEBUG: DMAC_MODE[%08x]\n", | 7310 | printk("DEBUG: DMAC_MODE[%08x]\n", |
7311 | tr32(DMAC_MODE)); | 7311 | tr32(DMAC_MODE)); |
7312 | 7312 | ||
7313 | /* GRC block */ | 7313 | /* GRC block */ |
7314 | printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n", | 7314 | printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n", |
7315 | tr32(GRC_MODE), tr32(GRC_MISC_CFG)); | 7315 | tr32(GRC_MODE), tr32(GRC_MISC_CFG)); |
7316 | printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n", | 7316 | printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n", |
7317 | tr32(GRC_LOCAL_CTRL)); | 7317 | tr32(GRC_LOCAL_CTRL)); |
7318 | 7318 | ||
7319 | /* TG3_BDINFOs */ | 7319 | /* TG3_BDINFOs */ |
7320 | printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n", | 7320 | printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n", |
7321 | tr32(RCVDBDI_JUMBO_BD + 0x0), | 7321 | tr32(RCVDBDI_JUMBO_BD + 0x0), |
7322 | tr32(RCVDBDI_JUMBO_BD + 0x4), | 7322 | tr32(RCVDBDI_JUMBO_BD + 0x4), |
7323 | tr32(RCVDBDI_JUMBO_BD + 0x8), | 7323 | tr32(RCVDBDI_JUMBO_BD + 0x8), |
7324 | tr32(RCVDBDI_JUMBO_BD + 0xc)); | 7324 | tr32(RCVDBDI_JUMBO_BD + 0xc)); |
7325 | printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n", | 7325 | printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n", |
7326 | tr32(RCVDBDI_STD_BD + 0x0), | 7326 | tr32(RCVDBDI_STD_BD + 0x0), |
7327 | tr32(RCVDBDI_STD_BD + 0x4), | 7327 | tr32(RCVDBDI_STD_BD + 0x4), |
7328 | tr32(RCVDBDI_STD_BD + 0x8), | 7328 | tr32(RCVDBDI_STD_BD + 0x8), |
7329 | tr32(RCVDBDI_STD_BD + 0xc)); | 7329 | tr32(RCVDBDI_STD_BD + 0xc)); |
7330 | printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n", | 7330 | printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n", |
7331 | tr32(RCVDBDI_MINI_BD + 0x0), | 7331 | tr32(RCVDBDI_MINI_BD + 0x0), |
7332 | tr32(RCVDBDI_MINI_BD + 0x4), | 7332 | tr32(RCVDBDI_MINI_BD + 0x4), |
7333 | tr32(RCVDBDI_MINI_BD + 0x8), | 7333 | tr32(RCVDBDI_MINI_BD + 0x8), |
7334 | tr32(RCVDBDI_MINI_BD + 0xc)); | 7334 | tr32(RCVDBDI_MINI_BD + 0xc)); |
7335 | 7335 | ||
7336 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32); | 7336 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32); |
7337 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2); | 7337 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2); |
7338 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3); | 7338 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3); |
7339 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4); | 7339 | tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4); |
7340 | printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n", | 7340 | printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n", |
7341 | val32, val32_2, val32_3, val32_4); | 7341 | val32, val32_2, val32_3, val32_4); |
7342 | 7342 | ||
7343 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32); | 7343 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32); |
7344 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2); | 7344 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2); |
7345 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3); | 7345 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3); |
7346 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4); | 7346 | tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4); |
7347 | printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n", | 7347 | printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n", |
7348 | val32, val32_2, val32_3, val32_4); | 7348 | val32, val32_2, val32_3, val32_4); |
7349 | 7349 | ||
7350 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32); | 7350 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32); |
7351 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2); | 7351 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2); |
7352 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3); | 7352 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3); |
7353 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4); | 7353 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4); |
7354 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5); | 7354 | tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5); |
7355 | printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n", | 7355 | printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n", |
7356 | val32, val32_2, val32_3, val32_4, val32_5); | 7356 | val32, val32_2, val32_3, val32_4, val32_5); |
7357 | 7357 | ||
7358 | /* SW status block */ | 7358 | /* SW status block */ |
7359 | printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", | 7359 | printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", |
7360 | tp->hw_status->status, | 7360 | tp->hw_status->status, |
7361 | tp->hw_status->status_tag, | 7361 | tp->hw_status->status_tag, |
7362 | tp->hw_status->rx_jumbo_consumer, | 7362 | tp->hw_status->rx_jumbo_consumer, |
7363 | tp->hw_status->rx_consumer, | 7363 | tp->hw_status->rx_consumer, |
7364 | tp->hw_status->rx_mini_consumer, | 7364 | tp->hw_status->rx_mini_consumer, |
7365 | tp->hw_status->idx[0].rx_producer, | 7365 | tp->hw_status->idx[0].rx_producer, |
7366 | tp->hw_status->idx[0].tx_consumer); | 7366 | tp->hw_status->idx[0].tx_consumer); |
7367 | 7367 | ||
7368 | /* SW statistics block */ | 7368 | /* SW statistics block */ |
7369 | printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", | 7369 | printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", |
7370 | ((u32 *)tp->hw_stats)[0], | 7370 | ((u32 *)tp->hw_stats)[0], |
7371 | ((u32 *)tp->hw_stats)[1], | 7371 | ((u32 *)tp->hw_stats)[1], |
7372 | ((u32 *)tp->hw_stats)[2], | 7372 | ((u32 *)tp->hw_stats)[2], |
7373 | ((u32 *)tp->hw_stats)[3]); | 7373 | ((u32 *)tp->hw_stats)[3]); |
7374 | 7374 | ||
7375 | /* Mailboxes */ | 7375 | /* Mailboxes */ |
7376 | printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", | 7376 | printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", |
7377 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), | 7377 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), |
7378 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), | 7378 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), |
7379 | tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), | 7379 | tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), |
7380 | tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); | 7380 | tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); |
7381 | 7381 | ||
7382 | /* NIC side send descriptors. */ | 7382 | /* NIC side send descriptors. */ |
7383 | for (i = 0; i < 6; i++) { | 7383 | for (i = 0; i < 6; i++) { |
7384 | unsigned long txd; | 7384 | unsigned long txd; |
7385 | 7385 | ||
7386 | txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC | 7386 | txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC |
7387 | + (i * sizeof(struct tg3_tx_buffer_desc)); | 7387 | + (i * sizeof(struct tg3_tx_buffer_desc)); |
7388 | printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n", | 7388 | printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n", |
7389 | i, | 7389 | i, |
7390 | readl(txd + 0x0), readl(txd + 0x4), | 7390 | readl(txd + 0x0), readl(txd + 0x4), |
7391 | readl(txd + 0x8), readl(txd + 0xc)); | 7391 | readl(txd + 0x8), readl(txd + 0xc)); |
7392 | } | 7392 | } |
7393 | 7393 | ||
7394 | /* NIC side RX descriptors. */ | 7394 | /* NIC side RX descriptors. */ |
7395 | for (i = 0; i < 6; i++) { | 7395 | for (i = 0; i < 6; i++) { |
7396 | unsigned long rxd; | 7396 | unsigned long rxd; |
7397 | 7397 | ||
7398 | rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC | 7398 | rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC |
7399 | + (i * sizeof(struct tg3_rx_buffer_desc)); | 7399 | + (i * sizeof(struct tg3_rx_buffer_desc)); |
7400 | printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n", | 7400 | printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n", |
7401 | i, | 7401 | i, |
7402 | readl(rxd + 0x0), readl(rxd + 0x4), | 7402 | readl(rxd + 0x0), readl(rxd + 0x4), |
7403 | readl(rxd + 0x8), readl(rxd + 0xc)); | 7403 | readl(rxd + 0x8), readl(rxd + 0xc)); |
7404 | rxd += (4 * sizeof(u32)); | 7404 | rxd += (4 * sizeof(u32)); |
7405 | printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n", | 7405 | printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n", |
7406 | i, | 7406 | i, |
7407 | readl(rxd + 0x0), readl(rxd + 0x4), | 7407 | readl(rxd + 0x0), readl(rxd + 0x4), |
7408 | readl(rxd + 0x8), readl(rxd + 0xc)); | 7408 | readl(rxd + 0x8), readl(rxd + 0xc)); |
7409 | } | 7409 | } |
7410 | 7410 | ||
7411 | for (i = 0; i < 6; i++) { | 7411 | for (i = 0; i < 6; i++) { |
7412 | unsigned long rxd; | 7412 | unsigned long rxd; |
7413 | 7413 | ||
7414 | rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC | 7414 | rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC |
7415 | + (i * sizeof(struct tg3_rx_buffer_desc)); | 7415 | + (i * sizeof(struct tg3_rx_buffer_desc)); |
7416 | printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n", | 7416 | printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n", |
7417 | i, | 7417 | i, |
7418 | readl(rxd + 0x0), readl(rxd + 0x4), | 7418 | readl(rxd + 0x0), readl(rxd + 0x4), |
7419 | readl(rxd + 0x8), readl(rxd + 0xc)); | 7419 | readl(rxd + 0x8), readl(rxd + 0xc)); |
7420 | rxd += (4 * sizeof(u32)); | 7420 | rxd += (4 * sizeof(u32)); |
7421 | printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n", | 7421 | printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n", |
7422 | i, | 7422 | i, |
7423 | readl(rxd + 0x0), readl(rxd + 0x4), | 7423 | readl(rxd + 0x0), readl(rxd + 0x4), |
7424 | readl(rxd + 0x8), readl(rxd + 0xc)); | 7424 | readl(rxd + 0x8), readl(rxd + 0xc)); |
7425 | } | 7425 | } |
7426 | } | 7426 | } |
7427 | #endif | 7427 | #endif |
7428 | 7428 | ||
7429 | static struct net_device_stats *tg3_get_stats(struct net_device *); | 7429 | static struct net_device_stats *tg3_get_stats(struct net_device *); |
7430 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); | 7430 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); |
7431 | 7431 | ||
7432 | static int tg3_close(struct net_device *dev) | 7432 | static int tg3_close(struct net_device *dev) |
7433 | { | 7433 | { |
7434 | struct tg3 *tp = netdev_priv(dev); | 7434 | struct tg3 *tp = netdev_priv(dev); |
7435 | 7435 | ||
7436 | cancel_work_sync(&tp->reset_task); | 7436 | cancel_work_sync(&tp->reset_task); |
7437 | 7437 | ||
7438 | netif_stop_queue(dev); | 7438 | netif_stop_queue(dev); |
7439 | 7439 | ||
7440 | del_timer_sync(&tp->timer); | 7440 | del_timer_sync(&tp->timer); |
7441 | 7441 | ||
7442 | tg3_full_lock(tp, 1); | 7442 | tg3_full_lock(tp, 1); |
7443 | #if 0 | 7443 | #if 0 |
7444 | tg3_dump_state(tp); | 7444 | tg3_dump_state(tp); |
7445 | #endif | 7445 | #endif |
7446 | 7446 | ||
7447 | tg3_disable_ints(tp); | 7447 | tg3_disable_ints(tp); |
7448 | 7448 | ||
7449 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 7449 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7450 | tg3_free_rings(tp); | 7450 | tg3_free_rings(tp); |
7451 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 7451 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; |
7452 | 7452 | ||
7453 | tg3_full_unlock(tp); | 7453 | tg3_full_unlock(tp); |
7454 | 7454 | ||
7455 | free_irq(tp->pdev->irq, dev); | 7455 | free_irq(tp->pdev->irq, dev); |
7456 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 7456 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
7457 | pci_disable_msi(tp->pdev); | 7457 | pci_disable_msi(tp->pdev); |
7458 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 7458 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
7459 | } | 7459 | } |
7460 | 7460 | ||
7461 | memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), | 7461 | memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), |
7462 | sizeof(tp->net_stats_prev)); | 7462 | sizeof(tp->net_stats_prev)); |
7463 | memcpy(&tp->estats_prev, tg3_get_estats(tp), | 7463 | memcpy(&tp->estats_prev, tg3_get_estats(tp), |
7464 | sizeof(tp->estats_prev)); | 7464 | sizeof(tp->estats_prev)); |
7465 | 7465 | ||
7466 | tg3_free_consistent(tp); | 7466 | tg3_free_consistent(tp); |
7467 | 7467 | ||
7468 | tg3_set_power_state(tp, PCI_D3hot); | 7468 | tg3_set_power_state(tp, PCI_D3hot); |
7469 | 7469 | ||
7470 | netif_carrier_off(tp->dev); | 7470 | netif_carrier_off(tp->dev); |
7471 | 7471 | ||
7472 | return 0; | 7472 | return 0; |
7473 | } | 7473 | } |
7474 | 7474 | ||
7475 | static inline unsigned long get_stat64(tg3_stat64_t *val) | 7475 | static inline unsigned long get_stat64(tg3_stat64_t *val) |
7476 | { | 7476 | { |
7477 | unsigned long ret; | 7477 | unsigned long ret; |
7478 | 7478 | ||
7479 | #if (BITS_PER_LONG == 32) | 7479 | #if (BITS_PER_LONG == 32) |
7480 | ret = val->low; | 7480 | ret = val->low; |
7481 | #else | 7481 | #else |
7482 | ret = ((u64)val->high << 32) | ((u64)val->low); | 7482 | ret = ((u64)val->high << 32) | ((u64)val->low); |
7483 | #endif | 7483 | #endif |
7484 | return ret; | 7484 | return ret; |
7485 | } | 7485 | } |
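
get_stat64() above folds the NIC's high:low 32-bit counter pairs into a single value; on a 32-bit kernel only the low word is returned, which is the same value a full 64-bit sum would yield once truncated into the unsigned long fields of struct net_device_stats, while avoiding 64-bit arithmetic. A standalone illustration (stat64_pair is a stand-in for tg3_stat64_t, not the driver's type):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for tg3_stat64_t: the NIC exports each counter as a
     * pair of 32-bit words. */
    struct stat64_pair {
            uint32_t high;
            uint32_t low;
    };

    /* Same combine as get_stat64() does on a 64-bit kernel. */
    static uint64_t combine(const struct stat64_pair *v)
    {
            return ((uint64_t)v->high << 32) | v->low;
    }

    int main(void)
    {
            struct stat64_pair rx_octets = { .high = 0x1, .low = 0x10 };

            printf("64-bit kernel sees: %llu\n",
                   (unsigned long long)combine(&rx_octets));
            printf("32-bit kernel sees: %lu (low word only)\n",
                   (unsigned long)rx_octets.low);
            return 0;
    }
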
7486 | 7486 | ||
7487 | static unsigned long calc_crc_errors(struct tg3 *tp) | 7487 | static unsigned long calc_crc_errors(struct tg3 *tp) |
7488 | { | 7488 | { |
7489 | struct tg3_hw_stats *hw_stats = tp->hw_stats; | 7489 | struct tg3_hw_stats *hw_stats = tp->hw_stats; |
7490 | 7490 | ||
7491 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 7491 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && |
7492 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 7492 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
7493 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 7493 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { |
7494 | u32 val; | 7494 | u32 val; |
7495 | 7495 | ||
7496 | spin_lock_bh(&tp->lock); | 7496 | spin_lock_bh(&tp->lock); |
7497 | if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { | 7497 | if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { |
7498 | tg3_writephy(tp, MII_TG3_TEST1, | 7498 | tg3_writephy(tp, MII_TG3_TEST1, |
7499 | val | MII_TG3_TEST1_CRC_EN); | 7499 | val | MII_TG3_TEST1_CRC_EN); |
7500 | tg3_readphy(tp, 0x14, &val); | 7500 | tg3_readphy(tp, 0x14, &val); |
7501 | } else | 7501 | } else |
7502 | val = 0; | 7502 | val = 0; |
7503 | spin_unlock_bh(&tp->lock); | 7503 | spin_unlock_bh(&tp->lock); |
7504 | 7504 | ||
7505 | tp->phy_crc_errors += val; | 7505 | tp->phy_crc_errors += val; |
7506 | 7506 | ||
7507 | return tp->phy_crc_errors; | 7507 | return tp->phy_crc_errors; |
7508 | } | 7508 | } |
7509 | 7509 | ||
7510 | return get_stat64(&hw_stats->rx_fcs_errors); | 7510 | return get_stat64(&hw_stats->rx_fcs_errors); |
7511 | } | 7511 | } |
7512 | 7512 | ||
7513 | #define ESTAT_ADD(member) \ | 7513 | #define ESTAT_ADD(member) \ |
7514 | estats->member = old_estats->member + \ | 7514 | estats->member = old_estats->member + \ |
7515 | get_stat64(&hw_stats->member) | 7515 | get_stat64(&hw_stats->member) |
7516 | 7516 | ||
7517 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) | 7517 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) |
7518 | { | 7518 | { |
7519 | struct tg3_ethtool_stats *estats = &tp->estats; | 7519 | struct tg3_ethtool_stats *estats = &tp->estats; |
7520 | struct tg3_ethtool_stats *old_estats = &tp->estats_prev; | 7520 | struct tg3_ethtool_stats *old_estats = &tp->estats_prev; |
7521 | struct tg3_hw_stats *hw_stats = tp->hw_stats; | 7521 | struct tg3_hw_stats *hw_stats = tp->hw_stats; |
7522 | 7522 | ||
7523 | if (!hw_stats) | 7523 | if (!hw_stats) |
7524 | return old_estats; | 7524 | return old_estats; |
7525 | 7525 | ||
7526 | ESTAT_ADD(rx_octets); | 7526 | ESTAT_ADD(rx_octets); |
7527 | ESTAT_ADD(rx_fragments); | 7527 | ESTAT_ADD(rx_fragments); |
7528 | ESTAT_ADD(rx_ucast_packets); | 7528 | ESTAT_ADD(rx_ucast_packets); |
7529 | ESTAT_ADD(rx_mcast_packets); | 7529 | ESTAT_ADD(rx_mcast_packets); |
7530 | ESTAT_ADD(rx_bcast_packets); | 7530 | ESTAT_ADD(rx_bcast_packets); |
7531 | ESTAT_ADD(rx_fcs_errors); | 7531 | ESTAT_ADD(rx_fcs_errors); |
7532 | ESTAT_ADD(rx_align_errors); | 7532 | ESTAT_ADD(rx_align_errors); |
7533 | ESTAT_ADD(rx_xon_pause_rcvd); | 7533 | ESTAT_ADD(rx_xon_pause_rcvd); |
7534 | ESTAT_ADD(rx_xoff_pause_rcvd); | 7534 | ESTAT_ADD(rx_xoff_pause_rcvd); |
7535 | ESTAT_ADD(rx_mac_ctrl_rcvd); | 7535 | ESTAT_ADD(rx_mac_ctrl_rcvd); |
7536 | ESTAT_ADD(rx_xoff_entered); | 7536 | ESTAT_ADD(rx_xoff_entered); |
7537 | ESTAT_ADD(rx_frame_too_long_errors); | 7537 | ESTAT_ADD(rx_frame_too_long_errors); |
7538 | ESTAT_ADD(rx_jabbers); | 7538 | ESTAT_ADD(rx_jabbers); |
7539 | ESTAT_ADD(rx_undersize_packets); | 7539 | ESTAT_ADD(rx_undersize_packets); |
7540 | ESTAT_ADD(rx_in_length_errors); | 7540 | ESTAT_ADD(rx_in_length_errors); |
7541 | ESTAT_ADD(rx_out_length_errors); | 7541 | ESTAT_ADD(rx_out_length_errors); |
7542 | ESTAT_ADD(rx_64_or_less_octet_packets); | 7542 | ESTAT_ADD(rx_64_or_less_octet_packets); |
7543 | ESTAT_ADD(rx_65_to_127_octet_packets); | 7543 | ESTAT_ADD(rx_65_to_127_octet_packets); |
7544 | ESTAT_ADD(rx_128_to_255_octet_packets); | 7544 | ESTAT_ADD(rx_128_to_255_octet_packets); |
7545 | ESTAT_ADD(rx_256_to_511_octet_packets); | 7545 | ESTAT_ADD(rx_256_to_511_octet_packets); |
7546 | ESTAT_ADD(rx_512_to_1023_octet_packets); | 7546 | ESTAT_ADD(rx_512_to_1023_octet_packets); |
7547 | ESTAT_ADD(rx_1024_to_1522_octet_packets); | 7547 | ESTAT_ADD(rx_1024_to_1522_octet_packets); |
7548 | ESTAT_ADD(rx_1523_to_2047_octet_packets); | 7548 | ESTAT_ADD(rx_1523_to_2047_octet_packets); |
7549 | ESTAT_ADD(rx_2048_to_4095_octet_packets); | 7549 | ESTAT_ADD(rx_2048_to_4095_octet_packets); |
7550 | ESTAT_ADD(rx_4096_to_8191_octet_packets); | 7550 | ESTAT_ADD(rx_4096_to_8191_octet_packets); |
7551 | ESTAT_ADD(rx_8192_to_9022_octet_packets); | 7551 | ESTAT_ADD(rx_8192_to_9022_octet_packets); |
7552 | 7552 | ||
7553 | ESTAT_ADD(tx_octets); | 7553 | ESTAT_ADD(tx_octets); |
7554 | ESTAT_ADD(tx_collisions); | 7554 | ESTAT_ADD(tx_collisions); |
7555 | ESTAT_ADD(tx_xon_sent); | 7555 | ESTAT_ADD(tx_xon_sent); |
7556 | ESTAT_ADD(tx_xoff_sent); | 7556 | ESTAT_ADD(tx_xoff_sent); |
7557 | ESTAT_ADD(tx_flow_control); | 7557 | ESTAT_ADD(tx_flow_control); |
7558 | ESTAT_ADD(tx_mac_errors); | 7558 | ESTAT_ADD(tx_mac_errors); |
7559 | ESTAT_ADD(tx_single_collisions); | 7559 | ESTAT_ADD(tx_single_collisions); |
7560 | ESTAT_ADD(tx_mult_collisions); | 7560 | ESTAT_ADD(tx_mult_collisions); |
7561 | ESTAT_ADD(tx_deferred); | 7561 | ESTAT_ADD(tx_deferred); |
7562 | ESTAT_ADD(tx_excessive_collisions); | 7562 | ESTAT_ADD(tx_excessive_collisions); |
7563 | ESTAT_ADD(tx_late_collisions); | 7563 | ESTAT_ADD(tx_late_collisions); |
7564 | ESTAT_ADD(tx_collide_2times); | 7564 | ESTAT_ADD(tx_collide_2times); |
7565 | ESTAT_ADD(tx_collide_3times); | 7565 | ESTAT_ADD(tx_collide_3times); |
7566 | ESTAT_ADD(tx_collide_4times); | 7566 | ESTAT_ADD(tx_collide_4times); |
7567 | ESTAT_ADD(tx_collide_5times); | 7567 | ESTAT_ADD(tx_collide_5times); |
7568 | ESTAT_ADD(tx_collide_6times); | 7568 | ESTAT_ADD(tx_collide_6times); |
7569 | ESTAT_ADD(tx_collide_7times); | 7569 | ESTAT_ADD(tx_collide_7times); |
7570 | ESTAT_ADD(tx_collide_8times); | 7570 | ESTAT_ADD(tx_collide_8times); |
7571 | ESTAT_ADD(tx_collide_9times); | 7571 | ESTAT_ADD(tx_collide_9times); |
7572 | ESTAT_ADD(tx_collide_10times); | 7572 | ESTAT_ADD(tx_collide_10times); |
7573 | ESTAT_ADD(tx_collide_11times); | 7573 | ESTAT_ADD(tx_collide_11times); |
7574 | ESTAT_ADD(tx_collide_12times); | 7574 | ESTAT_ADD(tx_collide_12times); |
7575 | ESTAT_ADD(tx_collide_13times); | 7575 | ESTAT_ADD(tx_collide_13times); |
7576 | ESTAT_ADD(tx_collide_14times); | 7576 | ESTAT_ADD(tx_collide_14times); |
7577 | ESTAT_ADD(tx_collide_15times); | 7577 | ESTAT_ADD(tx_collide_15times); |
7578 | ESTAT_ADD(tx_ucast_packets); | 7578 | ESTAT_ADD(tx_ucast_packets); |
7579 | ESTAT_ADD(tx_mcast_packets); | 7579 | ESTAT_ADD(tx_mcast_packets); |
7580 | ESTAT_ADD(tx_bcast_packets); | 7580 | ESTAT_ADD(tx_bcast_packets); |
7581 | ESTAT_ADD(tx_carrier_sense_errors); | 7581 | ESTAT_ADD(tx_carrier_sense_errors); |
7582 | ESTAT_ADD(tx_discards); | 7582 | ESTAT_ADD(tx_discards); |
7583 | ESTAT_ADD(tx_errors); | 7583 | ESTAT_ADD(tx_errors); |
7584 | 7584 | ||
7585 | ESTAT_ADD(dma_writeq_full); | 7585 | ESTAT_ADD(dma_writeq_full); |
7586 | ESTAT_ADD(dma_write_prioq_full); | 7586 | ESTAT_ADD(dma_write_prioq_full); |
7587 | ESTAT_ADD(rxbds_empty); | 7587 | ESTAT_ADD(rxbds_empty); |
7588 | ESTAT_ADD(rx_discards); | 7588 | ESTAT_ADD(rx_discards); |
7589 | ESTAT_ADD(rx_errors); | 7589 | ESTAT_ADD(rx_errors); |
7590 | ESTAT_ADD(rx_threshold_hit); | 7590 | ESTAT_ADD(rx_threshold_hit); |
7591 | 7591 | ||
7592 | ESTAT_ADD(dma_readq_full); | 7592 | ESTAT_ADD(dma_readq_full); |
7593 | ESTAT_ADD(dma_read_prioq_full); | 7593 | ESTAT_ADD(dma_read_prioq_full); |
7594 | ESTAT_ADD(tx_comp_queue_full); | 7594 | ESTAT_ADD(tx_comp_queue_full); |
7595 | 7595 | ||
7596 | ESTAT_ADD(ring_set_send_prod_index); | 7596 | ESTAT_ADD(ring_set_send_prod_index); |
7597 | ESTAT_ADD(ring_status_update); | 7597 | ESTAT_ADD(ring_status_update); |
7598 | ESTAT_ADD(nic_irqs); | 7598 | ESTAT_ADD(nic_irqs); |
7599 | ESTAT_ADD(nic_avoided_irqs); | 7599 | ESTAT_ADD(nic_avoided_irqs); |
7600 | ESTAT_ADD(nic_tx_threshold_hit); | 7600 | ESTAT_ADD(nic_tx_threshold_hit); |
7601 | 7601 | ||
7602 | return estats; | 7602 | return estats; |
7603 | } | 7603 | } |
7604 | 7604 | ||
7605 | static struct net_device_stats *tg3_get_stats(struct net_device *dev) | 7605 | static struct net_device_stats *tg3_get_stats(struct net_device *dev) |
7606 | { | 7606 | { |
7607 | struct tg3 *tp = netdev_priv(dev); | 7607 | struct tg3 *tp = netdev_priv(dev); |
7608 | struct net_device_stats *stats = &tp->net_stats; | 7608 | struct net_device_stats *stats = &tp->net_stats; |
7609 | struct net_device_stats *old_stats = &tp->net_stats_prev; | 7609 | struct net_device_stats *old_stats = &tp->net_stats_prev; |
7610 | struct tg3_hw_stats *hw_stats = tp->hw_stats; | 7610 | struct tg3_hw_stats *hw_stats = tp->hw_stats; |
7611 | 7611 | ||
7612 | if (!hw_stats) | 7612 | if (!hw_stats) |
7613 | return old_stats; | 7613 | return old_stats; |
7614 | 7614 | ||
7615 | stats->rx_packets = old_stats->rx_packets + | 7615 | stats->rx_packets = old_stats->rx_packets + |
7616 | get_stat64(&hw_stats->rx_ucast_packets) + | 7616 | get_stat64(&hw_stats->rx_ucast_packets) + |
7617 | get_stat64(&hw_stats->rx_mcast_packets) + | 7617 | get_stat64(&hw_stats->rx_mcast_packets) + |
7618 | get_stat64(&hw_stats->rx_bcast_packets); | 7618 | get_stat64(&hw_stats->rx_bcast_packets); |
7619 | 7619 | ||
7620 | stats->tx_packets = old_stats->tx_packets + | 7620 | stats->tx_packets = old_stats->tx_packets + |
7621 | get_stat64(&hw_stats->tx_ucast_packets) + | 7621 | get_stat64(&hw_stats->tx_ucast_packets) + |
7622 | get_stat64(&hw_stats->tx_mcast_packets) + | 7622 | get_stat64(&hw_stats->tx_mcast_packets) + |
7623 | get_stat64(&hw_stats->tx_bcast_packets); | 7623 | get_stat64(&hw_stats->tx_bcast_packets); |
7624 | 7624 | ||
7625 | stats->rx_bytes = old_stats->rx_bytes + | 7625 | stats->rx_bytes = old_stats->rx_bytes + |
7626 | get_stat64(&hw_stats->rx_octets); | 7626 | get_stat64(&hw_stats->rx_octets); |
7627 | stats->tx_bytes = old_stats->tx_bytes + | 7627 | stats->tx_bytes = old_stats->tx_bytes + |
7628 | get_stat64(&hw_stats->tx_octets); | 7628 | get_stat64(&hw_stats->tx_octets); |
7629 | 7629 | ||
7630 | stats->rx_errors = old_stats->rx_errors + | 7630 | stats->rx_errors = old_stats->rx_errors + |
7631 | get_stat64(&hw_stats->rx_errors); | 7631 | get_stat64(&hw_stats->rx_errors); |
7632 | stats->tx_errors = old_stats->tx_errors + | 7632 | stats->tx_errors = old_stats->tx_errors + |
7633 | get_stat64(&hw_stats->tx_errors) + | 7633 | get_stat64(&hw_stats->tx_errors) + |
7634 | get_stat64(&hw_stats->tx_mac_errors) + | 7634 | get_stat64(&hw_stats->tx_mac_errors) + |
7635 | get_stat64(&hw_stats->tx_carrier_sense_errors) + | 7635 | get_stat64(&hw_stats->tx_carrier_sense_errors) + |
7636 | get_stat64(&hw_stats->tx_discards); | 7636 | get_stat64(&hw_stats->tx_discards); |
7637 | 7637 | ||
7638 | stats->multicast = old_stats->multicast + | 7638 | stats->multicast = old_stats->multicast + |
7639 | get_stat64(&hw_stats->rx_mcast_packets); | 7639 | get_stat64(&hw_stats->rx_mcast_packets); |
7640 | stats->collisions = old_stats->collisions + | 7640 | stats->collisions = old_stats->collisions + |
7641 | get_stat64(&hw_stats->tx_collisions); | 7641 | get_stat64(&hw_stats->tx_collisions); |
7642 | 7642 | ||
7643 | stats->rx_length_errors = old_stats->rx_length_errors + | 7643 | stats->rx_length_errors = old_stats->rx_length_errors + |
7644 | get_stat64(&hw_stats->rx_frame_too_long_errors) + | 7644 | get_stat64(&hw_stats->rx_frame_too_long_errors) + |
7645 | get_stat64(&hw_stats->rx_undersize_packets); | 7645 | get_stat64(&hw_stats->rx_undersize_packets); |
7646 | 7646 | ||
7647 | stats->rx_over_errors = old_stats->rx_over_errors + | 7647 | stats->rx_over_errors = old_stats->rx_over_errors + |
7648 | get_stat64(&hw_stats->rxbds_empty); | 7648 | get_stat64(&hw_stats->rxbds_empty); |
7649 | stats->rx_frame_errors = old_stats->rx_frame_errors + | 7649 | stats->rx_frame_errors = old_stats->rx_frame_errors + |
7650 | get_stat64(&hw_stats->rx_align_errors); | 7650 | get_stat64(&hw_stats->rx_align_errors); |
7651 | stats->tx_aborted_errors = old_stats->tx_aborted_errors + | 7651 | stats->tx_aborted_errors = old_stats->tx_aborted_errors + |
7652 | get_stat64(&hw_stats->tx_discards); | 7652 | get_stat64(&hw_stats->tx_discards); |
7653 | stats->tx_carrier_errors = old_stats->tx_carrier_errors + | 7653 | stats->tx_carrier_errors = old_stats->tx_carrier_errors + |
7654 | get_stat64(&hw_stats->tx_carrier_sense_errors); | 7654 | get_stat64(&hw_stats->tx_carrier_sense_errors); |
7655 | 7655 | ||
7656 | stats->rx_crc_errors = old_stats->rx_crc_errors + | 7656 | stats->rx_crc_errors = old_stats->rx_crc_errors + |
7657 | calc_crc_errors(tp); | 7657 | calc_crc_errors(tp); |
7658 | 7658 | ||
7659 | stats->rx_missed_errors = old_stats->rx_missed_errors + | 7659 | stats->rx_missed_errors = old_stats->rx_missed_errors + |
7660 | get_stat64(&hw_stats->rx_discards); | 7660 | get_stat64(&hw_stats->rx_discards); |
7661 | 7661 | ||
7662 | return stats; | 7662 | return stats; |
7663 | } | 7663 | } |
7664 | 7664 | ||
7665 | static inline u32 calc_crc(unsigned char *buf, int len) | 7665 | static inline u32 calc_crc(unsigned char *buf, int len) |
7666 | { | 7666 | { |
7667 | u32 reg; | 7667 | u32 reg; |
7668 | u32 tmp; | 7668 | u32 tmp; |
7669 | int j, k; | 7669 | int j, k; |
7670 | 7670 | ||
7671 | reg = 0xffffffff; | 7671 | reg = 0xffffffff; |
7672 | 7672 | ||
7673 | for (j = 0; j < len; j++) { | 7673 | for (j = 0; j < len; j++) { |
7674 | reg ^= buf[j]; | 7674 | reg ^= buf[j]; |
7675 | 7675 | ||
7676 | for (k = 0; k < 8; k++) { | 7676 | for (k = 0; k < 8; k++) { |
7677 | tmp = reg & 0x01; | 7677 | tmp = reg & 0x01; |
7678 | 7678 | ||
7679 | reg >>= 1; | 7679 | reg >>= 1; |
7680 | 7680 | ||
7681 | if (tmp) { | 7681 | if (tmp) { |
7682 | reg ^= 0xedb88320; | 7682 | reg ^= 0xedb88320; |
7683 | } | 7683 | } |
7684 | } | 7684 | } |
7685 | } | 7685 | } |
7686 | 7686 | ||
7687 | return ~reg; | 7687 | return ~reg; |
7688 | } | 7688 | } |
7689 | 7689 | ||
7690 | static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) | 7690 | static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) |
7691 | { | 7691 | { |
7692 | /* accept or reject all multicast frames */ | 7692 | /* accept or reject all multicast frames */ |
7693 | tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); | 7693 | tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); |
7694 | tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); | 7694 | tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); |
7695 | tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); | 7695 | tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); |
7696 | tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); | 7696 | tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); |
7697 | } | 7697 | } |
7698 | 7698 | ||
7699 | static void __tg3_set_rx_mode(struct net_device *dev) | 7699 | static void __tg3_set_rx_mode(struct net_device *dev) |
7700 | { | 7700 | { |
7701 | struct tg3 *tp = netdev_priv(dev); | 7701 | struct tg3 *tp = netdev_priv(dev); |
7702 | u32 rx_mode; | 7702 | u32 rx_mode; |
7703 | 7703 | ||
7704 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | | 7704 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | |
7705 | RX_MODE_KEEP_VLAN_TAG); | 7705 | RX_MODE_KEEP_VLAN_TAG); |
7706 | 7706 | ||
7707 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG | 7707 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG |
7708 | * flag clear. | 7708 | * flag clear. |
7709 | */ | 7709 | */ |
7710 | #if TG3_VLAN_TAG_USED | 7710 | #if TG3_VLAN_TAG_USED |
7711 | if (!tp->vlgrp && | 7711 | if (!tp->vlgrp && |
7712 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 7712 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) |
7713 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 7713 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; |
7714 | #else | 7714 | #else |
7715 | /* By definition, VLAN is disabled always in this | 7715 | /* By definition, VLAN is disabled always in this |
7716 | * case. | 7716 | * case. |
7717 | */ | 7717 | */ |
7718 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 7718 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) |
7719 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 7719 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; |
7720 | #endif | 7720 | #endif |
7721 | 7721 | ||
7722 | if (dev->flags & IFF_PROMISC) { | 7722 | if (dev->flags & IFF_PROMISC) { |
7723 | /* Promiscuous mode. */ | 7723 | /* Promiscuous mode. */ |
7724 | rx_mode |= RX_MODE_PROMISC; | 7724 | rx_mode |= RX_MODE_PROMISC; |
7725 | } else if (dev->flags & IFF_ALLMULTI) { | 7725 | } else if (dev->flags & IFF_ALLMULTI) { |
7726 | /* Accept all multicast. */ | 7726 | /* Accept all multicast. */ |
7727 | tg3_set_multi (tp, 1); | 7727 | tg3_set_multi (tp, 1); |
7728 | } else if (dev->mc_count < 1) { | 7728 | } else if (dev->mc_count < 1) { |
7729 | /* Reject all multicast. */ | 7729 | /* Reject all multicast. */ |
7730 | tg3_set_multi (tp, 0); | 7730 | tg3_set_multi (tp, 0); |
7731 | } else { | 7731 | } else { |
7732 | /* Accept one or more multicast(s). */ | 7732 | /* Accept one or more multicast(s). */ |
7733 | struct dev_mc_list *mclist; | 7733 | struct dev_mc_list *mclist; |
7734 | unsigned int i; | 7734 | unsigned int i; |
7735 | u32 mc_filter[4] = { 0, }; | 7735 | u32 mc_filter[4] = { 0, }; |
7736 | u32 regidx; | 7736 | u32 regidx; |
7737 | u32 bit; | 7737 | u32 bit; |
7738 | u32 crc; | 7738 | u32 crc; |
7739 | 7739 | ||
7740 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | 7740 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; |
7741 | i++, mclist = mclist->next) { | 7741 | i++, mclist = mclist->next) { |
7742 | 7742 | ||
7743 | crc = calc_crc (mclist->dmi_addr, ETH_ALEN); | 7743 | crc = calc_crc (mclist->dmi_addr, ETH_ALEN); |
7744 | bit = ~crc & 0x7f; | 7744 | bit = ~crc & 0x7f; |
7745 | regidx = (bit & 0x60) >> 5; | 7745 | regidx = (bit & 0x60) >> 5; |
7746 | bit &= 0x1f; | 7746 | bit &= 0x1f; |
7747 | mc_filter[regidx] |= (1 << bit); | 7747 | mc_filter[regidx] |= (1 << bit); |
7748 | } | 7748 | } |
7749 | 7749 | ||
7750 | tw32(MAC_HASH_REG_0, mc_filter[0]); | 7750 | tw32(MAC_HASH_REG_0, mc_filter[0]); |
7751 | tw32(MAC_HASH_REG_1, mc_filter[1]); | 7751 | tw32(MAC_HASH_REG_1, mc_filter[1]); |
7752 | tw32(MAC_HASH_REG_2, mc_filter[2]); | 7752 | tw32(MAC_HASH_REG_2, mc_filter[2]); |
7753 | tw32(MAC_HASH_REG_3, mc_filter[3]); | 7753 | tw32(MAC_HASH_REG_3, mc_filter[3]); |
7754 | } | 7754 | } |
7755 | 7755 | ||
7756 | if (rx_mode != tp->rx_mode) { | 7756 | if (rx_mode != tp->rx_mode) { |
7757 | tp->rx_mode = rx_mode; | 7757 | tp->rx_mode = rx_mode; |
7758 | tw32_f(MAC_RX_MODE, rx_mode); | 7758 | tw32_f(MAC_RX_MODE, rx_mode); |
7759 | udelay(10); | 7759 | udelay(10); |
7760 | } | 7760 | } |
7761 | } | 7761 | } |
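
calc_crc() above is the usual bit-reflected CRC-32 used for Ethernet (reflected polynomial 0xedb88320), and __tg3_set_rx_mode() reduces it to a 128-bit multicast hash: the low 7 bits of the inverted CRC select one bit spread across the four MAC_HASH registers. The self-contained program below repeats that mapping for a sample multicast address so the regidx/bit arithmetic can be checked in userspace; it mirrors the driver's logic but is an illustration, not driver code:

    #include <stdio.h>
    #include <stdint.h>

    /* Same bit-reflected CRC-32 as the driver's calc_crc(). */
    static uint32_t crc32_le(const unsigned char *buf, int len)
    {
            uint32_t reg = 0xffffffff;
            int j, k;

            for (j = 0; j < len; j++) {
                    reg ^= buf[j];
                    for (k = 0; k < 8; k++) {
                            uint32_t lsb = reg & 1;

                            reg >>= 1;
                            if (lsb)
                                    reg ^= 0xedb88320;
                    }
            }
            return ~reg;
    }

    int main(void)
    {
            /* The IPv4 all-hosts group 224.0.0.1 maps to this MAC. */
            unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t crc = crc32_le(mac, 6);
            unsigned int bit = ~crc & 0x7f;          /* low 7 bits, inverted */
            unsigned int regidx = (bit & 0x60) >> 5; /* MAC_HASH_REG_0..3 */

            bit &= 0x1f;                             /* bit within register */
            printf("crc=%08x -> MAC_HASH_REG_%u, bit %u\n",
                   (unsigned)crc, regidx, bit);
            return 0;
    }
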
7762 | 7762 | ||
7763 | static void tg3_set_rx_mode(struct net_device *dev) | 7763 | static void tg3_set_rx_mode(struct net_device *dev) |
7764 | { | 7764 | { |
7765 | struct tg3 *tp = netdev_priv(dev); | 7765 | struct tg3 *tp = netdev_priv(dev); |
7766 | 7766 | ||
7767 | if (!netif_running(dev)) | 7767 | if (!netif_running(dev)) |
7768 | return; | 7768 | return; |
7769 | 7769 | ||
7770 | tg3_full_lock(tp, 0); | 7770 | tg3_full_lock(tp, 0); |
7771 | __tg3_set_rx_mode(dev); | 7771 | __tg3_set_rx_mode(dev); |
7772 | tg3_full_unlock(tp); | 7772 | tg3_full_unlock(tp); |
7773 | } | 7773 | } |
7774 | 7774 | ||
7775 | #define TG3_REGDUMP_LEN (32 * 1024) | 7775 | #define TG3_REGDUMP_LEN (32 * 1024) |
7776 | 7776 | ||
7777 | static int tg3_get_regs_len(struct net_device *dev) | 7777 | static int tg3_get_regs_len(struct net_device *dev) |
7778 | { | 7778 | { |
7779 | return TG3_REGDUMP_LEN; | 7779 | return TG3_REGDUMP_LEN; |
7780 | } | 7780 | } |
7781 | 7781 | ||
7782 | static void tg3_get_regs(struct net_device *dev, | 7782 | static void tg3_get_regs(struct net_device *dev, |
7783 | struct ethtool_regs *regs, void *_p) | 7783 | struct ethtool_regs *regs, void *_p) |
7784 | { | 7784 | { |
7785 | u32 *p = _p; | 7785 | u32 *p = _p; |
7786 | struct tg3 *tp = netdev_priv(dev); | 7786 | struct tg3 *tp = netdev_priv(dev); |
7787 | u8 *orig_p = _p; | 7787 | u8 *orig_p = _p; |
7788 | int i; | 7788 | int i; |
7789 | 7789 | ||
7790 | regs->version = 0; | 7790 | regs->version = 0; |
7791 | 7791 | ||
7792 | memset(p, 0, TG3_REGDUMP_LEN); | 7792 | memset(p, 0, TG3_REGDUMP_LEN); |
7793 | 7793 | ||
7794 | if (tp->link_config.phy_is_low_power) | 7794 | if (tp->link_config.phy_is_low_power) |
7795 | return; | 7795 | return; |
7796 | 7796 | ||
7797 | tg3_full_lock(tp, 0); | 7797 | tg3_full_lock(tp, 0); |
7798 | 7798 | ||
7799 | #define __GET_REG32(reg) (*(p)++ = tr32(reg)) | 7799 | #define __GET_REG32(reg) (*(p)++ = tr32(reg)) |
7800 | #define GET_REG32_LOOP(base,len) \ | 7800 | #define GET_REG32_LOOP(base,len) \ |
7801 | do { p = (u32 *)(orig_p + (base)); \ | 7801 | do { p = (u32 *)(orig_p + (base)); \ |
7802 | for (i = 0; i < len; i += 4) \ | 7802 | for (i = 0; i < len; i += 4) \ |
7803 | __GET_REG32((base) + i); \ | 7803 | __GET_REG32((base) + i); \ |
7804 | } while (0) | 7804 | } while (0) |
7805 | #define GET_REG32_1(reg) \ | 7805 | #define GET_REG32_1(reg) \ |
7806 | do { p = (u32 *)(orig_p + (reg)); \ | 7806 | do { p = (u32 *)(orig_p + (reg)); \ |
7807 | __GET_REG32((reg)); \ | 7807 | __GET_REG32((reg)); \ |
7808 | } while (0) | 7808 | } while (0) |
7809 | 7809 | ||
7810 | GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); | 7810 | GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); |
7811 | GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); | 7811 | GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); |
7812 | GET_REG32_LOOP(MAC_MODE, 0x4f0); | 7812 | GET_REG32_LOOP(MAC_MODE, 0x4f0); |
7813 | GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); | 7813 | GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); |
7814 | GET_REG32_1(SNDDATAC_MODE); | 7814 | GET_REG32_1(SNDDATAC_MODE); |
7815 | GET_REG32_LOOP(SNDBDS_MODE, 0x80); | 7815 | GET_REG32_LOOP(SNDBDS_MODE, 0x80); |
7816 | GET_REG32_LOOP(SNDBDI_MODE, 0x48); | 7816 | GET_REG32_LOOP(SNDBDI_MODE, 0x48); |
7817 | GET_REG32_1(SNDBDC_MODE); | 7817 | GET_REG32_1(SNDBDC_MODE); |
7818 | GET_REG32_LOOP(RCVLPC_MODE, 0x20); | 7818 | GET_REG32_LOOP(RCVLPC_MODE, 0x20); |
7819 | GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); | 7819 | GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); |
7820 | GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); | 7820 | GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); |
7821 | GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); | 7821 | GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); |
7822 | GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); | 7822 | GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); |
7823 | GET_REG32_1(RCVDCC_MODE); | 7823 | GET_REG32_1(RCVDCC_MODE); |
7824 | GET_REG32_LOOP(RCVBDI_MODE, 0x20); | 7824 | GET_REG32_LOOP(RCVBDI_MODE, 0x20); |
7825 | GET_REG32_LOOP(RCVCC_MODE, 0x14); | 7825 | GET_REG32_LOOP(RCVCC_MODE, 0x14); |
7826 | GET_REG32_LOOP(RCVLSC_MODE, 0x08); | 7826 | GET_REG32_LOOP(RCVLSC_MODE, 0x08); |
7827 | GET_REG32_1(MBFREE_MODE); | 7827 | GET_REG32_1(MBFREE_MODE); |
7828 | GET_REG32_LOOP(HOSTCC_MODE, 0x100); | 7828 | GET_REG32_LOOP(HOSTCC_MODE, 0x100); |
7829 | GET_REG32_LOOP(MEMARB_MODE, 0x10); | 7829 | GET_REG32_LOOP(MEMARB_MODE, 0x10); |
7830 | GET_REG32_LOOP(BUFMGR_MODE, 0x58); | 7830 | GET_REG32_LOOP(BUFMGR_MODE, 0x58); |
7831 | GET_REG32_LOOP(RDMAC_MODE, 0x08); | 7831 | GET_REG32_LOOP(RDMAC_MODE, 0x08); |
7832 | GET_REG32_LOOP(WDMAC_MODE, 0x08); | 7832 | GET_REG32_LOOP(WDMAC_MODE, 0x08); |
7833 | GET_REG32_1(RX_CPU_MODE); | 7833 | GET_REG32_1(RX_CPU_MODE); |
7834 | GET_REG32_1(RX_CPU_STATE); | 7834 | GET_REG32_1(RX_CPU_STATE); |
7835 | GET_REG32_1(RX_CPU_PGMCTR); | 7835 | GET_REG32_1(RX_CPU_PGMCTR); |
7836 | GET_REG32_1(RX_CPU_HWBKPT); | 7836 | GET_REG32_1(RX_CPU_HWBKPT); |
7837 | GET_REG32_1(TX_CPU_MODE); | 7837 | GET_REG32_1(TX_CPU_MODE); |
7838 | GET_REG32_1(TX_CPU_STATE); | 7838 | GET_REG32_1(TX_CPU_STATE); |
7839 | GET_REG32_1(TX_CPU_PGMCTR); | 7839 | GET_REG32_1(TX_CPU_PGMCTR); |
7840 | GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); | 7840 | GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); |
7841 | GET_REG32_LOOP(FTQ_RESET, 0x120); | 7841 | GET_REG32_LOOP(FTQ_RESET, 0x120); |
7842 | GET_REG32_LOOP(MSGINT_MODE, 0x0c); | 7842 | GET_REG32_LOOP(MSGINT_MODE, 0x0c); |
7843 | GET_REG32_1(DMAC_MODE); | 7843 | GET_REG32_1(DMAC_MODE); |
7844 | GET_REG32_LOOP(GRC_MODE, 0x4c); | 7844 | GET_REG32_LOOP(GRC_MODE, 0x4c); |
7845 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | 7845 | if (tp->tg3_flags & TG3_FLAG_NVRAM) |
7846 | GET_REG32_LOOP(NVRAM_CMD, 0x24); | 7846 | GET_REG32_LOOP(NVRAM_CMD, 0x24); |
7847 | 7847 | ||
7848 | #undef __GET_REG32 | 7848 | #undef __GET_REG32 |
7849 | #undef GET_REG32_LOOP | 7849 | #undef GET_REG32_LOOP |
7850 | #undef GET_REG32_1 | 7850 | #undef GET_REG32_1 |
7851 | 7851 | ||
7852 | tg3_full_unlock(tp); | 7852 | tg3_full_unlock(tp); |
7853 | } | 7853 | } |
7854 | 7854 | ||
7855 | static int tg3_get_eeprom_len(struct net_device *dev) | 7855 | static int tg3_get_eeprom_len(struct net_device *dev) |
7856 | { | 7856 | { |
7857 | struct tg3 *tp = netdev_priv(dev); | 7857 | struct tg3 *tp = netdev_priv(dev); |
7858 | 7858 | ||
7859 | return tp->nvram_size; | 7859 | return tp->nvram_size; |
7860 | } | 7860 | } |
7861 | 7861 | ||
7862 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); | 7862 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); |
7863 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); | 7863 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); |
7864 | 7864 | ||
7865 | static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) | 7865 | static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) |
7866 | { | 7866 | { |
7867 | struct tg3 *tp = netdev_priv(dev); | 7867 | struct tg3 *tp = netdev_priv(dev); |
7868 | int ret; | 7868 | int ret; |
7869 | u8 *pd; | 7869 | u8 *pd; |
7870 | u32 i, offset, len, val, b_offset, b_count; | 7870 | u32 i, offset, len, val, b_offset, b_count; |
7871 | 7871 | ||
7872 | if (tp->link_config.phy_is_low_power) | 7872 | if (tp->link_config.phy_is_low_power) |
7873 | return -EAGAIN; | 7873 | return -EAGAIN; |
7874 | 7874 | ||
7875 | offset = eeprom->offset; | 7875 | offset = eeprom->offset; |
7876 | len = eeprom->len; | 7876 | len = eeprom->len; |
7877 | eeprom->len = 0; | 7877 | eeprom->len = 0; |
7878 | 7878 | ||
7879 | eeprom->magic = TG3_EEPROM_MAGIC; | 7879 | eeprom->magic = TG3_EEPROM_MAGIC; |
7880 | 7880 | ||
7881 | if (offset & 3) { | 7881 | if (offset & 3) { |
7882 | /* adjustments to start on required 4 byte boundary */ | 7882 | /* adjustments to start on required 4 byte boundary */ |
7883 | b_offset = offset & 3; | 7883 | b_offset = offset & 3; |
7884 | b_count = 4 - b_offset; | 7884 | b_count = 4 - b_offset; |
7885 | if (b_count > len) { | 7885 | if (b_count > len) { |
7886 | /* i.e. offset=1 len=2 */ | 7886 | /* i.e. offset=1 len=2 */ |
7887 | b_count = len; | 7887 | b_count = len; |
7888 | } | 7888 | } |
7889 | ret = tg3_nvram_read(tp, offset-b_offset, &val); | 7889 | ret = tg3_nvram_read(tp, offset-b_offset, &val); |
7890 | if (ret) | 7890 | if (ret) |
7891 | return ret; | 7891 | return ret; |
7892 | val = cpu_to_le32(val); | 7892 | val = cpu_to_le32(val); |
7893 | memcpy(data, ((char*)&val) + b_offset, b_count); | 7893 | memcpy(data, ((char*)&val) + b_offset, b_count); |
7894 | len -= b_count; | 7894 | len -= b_count; |
7895 | offset += b_count; | 7895 | offset += b_count; |
7896 | eeprom->len += b_count; | 7896 | eeprom->len += b_count; |
7897 | } | 7897 | } |
7898 | 7898 | ||
7899 | /* read bytes up to the last 4 byte boundary */ | 7899 | /* read bytes up to the last 4 byte boundary */ |
7900 | pd = &data[eeprom->len]; | 7900 | pd = &data[eeprom->len]; |
7901 | for (i = 0; i < (len - (len & 3)); i += 4) { | 7901 | for (i = 0; i < (len - (len & 3)); i += 4) { |
7902 | ret = tg3_nvram_read(tp, offset + i, &val); | 7902 | ret = tg3_nvram_read(tp, offset + i, &val); |
7903 | if (ret) { | 7903 | if (ret) { |
7904 | eeprom->len += i; | 7904 | eeprom->len += i; |
7905 | return ret; | 7905 | return ret; |
7906 | } | 7906 | } |
7907 | val = cpu_to_le32(val); | 7907 | val = cpu_to_le32(val); |
7908 | memcpy(pd + i, &val, 4); | 7908 | memcpy(pd + i, &val, 4); |
7909 | } | 7909 | } |
7910 | eeprom->len += i; | 7910 | eeprom->len += i; |
7911 | 7911 | ||
7912 | if (len & 3) { | 7912 | if (len & 3) { |
7913 | /* read last bytes not ending on 4 byte boundary */ | 7913 | /* read last bytes not ending on 4 byte boundary */ |
7914 | pd = &data[eeprom->len]; | 7914 | pd = &data[eeprom->len]; |
7915 | b_count = len & 3; | 7915 | b_count = len & 3; |
7916 | b_offset = offset + len - b_count; | 7916 | b_offset = offset + len - b_count; |
7917 | ret = tg3_nvram_read(tp, b_offset, &val); | 7917 | ret = tg3_nvram_read(tp, b_offset, &val); |
7918 | if (ret) | 7918 | if (ret) |
7919 | return ret; | 7919 | return ret; |
7920 | val = cpu_to_le32(val); | 7920 | val = cpu_to_le32(val); |
7921 | memcpy(pd, ((char*)&val), b_count); | 7921 | memcpy(pd, ((char*)&val), b_count); |
7922 | eeprom->len += b_count; | 7922 | eeprom->len += b_count; |
7923 | } | 7923 | } |
7924 | return 0; | 7924 | return 0; |
7925 | } | 7925 | } |
7926 | 7926 | ||
7927 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); | 7927 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); |
7928 | 7928 | ||
7929 | static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) | 7929 | static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) |
7930 | { | 7930 | { |
7931 | struct tg3 *tp = netdev_priv(dev); | 7931 | struct tg3 *tp = netdev_priv(dev); |
7932 | int ret; | 7932 | int ret; |
7933 | u32 offset, len, b_offset, odd_len, start, end; | 7933 | u32 offset, len, b_offset, odd_len, start, end; |
7934 | u8 *buf; | 7934 | u8 *buf; |
7935 | 7935 | ||
7936 | if (tp->link_config.phy_is_low_power) | 7936 | if (tp->link_config.phy_is_low_power) |
7937 | return -EAGAIN; | 7937 | return -EAGAIN; |
7938 | 7938 | ||
7939 | if (eeprom->magic != TG3_EEPROM_MAGIC) | 7939 | if (eeprom->magic != TG3_EEPROM_MAGIC) |
7940 | return -EINVAL; | 7940 | return -EINVAL; |
7941 | 7941 | ||
7942 | offset = eeprom->offset; | 7942 | offset = eeprom->offset; |
7943 | len = eeprom->len; | 7943 | len = eeprom->len; |
7944 | 7944 | ||
7945 | if ((b_offset = (offset & 3))) { | 7945 | if ((b_offset = (offset & 3))) { |
7946 | /* adjustments to start on required 4 byte boundary */ | 7946 | /* adjustments to start on required 4 byte boundary */ |
7947 | ret = tg3_nvram_read(tp, offset-b_offset, &start); | 7947 | ret = tg3_nvram_read(tp, offset-b_offset, &start); |
7948 | if (ret) | 7948 | if (ret) |
7949 | return ret; | 7949 | return ret; |
7950 | start = cpu_to_le32(start); | 7950 | start = cpu_to_le32(start); |
7951 | len += b_offset; | 7951 | len += b_offset; |
7952 | offset &= ~3; | 7952 | offset &= ~3; |
7953 | if (len < 4) | 7953 | if (len < 4) |
7954 | len = 4; | 7954 | len = 4; |
7955 | } | 7955 | } |
7956 | 7956 | ||
7957 | odd_len = 0; | 7957 | odd_len = 0; |
7958 | if (len & 3) { | 7958 | if (len & 3) { |
7959 | /* adjustments to end on required 4 byte boundary */ | 7959 | /* adjustments to end on required 4 byte boundary */ |
7960 | odd_len = 1; | 7960 | odd_len = 1; |
7961 | len = (len + 3) & ~3; | 7961 | len = (len + 3) & ~3; |
7962 | ret = tg3_nvram_read(tp, offset+len-4, &end); | 7962 | ret = tg3_nvram_read(tp, offset+len-4, &end); |
7963 | if (ret) | 7963 | if (ret) |
7964 | return ret; | 7964 | return ret; |
7965 | end = cpu_to_le32(end); | 7965 | end = cpu_to_le32(end); |
7966 | } | 7966 | } |
7967 | 7967 | ||
7968 | buf = data; | 7968 | buf = data; |
7969 | if (b_offset || odd_len) { | 7969 | if (b_offset || odd_len) { |
7970 | buf = kmalloc(len, GFP_KERNEL); | 7970 | buf = kmalloc(len, GFP_KERNEL); |
7971 | if (buf == 0) | 7971 | if (buf == 0) |
7972 | return -ENOMEM; | 7972 | return -ENOMEM; |
7973 | if (b_offset) | 7973 | if (b_offset) |
7974 | memcpy(buf, &start, 4); | 7974 | memcpy(buf, &start, 4); |
7975 | if (odd_len) | 7975 | if (odd_len) |
7976 | memcpy(buf+len-4, &end, 4); | 7976 | memcpy(buf+len-4, &end, 4); |
7977 | memcpy(buf + b_offset, data, eeprom->len); | 7977 | memcpy(buf + b_offset, data, eeprom->len); |
7978 | } | 7978 | } |
7979 | 7979 | ||
7980 | ret = tg3_nvram_write_block(tp, offset, len, buf); | 7980 | ret = tg3_nvram_write_block(tp, offset, len, buf); |
7981 | 7981 | ||
7982 | if (buf != data) | 7982 | if (buf != data) |
7983 | kfree(buf); | 7983 | kfree(buf); |
7984 | 7984 | ||
7985 | return ret; | 7985 | return ret; |
7986 | } | 7986 | } |
7987 | 7987 | ||
7988 | static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 7988 | static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
7989 | { | 7989 | { |
7990 | struct tg3 *tp = netdev_priv(dev); | 7990 | struct tg3 *tp = netdev_priv(dev); |
7991 | 7991 | ||
7992 | cmd->supported = (SUPPORTED_Autoneg); | 7992 | cmd->supported = (SUPPORTED_Autoneg); |
7993 | 7993 | ||
7994 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 7994 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
7995 | cmd->supported |= (SUPPORTED_1000baseT_Half | | 7995 | cmd->supported |= (SUPPORTED_1000baseT_Half | |
7996 | SUPPORTED_1000baseT_Full); | 7996 | SUPPORTED_1000baseT_Full); |
7997 | 7997 | ||
7998 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { | 7998 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { |
7999 | cmd->supported |= (SUPPORTED_100baseT_Half | | 7999 | cmd->supported |= (SUPPORTED_100baseT_Half | |
8000 | SUPPORTED_100baseT_Full | | 8000 | SUPPORTED_100baseT_Full | |
8001 | SUPPORTED_10baseT_Half | | 8001 | SUPPORTED_10baseT_Half | |
8002 | SUPPORTED_10baseT_Full | | 8002 | SUPPORTED_10baseT_Full | |
8003 | SUPPORTED_MII); | 8003 | SUPPORTED_MII); |
8004 | cmd->port = PORT_TP; | 8004 | cmd->port = PORT_TP; |
8005 | } else { | 8005 | } else { |
8006 | cmd->supported |= SUPPORTED_FIBRE; | 8006 | cmd->supported |= SUPPORTED_FIBRE; |
8007 | cmd->port = PORT_FIBRE; | 8007 | cmd->port = PORT_FIBRE; |
8008 | } | 8008 | } |
8009 | 8009 | ||
8010 | cmd->advertising = tp->link_config.advertising; | 8010 | cmd->advertising = tp->link_config.advertising; |
8011 | if (netif_running(dev)) { | 8011 | if (netif_running(dev)) { |
8012 | cmd->speed = tp->link_config.active_speed; | 8012 | cmd->speed = tp->link_config.active_speed; |
8013 | cmd->duplex = tp->link_config.active_duplex; | 8013 | cmd->duplex = tp->link_config.active_duplex; |
8014 | } | 8014 | } |
8015 | cmd->phy_address = PHY_ADDR; | 8015 | cmd->phy_address = PHY_ADDR; |
8016 | cmd->transceiver = 0; | 8016 | cmd->transceiver = 0; |
8017 | cmd->autoneg = tp->link_config.autoneg; | 8017 | cmd->autoneg = tp->link_config.autoneg; |
8018 | cmd->maxtxpkt = 0; | 8018 | cmd->maxtxpkt = 0; |
8019 | cmd->maxrxpkt = 0; | 8019 | cmd->maxrxpkt = 0; |
8020 | return 0; | 8020 | return 0; |
8021 | } | 8021 | } |
8022 | 8022 | ||
8023 | static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 8023 | static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
8024 | { | 8024 | { |
8025 | struct tg3 *tp = netdev_priv(dev); | 8025 | struct tg3 *tp = netdev_priv(dev); |
8026 | 8026 | ||
8027 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { | 8027 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { |
8028 | /* These are the only valid advertisement bits allowed. */ | 8028 | /* These are the only valid advertisement bits allowed. */ |
8029 | if (cmd->autoneg == AUTONEG_ENABLE && | 8029 | if (cmd->autoneg == AUTONEG_ENABLE && |
8030 | (cmd->advertising & ~(ADVERTISED_1000baseT_Half | | 8030 | (cmd->advertising & ~(ADVERTISED_1000baseT_Half | |
8031 | ADVERTISED_1000baseT_Full | | 8031 | ADVERTISED_1000baseT_Full | |
8032 | ADVERTISED_Autoneg | | 8032 | ADVERTISED_Autoneg | |
8033 | ADVERTISED_FIBRE))) | 8033 | ADVERTISED_FIBRE))) |
8034 | return -EINVAL; | 8034 | return -EINVAL; |
8035 | /* Fiber can only do SPEED_1000. */ | 8035 | /* Fiber can only do SPEED_1000. */ |
8036 | else if ((cmd->autoneg != AUTONEG_ENABLE) && | 8036 | else if ((cmd->autoneg != AUTONEG_ENABLE) && |
8037 | (cmd->speed != SPEED_1000)) | 8037 | (cmd->speed != SPEED_1000)) |
8038 | return -EINVAL; | 8038 | return -EINVAL; |
8039 | /* Copper cannot force SPEED_1000. */ | 8039 | /* Copper cannot force SPEED_1000. */ |
8040 | } else if ((cmd->autoneg != AUTONEG_ENABLE) && | 8040 | } else if ((cmd->autoneg != AUTONEG_ENABLE) && |
8041 | (cmd->speed == SPEED_1000)) | 8041 | (cmd->speed == SPEED_1000)) |
8042 | return -EINVAL; | 8042 | return -EINVAL; |
8043 | else if ((cmd->speed == SPEED_1000) && | 8043 | else if ((cmd->speed == SPEED_1000) && |
8044 | (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) | 8044 | (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) |
8045 | return -EINVAL; | 8045 | return -EINVAL; |
8046 | 8046 | ||
8047 | tg3_full_lock(tp, 0); | 8047 | tg3_full_lock(tp, 0); |
8048 | 8048 | ||
8049 | tp->link_config.autoneg = cmd->autoneg; | 8049 | tp->link_config.autoneg = cmd->autoneg; |
8050 | if (cmd->autoneg == AUTONEG_ENABLE) { | 8050 | if (cmd->autoneg == AUTONEG_ENABLE) { |
8051 | tp->link_config.advertising = cmd->advertising; | 8051 | tp->link_config.advertising = cmd->advertising; |
8052 | tp->link_config.speed = SPEED_INVALID; | 8052 | tp->link_config.speed = SPEED_INVALID; |
8053 | tp->link_config.duplex = DUPLEX_INVALID; | 8053 | tp->link_config.duplex = DUPLEX_INVALID; |
8054 | } else { | 8054 | } else { |
8055 | tp->link_config.advertising = 0; | 8055 | tp->link_config.advertising = 0; |
8056 | tp->link_config.speed = cmd->speed; | 8056 | tp->link_config.speed = cmd->speed; |
8057 | tp->link_config.duplex = cmd->duplex; | 8057 | tp->link_config.duplex = cmd->duplex; |
8058 | } | 8058 | } |
8059 | 8059 | ||
8060 | tp->link_config.orig_speed = tp->link_config.speed; | 8060 | tp->link_config.orig_speed = tp->link_config.speed; |
8061 | tp->link_config.orig_duplex = tp->link_config.duplex; | 8061 | tp->link_config.orig_duplex = tp->link_config.duplex; |
8062 | tp->link_config.orig_autoneg = tp->link_config.autoneg; | 8062 | tp->link_config.orig_autoneg = tp->link_config.autoneg; |
8063 | 8063 | ||
8064 | if (netif_running(dev)) | 8064 | if (netif_running(dev)) |
8065 | tg3_setup_phy(tp, 1); | 8065 | tg3_setup_phy(tp, 1); |
8066 | 8066 | ||
8067 | tg3_full_unlock(tp); | 8067 | tg3_full_unlock(tp); |
8068 | 8068 | ||
8069 | return 0; | 8069 | return 0; |
8070 | } | 8070 | } |
8071 | 8071 | ||
8072 | static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 8072 | static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
8073 | { | 8073 | { |
8074 | struct tg3 *tp = netdev_priv(dev); | 8074 | struct tg3 *tp = netdev_priv(dev); |
8075 | 8075 | ||
8076 | strcpy(info->driver, DRV_MODULE_NAME); | 8076 | strcpy(info->driver, DRV_MODULE_NAME); |
8077 | strcpy(info->version, DRV_MODULE_VERSION); | 8077 | strcpy(info->version, DRV_MODULE_VERSION); |
8078 | strcpy(info->fw_version, tp->fw_ver); | 8078 | strcpy(info->fw_version, tp->fw_ver); |
8079 | strcpy(info->bus_info, pci_name(tp->pdev)); | 8079 | strcpy(info->bus_info, pci_name(tp->pdev)); |
8080 | } | 8080 | } |
8081 | 8081 | ||
8082 | static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 8082 | static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
8083 | { | 8083 | { |
8084 | struct tg3 *tp = netdev_priv(dev); | 8084 | struct tg3 *tp = netdev_priv(dev); |
8085 | 8085 | ||
8086 | if (tp->tg3_flags & TG3_FLAG_WOL_CAP) | 8086 | if (tp->tg3_flags & TG3_FLAG_WOL_CAP) |
8087 | wol->supported = WAKE_MAGIC; | 8087 | wol->supported = WAKE_MAGIC; |
8088 | else | 8088 | else |
8089 | wol->supported = 0; | 8089 | wol->supported = 0; |
8090 | wol->wolopts = 0; | 8090 | wol->wolopts = 0; |
8091 | if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) | 8091 | if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) |
8092 | wol->wolopts = WAKE_MAGIC; | 8092 | wol->wolopts = WAKE_MAGIC; |
8093 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 8093 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
8094 | } | 8094 | } |
8095 | 8095 | ||
8096 | static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 8096 | static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
8097 | { | 8097 | { |
8098 | struct tg3 *tp = netdev_priv(dev); | 8098 | struct tg3 *tp = netdev_priv(dev); |
8099 | 8099 | ||
8100 | if (wol->wolopts & ~WAKE_MAGIC) | 8100 | if (wol->wolopts & ~WAKE_MAGIC) |
8101 | return -EINVAL; | 8101 | return -EINVAL; |
8102 | if ((wol->wolopts & WAKE_MAGIC) && | 8102 | if ((wol->wolopts & WAKE_MAGIC) && |
8103 | !(tp->tg3_flags & TG3_FLAG_WOL_CAP)) | 8103 | !(tp->tg3_flags & TG3_FLAG_WOL_CAP)) |
8104 | return -EINVAL; | 8104 | return -EINVAL; |
8105 | 8105 | ||
8106 | spin_lock_bh(&tp->lock); | 8106 | spin_lock_bh(&tp->lock); |
8107 | if (wol->wolopts & WAKE_MAGIC) | 8107 | if (wol->wolopts & WAKE_MAGIC) |
8108 | tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 8108 | tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; |
8109 | else | 8109 | else |
8110 | tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; | 8110 | tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; |
8111 | spin_unlock_bh(&tp->lock); | 8111 | spin_unlock_bh(&tp->lock); |
8112 | 8112 | ||
8113 | return 0; | 8113 | return 0; |
8114 | } | 8114 | } |
8115 | 8115 | ||
8116 | static u32 tg3_get_msglevel(struct net_device *dev) | 8116 | static u32 tg3_get_msglevel(struct net_device *dev) |
8117 | { | 8117 | { |
8118 | struct tg3 *tp = netdev_priv(dev); | 8118 | struct tg3 *tp = netdev_priv(dev); |
8119 | return tp->msg_enable; | 8119 | return tp->msg_enable; |
8120 | } | 8120 | } |
8121 | 8121 | ||
8122 | static void tg3_set_msglevel(struct net_device *dev, u32 value) | 8122 | static void tg3_set_msglevel(struct net_device *dev, u32 value) |
8123 | { | 8123 | { |
8124 | struct tg3 *tp = netdev_priv(dev); | 8124 | struct tg3 *tp = netdev_priv(dev); |
8125 | tp->msg_enable = value; | 8125 | tp->msg_enable = value; |
8126 | } | 8126 | } |
8127 | 8127 | ||
8128 | static int tg3_set_tso(struct net_device *dev, u32 value) | 8128 | static int tg3_set_tso(struct net_device *dev, u32 value) |
8129 | { | 8129 | { |
8130 | struct tg3 *tp = netdev_priv(dev); | 8130 | struct tg3 *tp = netdev_priv(dev); |
8131 | 8131 | ||
8132 | if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 8132 | if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { |
8133 | if (value) | 8133 | if (value) |
8134 | return -EINVAL; | 8134 | return -EINVAL; |
8135 | return 0; | 8135 | return 0; |
8136 | } | 8136 | } |
8137 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && | 8137 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && |
8138 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { | 8138 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { |
8139 | if (value) | 8139 | if (value) |
8140 | dev->features |= NETIF_F_TSO6; | 8140 | dev->features |= NETIF_F_TSO6; |
8141 | else | 8141 | else |
8142 | dev->features &= ~NETIF_F_TSO6; | 8142 | dev->features &= ~NETIF_F_TSO6; |
8143 | } | 8143 | } |
8144 | return ethtool_op_set_tso(dev, value); | 8144 | return ethtool_op_set_tso(dev, value); |
8145 | } | 8145 | } |
8146 | 8146 | ||
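The tg3_set_tso() handler above toggles NETIF_F_TSO6 itself on HW_TSO_2 parts other than the 5906 and leaves the base TSO flag to the generic ethtool helper it returns through. As a point of reference, a minimal sketch of what ethtool_op_set_tso() amounts to, assuming it does nothing beyond flipping NETIF_F_TSO in dev->features (the real helper lives in net/core/ethtool.c and is not part of this file's diff):

int ethtool_op_set_tso(struct net_device *dev, u32 data)
{
	/* Toggle only the IPv4 TSO feature bit; callers such as
	 * tg3_set_tso() handle NETIF_F_TSO6 on their own.
	 */
	if (data)
		dev->features |= NETIF_F_TSO;
	else
		dev->features &= ~NETIF_F_TSO;

	return 0;
}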
8147 | static int tg3_nway_reset(struct net_device *dev) | 8147 | static int tg3_nway_reset(struct net_device *dev) |
8148 | { | 8148 | { |
8149 | struct tg3 *tp = netdev_priv(dev); | 8149 | struct tg3 *tp = netdev_priv(dev); |
8150 | u32 bmcr; | 8150 | u32 bmcr; |
8151 | int r; | 8151 | int r; |
8152 | 8152 | ||
8153 | if (!netif_running(dev)) | 8153 | if (!netif_running(dev)) |
8154 | return -EAGAIN; | 8154 | return -EAGAIN; |
8155 | 8155 | ||
8156 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 8156 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) |
8157 | return -EINVAL; | 8157 | return -EINVAL; |
8158 | 8158 | ||
8159 | spin_lock_bh(&tp->lock); | 8159 | spin_lock_bh(&tp->lock); |
8160 | r = -EINVAL; | 8160 | r = -EINVAL; |
8161 | tg3_readphy(tp, MII_BMCR, &bmcr); | 8161 | tg3_readphy(tp, MII_BMCR, &bmcr); |
8162 | if (!tg3_readphy(tp, MII_BMCR, &bmcr) && | 8162 | if (!tg3_readphy(tp, MII_BMCR, &bmcr) && |
8163 | ((bmcr & BMCR_ANENABLE) || | 8163 | ((bmcr & BMCR_ANENABLE) || |
8164 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { | 8164 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { |
8165 | tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | | 8165 | tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | |
8166 | BMCR_ANENABLE); | 8166 | BMCR_ANENABLE); |
8167 | r = 0; | 8167 | r = 0; |
8168 | } | 8168 | } |
8169 | spin_unlock_bh(&tp->lock); | 8169 | spin_unlock_bh(&tp->lock); |
8170 | 8170 | ||
8171 | return r; | 8171 | return r; |
8172 | } | 8172 | } |
8173 | 8173 | ||
8174 | static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 8174 | static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
8175 | { | 8175 | { |
8176 | struct tg3 *tp = netdev_priv(dev); | 8176 | struct tg3 *tp = netdev_priv(dev); |
8177 | 8177 | ||
8178 | ering->rx_max_pending = TG3_RX_RING_SIZE - 1; | 8178 | ering->rx_max_pending = TG3_RX_RING_SIZE - 1; |
8179 | ering->rx_mini_max_pending = 0; | 8179 | ering->rx_mini_max_pending = 0; |
8180 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 8180 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) |
8181 | ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; | 8181 | ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; |
8182 | else | 8182 | else |
8183 | ering->rx_jumbo_max_pending = 0; | 8183 | ering->rx_jumbo_max_pending = 0; |
8184 | 8184 | ||
8185 | ering->tx_max_pending = TG3_TX_RING_SIZE - 1; | 8185 | ering->tx_max_pending = TG3_TX_RING_SIZE - 1; |
8186 | 8186 | ||
8187 | ering->rx_pending = tp->rx_pending; | 8187 | ering->rx_pending = tp->rx_pending; |
8188 | ering->rx_mini_pending = 0; | 8188 | ering->rx_mini_pending = 0; |
8189 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 8189 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) |
8190 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; | 8190 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; |
8191 | else | 8191 | else |
8192 | ering->rx_jumbo_pending = 0; | 8192 | ering->rx_jumbo_pending = 0; |
8193 | 8193 | ||
8194 | ering->tx_pending = tp->tx_pending; | 8194 | ering->tx_pending = tp->tx_pending; |
8195 | } | 8195 | } |
8196 | 8196 | ||
8197 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 8197 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
8198 | { | 8198 | { |
8199 | struct tg3 *tp = netdev_priv(dev); | 8199 | struct tg3 *tp = netdev_priv(dev); |
8200 | int irq_sync = 0, err = 0; | 8200 | int irq_sync = 0, err = 0; |
8201 | 8201 | ||
8202 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 8202 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || |
8203 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 8203 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || |
8204 | (ering->tx_pending > TG3_TX_RING_SIZE - 1) || | 8204 | (ering->tx_pending > TG3_TX_RING_SIZE - 1) || |
8205 | (ering->tx_pending <= MAX_SKB_FRAGS) || | 8205 | (ering->tx_pending <= MAX_SKB_FRAGS) || |
8206 | ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && | 8206 | ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && |
8207 | (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) | 8207 | (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) |
8208 | return -EINVAL; | 8208 | return -EINVAL; |
8209 | 8209 | ||
8210 | if (netif_running(dev)) { | 8210 | if (netif_running(dev)) { |
8211 | tg3_netif_stop(tp); | 8211 | tg3_netif_stop(tp); |
8212 | irq_sync = 1; | 8212 | irq_sync = 1; |
8213 | } | 8213 | } |
8214 | 8214 | ||
8215 | tg3_full_lock(tp, irq_sync); | 8215 | tg3_full_lock(tp, irq_sync); |
8216 | 8216 | ||
8217 | tp->rx_pending = ering->rx_pending; | 8217 | tp->rx_pending = ering->rx_pending; |
8218 | 8218 | ||
8219 | if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && | 8219 | if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && |
8220 | tp->rx_pending > 63) | 8220 | tp->rx_pending > 63) |
8221 | tp->rx_pending = 63; | 8221 | tp->rx_pending = 63; |
8222 | tp->rx_jumbo_pending = ering->rx_jumbo_pending; | 8222 | tp->rx_jumbo_pending = ering->rx_jumbo_pending; |
8223 | tp->tx_pending = ering->tx_pending; | 8223 | tp->tx_pending = ering->tx_pending; |
8224 | 8224 | ||
8225 | if (netif_running(dev)) { | 8225 | if (netif_running(dev)) { |
8226 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8226 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8227 | err = tg3_restart_hw(tp, 1); | 8227 | err = tg3_restart_hw(tp, 1); |
8228 | if (!err) | 8228 | if (!err) |
8229 | tg3_netif_start(tp); | 8229 | tg3_netif_start(tp); |
8230 | } | 8230 | } |
8231 | 8231 | ||
8232 | tg3_full_unlock(tp); | 8232 | tg3_full_unlock(tp); |
8233 | 8233 | ||
8234 | return err; | 8234 | return err; |
8235 | } | 8235 | } |
8236 | 8236 | ||
8237 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8237 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
8238 | { | 8238 | { |
8239 | struct tg3 *tp = netdev_priv(dev); | 8239 | struct tg3 *tp = netdev_priv(dev); |
8240 | 8240 | ||
8241 | epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; | 8241 | epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; |
8242 | epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0; | 8242 | epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0; |
8243 | epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0; | 8243 | epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0; |
8244 | } | 8244 | } |
8245 | 8245 | ||
8246 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8246 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
8247 | { | 8247 | { |
8248 | struct tg3 *tp = netdev_priv(dev); | 8248 | struct tg3 *tp = netdev_priv(dev); |
8249 | int irq_sync = 0, err = 0; | 8249 | int irq_sync = 0, err = 0; |
8250 | 8250 | ||
8251 | if (netif_running(dev)) { | 8251 | if (netif_running(dev)) { |
8252 | tg3_netif_stop(tp); | 8252 | tg3_netif_stop(tp); |
8253 | irq_sync = 1; | 8253 | irq_sync = 1; |
8254 | } | 8254 | } |
8255 | 8255 | ||
8256 | tg3_full_lock(tp, irq_sync); | 8256 | tg3_full_lock(tp, irq_sync); |
8257 | 8257 | ||
8258 | if (epause->autoneg) | 8258 | if (epause->autoneg) |
8259 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 8259 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
8260 | else | 8260 | else |
8261 | tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; | 8261 | tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; |
8262 | if (epause->rx_pause) | 8262 | if (epause->rx_pause) |
8263 | tp->tg3_flags |= TG3_FLAG_RX_PAUSE; | 8263 | tp->tg3_flags |= TG3_FLAG_RX_PAUSE; |
8264 | else | 8264 | else |
8265 | tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE; | 8265 | tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE; |
8266 | if (epause->tx_pause) | 8266 | if (epause->tx_pause) |
8267 | tp->tg3_flags |= TG3_FLAG_TX_PAUSE; | 8267 | tp->tg3_flags |= TG3_FLAG_TX_PAUSE; |
8268 | else | 8268 | else |
8269 | tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; | 8269 | tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; |
8270 | 8270 | ||
8271 | if (netif_running(dev)) { | 8271 | if (netif_running(dev)) { |
8272 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8272 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8273 | err = tg3_restart_hw(tp, 1); | 8273 | err = tg3_restart_hw(tp, 1); |
8274 | if (!err) | 8274 | if (!err) |
8275 | tg3_netif_start(tp); | 8275 | tg3_netif_start(tp); |
8276 | } | 8276 | } |
8277 | 8277 | ||
8278 | tg3_full_unlock(tp); | 8278 | tg3_full_unlock(tp); |
8279 | 8279 | ||
8280 | return err; | 8280 | return err; |
8281 | } | 8281 | } |
8282 | 8282 | ||
8283 | static u32 tg3_get_rx_csum(struct net_device *dev) | 8283 | static u32 tg3_get_rx_csum(struct net_device *dev) |
8284 | { | 8284 | { |
8285 | struct tg3 *tp = netdev_priv(dev); | 8285 | struct tg3 *tp = netdev_priv(dev); |
8286 | return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; | 8286 | return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; |
8287 | } | 8287 | } |
8288 | 8288 | ||
8289 | static int tg3_set_rx_csum(struct net_device *dev, u32 data) | 8289 | static int tg3_set_rx_csum(struct net_device *dev, u32 data) |
8290 | { | 8290 | { |
8291 | struct tg3 *tp = netdev_priv(dev); | 8291 | struct tg3 *tp = netdev_priv(dev); |
8292 | 8292 | ||
8293 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | 8293 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { |
8294 | if (data != 0) | 8294 | if (data != 0) |
8295 | return -EINVAL; | 8295 | return -EINVAL; |
8296 | return 0; | 8296 | return 0; |
8297 | } | 8297 | } |
8298 | 8298 | ||
8299 | spin_lock_bh(&tp->lock); | 8299 | spin_lock_bh(&tp->lock); |
8300 | if (data) | 8300 | if (data) |
8301 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | 8301 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; |
8302 | else | 8302 | else |
8303 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | 8303 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; |
8304 | spin_unlock_bh(&tp->lock); | 8304 | spin_unlock_bh(&tp->lock); |
8305 | 8305 | ||
8306 | return 0; | 8306 | return 0; |
8307 | } | 8307 | } |
8308 | 8308 | ||
8309 | static int tg3_set_tx_csum(struct net_device *dev, u32 data) | 8309 | static int tg3_set_tx_csum(struct net_device *dev, u32 data) |
8310 | { | 8310 | { |
8311 | struct tg3 *tp = netdev_priv(dev); | 8311 | struct tg3 *tp = netdev_priv(dev); |
8312 | 8312 | ||
8313 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | 8313 | if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { |
8314 | if (data != 0) | 8314 | if (data != 0) |
8315 | return -EINVAL; | 8315 | return -EINVAL; |
8316 | return 0; | 8316 | return 0; |
8317 | } | 8317 | } |
8318 | 8318 | ||
8319 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 8319 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
8320 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 8320 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
8321 | ethtool_op_set_tx_hw_csum(dev, data); | 8321 | ethtool_op_set_tx_ipv6_csum(dev, data); |
8322 | else | 8322 | else |
8323 | ethtool_op_set_tx_csum(dev, data); | 8323 | ethtool_op_set_tx_csum(dev, data); |
8324 | 8324 | ||
8325 | return 0; | 8325 | return 0; |
8326 | } | 8326 | } |
8327 | 8327 | ||
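The functional change in this hunk is in tg3_set_tx_csum() above: on the 5755 and 5787 parts, the ethtool_op_set_tx_hw_csum() call is replaced by ethtool_op_set_tx_ipv6_csum(). A minimal sketch of what such a helper would look like, assuming it simply sets or clears the IPv4 and IPv6 TX checksum feature flags as a pair (the helper itself belongs in net/core/ethtool.c and is not shown in this file's diff):

int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
	/* Set or clear both checksum-offload feature bits together so
	 * NETIF_F_IPV6_CSUM tracks the tx-checksum setting.
	 */
	if (data)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	return 0;
}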
8328 | static int tg3_get_stats_count (struct net_device *dev) | 8328 | static int tg3_get_stats_count (struct net_device *dev) |
8329 | { | 8329 | { |
8330 | return TG3_NUM_STATS; | 8330 | return TG3_NUM_STATS; |
8331 | } | 8331 | } |
8332 | 8332 | ||
8333 | static int tg3_get_test_count (struct net_device *dev) | 8333 | static int tg3_get_test_count (struct net_device *dev) |
8334 | { | 8334 | { |
8335 | return TG3_NUM_TEST; | 8335 | return TG3_NUM_TEST; |
8336 | } | 8336 | } |
8337 | 8337 | ||
8338 | static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) | 8338 | static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) |
8339 | { | 8339 | { |
8340 | switch (stringset) { | 8340 | switch (stringset) { |
8341 | case ETH_SS_STATS: | 8341 | case ETH_SS_STATS: |
8342 | memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); | 8342 | memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); |
8343 | break; | 8343 | break; |
8344 | case ETH_SS_TEST: | 8344 | case ETH_SS_TEST: |
8345 | memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); | 8345 | memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); |
8346 | break; | 8346 | break; |
8347 | default: | 8347 | default: |
8348 | WARN_ON(1); /* we need a WARN() */ | 8348 | WARN_ON(1); /* we need a WARN() */ |
8349 | break; | 8349 | break; |
8350 | } | 8350 | } |
8351 | } | 8351 | } |
8352 | 8352 | ||
8353 | static int tg3_phys_id(struct net_device *dev, u32 data) | 8353 | static int tg3_phys_id(struct net_device *dev, u32 data) |
8354 | { | 8354 | { |
8355 | struct tg3 *tp = netdev_priv(dev); | 8355 | struct tg3 *tp = netdev_priv(dev); |
8356 | int i; | 8356 | int i; |
8357 | 8357 | ||
8358 | if (!netif_running(tp->dev)) | 8358 | if (!netif_running(tp->dev)) |
8359 | return -EAGAIN; | 8359 | return -EAGAIN; |
8360 | 8360 | ||
8361 | if (data == 0) | 8361 | if (data == 0) |
8362 | data = 2; | 8362 | data = 2; |
8363 | 8363 | ||
8364 | for (i = 0; i < (data * 2); i++) { | 8364 | for (i = 0; i < (data * 2); i++) { |
8365 | if ((i % 2) == 0) | 8365 | if ((i % 2) == 0) |
8366 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 8366 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | |
8367 | LED_CTRL_1000MBPS_ON | | 8367 | LED_CTRL_1000MBPS_ON | |
8368 | LED_CTRL_100MBPS_ON | | 8368 | LED_CTRL_100MBPS_ON | |
8369 | LED_CTRL_10MBPS_ON | | 8369 | LED_CTRL_10MBPS_ON | |
8370 | LED_CTRL_TRAFFIC_OVERRIDE | | 8370 | LED_CTRL_TRAFFIC_OVERRIDE | |
8371 | LED_CTRL_TRAFFIC_BLINK | | 8371 | LED_CTRL_TRAFFIC_BLINK | |
8372 | LED_CTRL_TRAFFIC_LED); | 8372 | LED_CTRL_TRAFFIC_LED); |
8373 | 8373 | ||
8374 | else | 8374 | else |
8375 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 8375 | tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | |
8376 | LED_CTRL_TRAFFIC_OVERRIDE); | 8376 | LED_CTRL_TRAFFIC_OVERRIDE); |
8377 | 8377 | ||
8378 | if (msleep_interruptible(500)) | 8378 | if (msleep_interruptible(500)) |
8379 | break; | 8379 | break; |
8380 | } | 8380 | } |
8381 | tw32(MAC_LED_CTRL, tp->led_ctrl); | 8381 | tw32(MAC_LED_CTRL, tp->led_ctrl); |
8382 | return 0; | 8382 | return 0; |
8383 | } | 8383 | } |
8384 | 8384 | ||
8385 | static void tg3_get_ethtool_stats (struct net_device *dev, | 8385 | static void tg3_get_ethtool_stats (struct net_device *dev, |
8386 | struct ethtool_stats *estats, u64 *tmp_stats) | 8386 | struct ethtool_stats *estats, u64 *tmp_stats) |
8387 | { | 8387 | { |
8388 | struct tg3 *tp = netdev_priv(dev); | 8388 | struct tg3 *tp = netdev_priv(dev); |
8389 | memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); | 8389 | memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); |
8390 | } | 8390 | } |
8391 | 8391 | ||
8392 | #define NVRAM_TEST_SIZE 0x100 | 8392 | #define NVRAM_TEST_SIZE 0x100 |
8393 | #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 | 8393 | #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 |
8394 | #define NVRAM_SELFBOOT_HW_SIZE 0x20 | 8394 | #define NVRAM_SELFBOOT_HW_SIZE 0x20 |
8395 | #define NVRAM_SELFBOOT_DATA_SIZE 0x1c | 8395 | #define NVRAM_SELFBOOT_DATA_SIZE 0x1c |
8396 | 8396 | ||
8397 | static int tg3_test_nvram(struct tg3 *tp) | 8397 | static int tg3_test_nvram(struct tg3 *tp) |
8398 | { | 8398 | { |
8399 | u32 *buf, csum, magic; | 8399 | u32 *buf, csum, magic; |
8400 | int i, j, err = 0, size; | 8400 | int i, j, err = 0, size; |
8401 | 8401 | ||
8402 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) | 8402 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) |
8403 | return -EIO; | 8403 | return -EIO; |
8404 | 8404 | ||
8405 | if (magic == TG3_EEPROM_MAGIC) | 8405 | if (magic == TG3_EEPROM_MAGIC) |
8406 | size = NVRAM_TEST_SIZE; | 8406 | size = NVRAM_TEST_SIZE; |
8407 | else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { | 8407 | else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { |
8408 | if ((magic & 0xe00000) == 0x200000) | 8408 | if ((magic & 0xe00000) == 0x200000) |
8409 | size = NVRAM_SELFBOOT_FORMAT1_SIZE; | 8409 | size = NVRAM_SELFBOOT_FORMAT1_SIZE; |
8410 | else | 8410 | else |
8411 | return 0; | 8411 | return 0; |
8412 | } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) | 8412 | } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) |
8413 | size = NVRAM_SELFBOOT_HW_SIZE; | 8413 | size = NVRAM_SELFBOOT_HW_SIZE; |
8414 | else | 8414 | else |
8415 | return -EIO; | 8415 | return -EIO; |
8416 | 8416 | ||
8417 | buf = kmalloc(size, GFP_KERNEL); | 8417 | buf = kmalloc(size, GFP_KERNEL); |
8418 | if (buf == NULL) | 8418 | if (buf == NULL) |
8419 | return -ENOMEM; | 8419 | return -ENOMEM; |
8420 | 8420 | ||
8421 | err = -EIO; | 8421 | err = -EIO; |
8422 | for (i = 0, j = 0; i < size; i += 4, j++) { | 8422 | for (i = 0, j = 0; i < size; i += 4, j++) { |
8423 | u32 val; | 8423 | u32 val; |
8424 | 8424 | ||
8425 | if ((err = tg3_nvram_read(tp, i, &val)) != 0) | 8425 | if ((err = tg3_nvram_read(tp, i, &val)) != 0) |
8426 | break; | 8426 | break; |
8427 | buf[j] = cpu_to_le32(val); | 8427 | buf[j] = cpu_to_le32(val); |
8428 | } | 8428 | } |
8429 | if (i < size) | 8429 | if (i < size) |
8430 | goto out; | 8430 | goto out; |
8431 | 8431 | ||
8432 | /* Selfboot format */ | 8432 | /* Selfboot format */ |
8433 | if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) == | 8433 | if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) == |
8434 | TG3_EEPROM_MAGIC_FW) { | 8434 | TG3_EEPROM_MAGIC_FW) { |
8435 | u8 *buf8 = (u8 *) buf, csum8 = 0; | 8435 | u8 *buf8 = (u8 *) buf, csum8 = 0; |
8436 | 8436 | ||
8437 | for (i = 0; i < size; i++) | 8437 | for (i = 0; i < size; i++) |
8438 | csum8 += buf8[i]; | 8438 | csum8 += buf8[i]; |
8439 | 8439 | ||
8440 | if (csum8 == 0) { | 8440 | if (csum8 == 0) { |
8441 | err = 0; | 8441 | err = 0; |
8442 | goto out; | 8442 | goto out; |
8443 | } | 8443 | } |
8444 | 8444 | ||
8445 | err = -EIO; | 8445 | err = -EIO; |
8446 | goto out; | 8446 | goto out; |
8447 | } | 8447 | } |
8448 | 8448 | ||
8449 | if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) == | 8449 | if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) == |
8450 | TG3_EEPROM_MAGIC_HW) { | 8450 | TG3_EEPROM_MAGIC_HW) { |
8451 | u8 data[NVRAM_SELFBOOT_DATA_SIZE]; | 8451 | u8 data[NVRAM_SELFBOOT_DATA_SIZE]; |
8452 | u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; | 8452 | u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; |
8453 | u8 *buf8 = (u8 *) buf; | 8453 | u8 *buf8 = (u8 *) buf; |
8454 | int j, k; | 8454 | int j, k; |
8455 | 8455 | ||
8456 | /* Separate the parity bits and the data bytes. */ | 8456 | /* Separate the parity bits and the data bytes. */ |
8457 | for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { | 8457 | for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { |
8458 | if ((i == 0) || (i == 8)) { | 8458 | if ((i == 0) || (i == 8)) { |
8459 | int l; | 8459 | int l; |
8460 | u8 msk; | 8460 | u8 msk; |
8461 | 8461 | ||
8462 | for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) | 8462 | for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) |
8463 | parity[k++] = buf8[i] & msk; | 8463 | parity[k++] = buf8[i] & msk; |
8464 | i++; | 8464 | i++; |
8465 | } | 8465 | } |
8466 | else if (i == 16) { | 8466 | else if (i == 16) { |
8467 | int l; | 8467 | int l; |
8468 | u8 msk; | 8468 | u8 msk; |
8469 | 8469 | ||
8470 | for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) | 8470 | for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) |
8471 | parity[k++] = buf8[i] & msk; | 8471 | parity[k++] = buf8[i] & msk; |
8472 | i++; | 8472 | i++; |
8473 | 8473 | ||
8474 | for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) | 8474 | for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) |
8475 | parity[k++] = buf8[i] & msk; | 8475 | parity[k++] = buf8[i] & msk; |
8476 | i++; | 8476 | i++; |
8477 | } | 8477 | } |
8478 | data[j++] = buf8[i]; | 8478 | data[j++] = buf8[i]; |
8479 | } | 8479 | } |
8480 | 8480 | ||
8481 | err = -EIO; | 8481 | err = -EIO; |
8482 | for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { | 8482 | for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { |
8483 | u8 hw8 = hweight8(data[i]); | 8483 | u8 hw8 = hweight8(data[i]); |
8484 | 8484 | ||
8485 | if ((hw8 & 0x1) && parity[i]) | 8485 | if ((hw8 & 0x1) && parity[i]) |
8486 | goto out; | 8486 | goto out; |
8487 | else if (!(hw8 & 0x1) && !parity[i]) | 8487 | else if (!(hw8 & 0x1) && !parity[i]) |
8488 | goto out; | 8488 | goto out; |
8489 | } | 8489 | } |
8490 | err = 0; | 8490 | err = 0; |
8491 | goto out; | 8491 | goto out; |
8492 | } | 8492 | } |
8493 | 8493 | ||
8494 | /* Bootstrap checksum at offset 0x10 */ | 8494 | /* Bootstrap checksum at offset 0x10 */ |
8495 | csum = calc_crc((unsigned char *) buf, 0x10); | 8495 | csum = calc_crc((unsigned char *) buf, 0x10); |
8496 | if(csum != cpu_to_le32(buf[0x10/4])) | 8496 | if(csum != cpu_to_le32(buf[0x10/4])) |
8497 | goto out; | 8497 | goto out; |
8498 | 8498 | ||
8499 | /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ | 8499 | /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ |
8500 | csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); | 8500 | csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); |
8501 | if (csum != cpu_to_le32(buf[0xfc/4])) | 8501 | if (csum != cpu_to_le32(buf[0xfc/4])) |
8502 | goto out; | 8502 | goto out; |
8503 | 8503 | ||
8504 | err = 0; | 8504 | err = 0; |
8505 | 8505 | ||
8506 | out: | 8506 | out: |
8507 | kfree(buf); | 8507 | kfree(buf); |
8508 | return err; | 8508 | return err; |
8509 | } | 8509 | } |
8510 | 8510 | ||
8511 | #define TG3_SERDES_TIMEOUT_SEC 2 | 8511 | #define TG3_SERDES_TIMEOUT_SEC 2 |
8512 | #define TG3_COPPER_TIMEOUT_SEC 6 | 8512 | #define TG3_COPPER_TIMEOUT_SEC 6 |
8513 | 8513 | ||
8514 | static int tg3_test_link(struct tg3 *tp) | 8514 | static int tg3_test_link(struct tg3 *tp) |
8515 | { | 8515 | { |
8516 | int i, max; | 8516 | int i, max; |
8517 | 8517 | ||
8518 | if (!netif_running(tp->dev)) | 8518 | if (!netif_running(tp->dev)) |
8519 | return -ENODEV; | 8519 | return -ENODEV; |
8520 | 8520 | ||
8521 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) | 8521 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) |
8522 | max = TG3_SERDES_TIMEOUT_SEC; | 8522 | max = TG3_SERDES_TIMEOUT_SEC; |
8523 | else | 8523 | else |
8524 | max = TG3_COPPER_TIMEOUT_SEC; | 8524 | max = TG3_COPPER_TIMEOUT_SEC; |
8525 | 8525 | ||
8526 | for (i = 0; i < max; i++) { | 8526 | for (i = 0; i < max; i++) { |
8527 | if (netif_carrier_ok(tp->dev)) | 8527 | if (netif_carrier_ok(tp->dev)) |
8528 | return 0; | 8528 | return 0; |
8529 | 8529 | ||
8530 | if (msleep_interruptible(1000)) | 8530 | if (msleep_interruptible(1000)) |
8531 | break; | 8531 | break; |
8532 | } | 8532 | } |
8533 | 8533 | ||
8534 | return -EIO; | 8534 | return -EIO; |
8535 | } | 8535 | } |
8536 | 8536 | ||
8537 | /* Only test the commonly used registers */ | 8537 | /* Only test the commonly used registers */ |
8538 | static int tg3_test_registers(struct tg3 *tp) | 8538 | static int tg3_test_registers(struct tg3 *tp) |
8539 | { | 8539 | { |
8540 | int i, is_5705, is_5750; | 8540 | int i, is_5705, is_5750; |
8541 | u32 offset, read_mask, write_mask, val, save_val, read_val; | 8541 | u32 offset, read_mask, write_mask, val, save_val, read_val; |
8542 | static struct { | 8542 | static struct { |
8543 | u16 offset; | 8543 | u16 offset; |
8544 | u16 flags; | 8544 | u16 flags; |
8545 | #define TG3_FL_5705 0x1 | 8545 | #define TG3_FL_5705 0x1 |
8546 | #define TG3_FL_NOT_5705 0x2 | 8546 | #define TG3_FL_NOT_5705 0x2 |
8547 | #define TG3_FL_NOT_5788 0x4 | 8547 | #define TG3_FL_NOT_5788 0x4 |
8548 | #define TG3_FL_NOT_5750 0x8 | 8548 | #define TG3_FL_NOT_5750 0x8 |
8549 | u32 read_mask; | 8549 | u32 read_mask; |
8550 | u32 write_mask; | 8550 | u32 write_mask; |
8551 | } reg_tbl[] = { | 8551 | } reg_tbl[] = { |
8552 | /* MAC Control Registers */ | 8552 | /* MAC Control Registers */ |
8553 | { MAC_MODE, TG3_FL_NOT_5705, | 8553 | { MAC_MODE, TG3_FL_NOT_5705, |
8554 | 0x00000000, 0x00ef6f8c }, | 8554 | 0x00000000, 0x00ef6f8c }, |
8555 | { MAC_MODE, TG3_FL_5705, | 8555 | { MAC_MODE, TG3_FL_5705, |
8556 | 0x00000000, 0x01ef6b8c }, | 8556 | 0x00000000, 0x01ef6b8c }, |
8557 | { MAC_STATUS, TG3_FL_NOT_5705, | 8557 | { MAC_STATUS, TG3_FL_NOT_5705, |
8558 | 0x03800107, 0x00000000 }, | 8558 | 0x03800107, 0x00000000 }, |
8559 | { MAC_STATUS, TG3_FL_5705, | 8559 | { MAC_STATUS, TG3_FL_5705, |
8560 | 0x03800100, 0x00000000 }, | 8560 | 0x03800100, 0x00000000 }, |
8561 | { MAC_ADDR_0_HIGH, 0x0000, | 8561 | { MAC_ADDR_0_HIGH, 0x0000, |
8562 | 0x00000000, 0x0000ffff }, | 8562 | 0x00000000, 0x0000ffff }, |
8563 | { MAC_ADDR_0_LOW, 0x0000, | 8563 | { MAC_ADDR_0_LOW, 0x0000, |
8564 | 0x00000000, 0xffffffff }, | 8564 | 0x00000000, 0xffffffff }, |
8565 | { MAC_RX_MTU_SIZE, 0x0000, | 8565 | { MAC_RX_MTU_SIZE, 0x0000, |
8566 | 0x00000000, 0x0000ffff }, | 8566 | 0x00000000, 0x0000ffff }, |
8567 | { MAC_TX_MODE, 0x0000, | 8567 | { MAC_TX_MODE, 0x0000, |
8568 | 0x00000000, 0x00000070 }, | 8568 | 0x00000000, 0x00000070 }, |
8569 | { MAC_TX_LENGTHS, 0x0000, | 8569 | { MAC_TX_LENGTHS, 0x0000, |
8570 | 0x00000000, 0x00003fff }, | 8570 | 0x00000000, 0x00003fff }, |
8571 | { MAC_RX_MODE, TG3_FL_NOT_5705, | 8571 | { MAC_RX_MODE, TG3_FL_NOT_5705, |
8572 | 0x00000000, 0x000007fc }, | 8572 | 0x00000000, 0x000007fc }, |
8573 | { MAC_RX_MODE, TG3_FL_5705, | 8573 | { MAC_RX_MODE, TG3_FL_5705, |
8574 | 0x00000000, 0x000007dc }, | 8574 | 0x00000000, 0x000007dc }, |
8575 | { MAC_HASH_REG_0, 0x0000, | 8575 | { MAC_HASH_REG_0, 0x0000, |
8576 | 0x00000000, 0xffffffff }, | 8576 | 0x00000000, 0xffffffff }, |
8577 | { MAC_HASH_REG_1, 0x0000, | 8577 | { MAC_HASH_REG_1, 0x0000, |
8578 | 0x00000000, 0xffffffff }, | 8578 | 0x00000000, 0xffffffff }, |
8579 | { MAC_HASH_REG_2, 0x0000, | 8579 | { MAC_HASH_REG_2, 0x0000, |
8580 | 0x00000000, 0xffffffff }, | 8580 | 0x00000000, 0xffffffff }, |
8581 | { MAC_HASH_REG_3, 0x0000, | 8581 | { MAC_HASH_REG_3, 0x0000, |
8582 | 0x00000000, 0xffffffff }, | 8582 | 0x00000000, 0xffffffff }, |
8583 | 8583 | ||
8584 | /* Receive Data and Receive BD Initiator Control Registers. */ | 8584 | /* Receive Data and Receive BD Initiator Control Registers. */ |
8585 | { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, | 8585 | { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, |
8586 | 0x00000000, 0xffffffff }, | 8586 | 0x00000000, 0xffffffff }, |
8587 | { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, | 8587 | { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, |
8588 | 0x00000000, 0xffffffff }, | 8588 | 0x00000000, 0xffffffff }, |
8589 | { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, | 8589 | { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, |
8590 | 0x00000000, 0x00000003 }, | 8590 | 0x00000000, 0x00000003 }, |
8591 | { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, | 8591 | { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, |
8592 | 0x00000000, 0xffffffff }, | 8592 | 0x00000000, 0xffffffff }, |
8593 | { RCVDBDI_STD_BD+0, 0x0000, | 8593 | { RCVDBDI_STD_BD+0, 0x0000, |
8594 | 0x00000000, 0xffffffff }, | 8594 | 0x00000000, 0xffffffff }, |
8595 | { RCVDBDI_STD_BD+4, 0x0000, | 8595 | { RCVDBDI_STD_BD+4, 0x0000, |
8596 | 0x00000000, 0xffffffff }, | 8596 | 0x00000000, 0xffffffff }, |
8597 | { RCVDBDI_STD_BD+8, 0x0000, | 8597 | { RCVDBDI_STD_BD+8, 0x0000, |
8598 | 0x00000000, 0xffff0002 }, | 8598 | 0x00000000, 0xffff0002 }, |
8599 | { RCVDBDI_STD_BD+0xc, 0x0000, | 8599 | { RCVDBDI_STD_BD+0xc, 0x0000, |
8600 | 0x00000000, 0xffffffff }, | 8600 | 0x00000000, 0xffffffff }, |
8601 | 8601 | ||
8602 | /* Receive BD Initiator Control Registers. */ | 8602 | /* Receive BD Initiator Control Registers. */ |
8603 | { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, | 8603 | { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, |
8604 | 0x00000000, 0xffffffff }, | 8604 | 0x00000000, 0xffffffff }, |
8605 | { RCVBDI_STD_THRESH, TG3_FL_5705, | 8605 | { RCVBDI_STD_THRESH, TG3_FL_5705, |
8606 | 0x00000000, 0x000003ff }, | 8606 | 0x00000000, 0x000003ff }, |
8607 | { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, | 8607 | { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, |
8608 | 0x00000000, 0xffffffff }, | 8608 | 0x00000000, 0xffffffff }, |
8609 | 8609 | ||
8610 | /* Host Coalescing Control Registers. */ | 8610 | /* Host Coalescing Control Registers. */ |
8611 | { HOSTCC_MODE, TG3_FL_NOT_5705, | 8611 | { HOSTCC_MODE, TG3_FL_NOT_5705, |
8612 | 0x00000000, 0x00000004 }, | 8612 | 0x00000000, 0x00000004 }, |
8613 | { HOSTCC_MODE, TG3_FL_5705, | 8613 | { HOSTCC_MODE, TG3_FL_5705, |
8614 | 0x00000000, 0x000000f6 }, | 8614 | 0x00000000, 0x000000f6 }, |
8615 | { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, | 8615 | { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, |
8616 | 0x00000000, 0xffffffff }, | 8616 | 0x00000000, 0xffffffff }, |
8617 | { HOSTCC_RXCOL_TICKS, TG3_FL_5705, | 8617 | { HOSTCC_RXCOL_TICKS, TG3_FL_5705, |
8618 | 0x00000000, 0x000003ff }, | 8618 | 0x00000000, 0x000003ff }, |
8619 | { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, | 8619 | { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, |
8620 | 0x00000000, 0xffffffff }, | 8620 | 0x00000000, 0xffffffff }, |
8621 | { HOSTCC_TXCOL_TICKS, TG3_FL_5705, | 8621 | { HOSTCC_TXCOL_TICKS, TG3_FL_5705, |
8622 | 0x00000000, 0x000003ff }, | 8622 | 0x00000000, 0x000003ff }, |
8623 | { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, | 8623 | { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, |
8624 | 0x00000000, 0xffffffff }, | 8624 | 0x00000000, 0xffffffff }, |
8625 | { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, | 8625 | { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, |
8626 | 0x00000000, 0x000000ff }, | 8626 | 0x00000000, 0x000000ff }, |
8627 | { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, | 8627 | { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, |
8628 | 0x00000000, 0xffffffff }, | 8628 | 0x00000000, 0xffffffff }, |
8629 | { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, | 8629 | { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, |
8630 | 0x00000000, 0x000000ff }, | 8630 | 0x00000000, 0x000000ff }, |
8631 | { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, | 8631 | { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, |
8632 | 0x00000000, 0xffffffff }, | 8632 | 0x00000000, 0xffffffff }, |
8633 | { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, | 8633 | { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, |
8634 | 0x00000000, 0xffffffff }, | 8634 | 0x00000000, 0xffffffff }, |
8635 | { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, | 8635 | { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, |
8636 | 0x00000000, 0xffffffff }, | 8636 | 0x00000000, 0xffffffff }, |
8637 | { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, | 8637 | { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, |
8638 | 0x00000000, 0x000000ff }, | 8638 | 0x00000000, 0x000000ff }, |
8639 | { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, | 8639 | { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, |
8640 | 0x00000000, 0xffffffff }, | 8640 | 0x00000000, 0xffffffff }, |
8641 | { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, | 8641 | { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, |
8642 | 0x00000000, 0x000000ff }, | 8642 | 0x00000000, 0x000000ff }, |
8643 | { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, | 8643 | { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, |
8644 | 0x00000000, 0xffffffff }, | 8644 | 0x00000000, 0xffffffff }, |
8645 | { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, | 8645 | { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, |
8646 | 0x00000000, 0xffffffff }, | 8646 | 0x00000000, 0xffffffff }, |
8647 | { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, | 8647 | { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, |
8648 | 0x00000000, 0xffffffff }, | 8648 | 0x00000000, 0xffffffff }, |
8649 | { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, | 8649 | { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, |
8650 | 0x00000000, 0xffffffff }, | 8650 | 0x00000000, 0xffffffff }, |
8651 | { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, | 8651 | { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, |
8652 | 0x00000000, 0xffffffff }, | 8652 | 0x00000000, 0xffffffff }, |
8653 | { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, | 8653 | { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, |
8654 | 0xffffffff, 0x00000000 }, | 8654 | 0xffffffff, 0x00000000 }, |
8655 | { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, | 8655 | { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, |
8656 | 0xffffffff, 0x00000000 }, | 8656 | 0xffffffff, 0x00000000 }, |
8657 | 8657 | ||
8658 | /* Buffer Manager Control Registers. */ | 8658 | /* Buffer Manager Control Registers. */ |
8659 | { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, | 8659 | { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, |
8660 | 0x00000000, 0x007fff80 }, | 8660 | 0x00000000, 0x007fff80 }, |
8661 | { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, | 8661 | { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, |
8662 | 0x00000000, 0x007fffff }, | 8662 | 0x00000000, 0x007fffff }, |
8663 | { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, | 8663 | { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, |
8664 | 0x00000000, 0x0000003f }, | 8664 | 0x00000000, 0x0000003f }, |
8665 | { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, | 8665 | { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, |
8666 | 0x00000000, 0x000001ff }, | 8666 | 0x00000000, 0x000001ff }, |
8667 | { BUFMGR_MB_HIGH_WATER, 0x0000, | 8667 | { BUFMGR_MB_HIGH_WATER, 0x0000, |
8668 | 0x00000000, 0x000001ff }, | 8668 | 0x00000000, 0x000001ff }, |
8669 | { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, | 8669 | { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, |
8670 | 0xffffffff, 0x00000000 }, | 8670 | 0xffffffff, 0x00000000 }, |
8671 | { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, | 8671 | { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, |
8672 | 0xffffffff, 0x00000000 }, | 8672 | 0xffffffff, 0x00000000 }, |
8673 | 8673 | ||
8674 | /* Mailbox Registers */ | 8674 | /* Mailbox Registers */ |
8675 | { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, | 8675 | { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, |
8676 | 0x00000000, 0x000001ff }, | 8676 | 0x00000000, 0x000001ff }, |
8677 | { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, | 8677 | { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, |
8678 | 0x00000000, 0x000001ff }, | 8678 | 0x00000000, 0x000001ff }, |
8679 | { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, | 8679 | { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, |
8680 | 0x00000000, 0x000007ff }, | 8680 | 0x00000000, 0x000007ff }, |
8681 | { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, | 8681 | { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, |
8682 | 0x00000000, 0x000001ff }, | 8682 | 0x00000000, 0x000001ff }, |
8683 | 8683 | ||
8684 | { 0xffff, 0x0000, 0x00000000, 0x00000000 }, | 8684 | { 0xffff, 0x0000, 0x00000000, 0x00000000 }, |
8685 | }; | 8685 | }; |
8686 | 8686 | ||
8687 | is_5705 = is_5750 = 0; | 8687 | is_5705 = is_5750 = 0; |
8688 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 8688 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
8689 | is_5705 = 1; | 8689 | is_5705 = 1; |
8690 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 8690 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) |
8691 | is_5750 = 1; | 8691 | is_5750 = 1; |
8692 | } | 8692 | } |
8693 | 8693 | ||
8694 | for (i = 0; reg_tbl[i].offset != 0xffff; i++) { | 8694 | for (i = 0; reg_tbl[i].offset != 0xffff; i++) { |
8695 | if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) | 8695 | if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) |
8696 | continue; | 8696 | continue; |
8697 | 8697 | ||
8698 | if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) | 8698 | if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) |
8699 | continue; | 8699 | continue; |
8700 | 8700 | ||
8701 | if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 8701 | if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && |
8702 | (reg_tbl[i].flags & TG3_FL_NOT_5788)) | 8702 | (reg_tbl[i].flags & TG3_FL_NOT_5788)) |
8703 | continue; | 8703 | continue; |
8704 | 8704 | ||
8705 | if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) | 8705 | if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) |
8706 | continue; | 8706 | continue; |
8707 | 8707 | ||
8708 | offset = (u32) reg_tbl[i].offset; | 8708 | offset = (u32) reg_tbl[i].offset; |
8709 | read_mask = reg_tbl[i].read_mask; | 8709 | read_mask = reg_tbl[i].read_mask; |
8710 | write_mask = reg_tbl[i].write_mask; | 8710 | write_mask = reg_tbl[i].write_mask; |
8711 | 8711 | ||
8712 | /* Save the original register content */ | 8712 | /* Save the original register content */ |
8713 | save_val = tr32(offset); | 8713 | save_val = tr32(offset); |
8714 | 8714 | ||
8715 | /* Determine the read-only value. */ | 8715 | /* Determine the read-only value. */ |
8716 | read_val = save_val & read_mask; | 8716 | read_val = save_val & read_mask; |
8717 | 8717 | ||
8718 | /* Write zero to the register, then make sure the read-only bits | 8718 | /* Write zero to the register, then make sure the read-only bits |
8719 | * are not changed and the read/write bits are all zeros. | 8719 | * are not changed and the read/write bits are all zeros. |
8720 | */ | 8720 | */ |
8721 | tw32(offset, 0); | 8721 | tw32(offset, 0); |
8722 | 8722 | ||
8723 | val = tr32(offset); | 8723 | val = tr32(offset); |
8724 | 8724 | ||
8725 | /* Test the read-only and read/write bits. */ | 8725 | /* Test the read-only and read/write bits. */ |
8726 | if (((val & read_mask) != read_val) || (val & write_mask)) | 8726 | if (((val & read_mask) != read_val) || (val & write_mask)) |
8727 | goto out; | 8727 | goto out; |
8728 | 8728 | ||
8729 | /* Write ones to all the bits defined by RdMask and WrMask, then | 8729 | /* Write ones to all the bits defined by RdMask and WrMask, then |
8730 | * make sure the read-only bits are not changed and the | 8730 | * make sure the read-only bits are not changed and the |
8731 | * read/write bits are all ones. | 8731 | * read/write bits are all ones. |
8732 | */ | 8732 | */ |
8733 | tw32(offset, read_mask | write_mask); | 8733 | tw32(offset, read_mask | write_mask); |
8734 | 8734 | ||
8735 | val = tr32(offset); | 8735 | val = tr32(offset); |
8736 | 8736 | ||
8737 | /* Test the read-only bits. */ | 8737 | /* Test the read-only bits. */ |
8738 | if ((val & read_mask) != read_val) | 8738 | if ((val & read_mask) != read_val) |
8739 | goto out; | 8739 | goto out; |
8740 | 8740 | ||
8741 | /* Test the read/write bits. */ | 8741 | /* Test the read/write bits. */ |
8742 | if ((val & write_mask) != write_mask) | 8742 | if ((val & write_mask) != write_mask) |
8743 | goto out; | 8743 | goto out; |
8744 | 8744 | ||
8745 | tw32(offset, save_val); | 8745 | tw32(offset, save_val); |
8746 | } | 8746 | } |
8747 | 8747 | ||
8748 | return 0; | 8748 | return 0; |
8749 | 8749 | ||
8750 | out: | 8750 | out: |
8751 | if (netif_msg_hw(tp)) | 8751 | if (netif_msg_hw(tp)) |
8752 | printk(KERN_ERR PFX "Register test failed at offset %x\n", | 8752 | printk(KERN_ERR PFX "Register test failed at offset %x\n", |
8753 | offset); | 8753 | offset); |
8754 | tw32(offset, save_val); | 8754 | tw32(offset, save_val); |
8755 | return -EIO; | 8755 | return -EIO; |
8756 | } | 8756 | } |
8757 | 8757 | ||
8758 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) | 8758 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) |
8759 | { | 8759 | { |
8760 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; | 8760 | static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; |
8761 | int i; | 8761 | int i; |
8762 | u32 j; | 8762 | u32 j; |
8763 | 8763 | ||
8764 | for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { | 8764 | for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { |
8765 | for (j = 0; j < len; j += 4) { | 8765 | for (j = 0; j < len; j += 4) { |
8766 | u32 val; | 8766 | u32 val; |
8767 | 8767 | ||
8768 | tg3_write_mem(tp, offset + j, test_pattern[i]); | 8768 | tg3_write_mem(tp, offset + j, test_pattern[i]); |
8769 | tg3_read_mem(tp, offset + j, &val); | 8769 | tg3_read_mem(tp, offset + j, &val); |
8770 | if (val != test_pattern[i]) | 8770 | if (val != test_pattern[i]) |
8771 | return -EIO; | 8771 | return -EIO; |
8772 | } | 8772 | } |
8773 | } | 8773 | } |
8774 | return 0; | 8774 | return 0; |
8775 | } | 8775 | } |
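The pattern walk in tg3_do_mem_test() above is a generic write/read-back check. As a point of reference, a minimal standalone sketch of the same idea over an ordinary buffer (function and variable names here are illustrative, not part of the driver):

/* Illustrative sketch, not driver code: write each test pattern to every
 * 32-bit word of a buffer and read it back, mirroring tg3_do_mem_test().
 */
#include <stdint.h>
#include <stddef.h>

static int mem_pattern_test(uint32_t *buf, size_t len_bytes)
{
	static const uint32_t patterns[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	size_t i, j;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		for (j = 0; j < len_bytes / 4; j++) {
			buf[j] = patterns[i];		/* write the pattern  */
			if (buf[j] != patterns[i])	/* read back, compare */
				return -1;		/* -EIO in the driver */
		}
	}
	return 0;
}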
8776 | 8776 | ||
8777 | static int tg3_test_memory(struct tg3 *tp) | 8777 | static int tg3_test_memory(struct tg3 *tp) |
8778 | { | 8778 | { |
8779 | static struct mem_entry { | 8779 | static struct mem_entry { |
8780 | u32 offset; | 8780 | u32 offset; |
8781 | u32 len; | 8781 | u32 len; |
8782 | } mem_tbl_570x[] = { | 8782 | } mem_tbl_570x[] = { |
8783 | { 0x00000000, 0x00b50}, | 8783 | { 0x00000000, 0x00b50}, |
8784 | { 0x00002000, 0x1c000}, | 8784 | { 0x00002000, 0x1c000}, |
8785 | { 0xffffffff, 0x00000} | 8785 | { 0xffffffff, 0x00000} |
8786 | }, mem_tbl_5705[] = { | 8786 | }, mem_tbl_5705[] = { |
8787 | { 0x00000100, 0x0000c}, | 8787 | { 0x00000100, 0x0000c}, |
8788 | { 0x00000200, 0x00008}, | 8788 | { 0x00000200, 0x00008}, |
8789 | { 0x00004000, 0x00800}, | 8789 | { 0x00004000, 0x00800}, |
8790 | { 0x00006000, 0x01000}, | 8790 | { 0x00006000, 0x01000}, |
8791 | { 0x00008000, 0x02000}, | 8791 | { 0x00008000, 0x02000}, |
8792 | { 0x00010000, 0x0e000}, | 8792 | { 0x00010000, 0x0e000}, |
8793 | { 0xffffffff, 0x00000} | 8793 | { 0xffffffff, 0x00000} |
8794 | }, mem_tbl_5755[] = { | 8794 | }, mem_tbl_5755[] = { |
8795 | { 0x00000200, 0x00008}, | 8795 | { 0x00000200, 0x00008}, |
8796 | { 0x00004000, 0x00800}, | 8796 | { 0x00004000, 0x00800}, |
8797 | { 0x00006000, 0x00800}, | 8797 | { 0x00006000, 0x00800}, |
8798 | { 0x00008000, 0x02000}, | 8798 | { 0x00008000, 0x02000}, |
8799 | { 0x00010000, 0x0c000}, | 8799 | { 0x00010000, 0x0c000}, |
8800 | { 0xffffffff, 0x00000} | 8800 | { 0xffffffff, 0x00000} |
8801 | }, mem_tbl_5906[] = { | 8801 | }, mem_tbl_5906[] = { |
8802 | { 0x00000200, 0x00008}, | 8802 | { 0x00000200, 0x00008}, |
8803 | { 0x00004000, 0x00400}, | 8803 | { 0x00004000, 0x00400}, |
8804 | { 0x00006000, 0x00400}, | 8804 | { 0x00006000, 0x00400}, |
8805 | { 0x00008000, 0x01000}, | 8805 | { 0x00008000, 0x01000}, |
8806 | { 0x00010000, 0x01000}, | 8806 | { 0x00010000, 0x01000}, |
8807 | { 0xffffffff, 0x00000} | 8807 | { 0xffffffff, 0x00000} |
8808 | }; | 8808 | }; |
8809 | struct mem_entry *mem_tbl; | 8809 | struct mem_entry *mem_tbl; |
8810 | int err = 0; | 8810 | int err = 0; |
8811 | int i; | 8811 | int i; |
8812 | 8812 | ||
8813 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 8813 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
8814 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 8814 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
8815 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 8815 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
8816 | mem_tbl = mem_tbl_5755; | 8816 | mem_tbl = mem_tbl_5755; |
8817 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 8817 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
8818 | mem_tbl = mem_tbl_5906; | 8818 | mem_tbl = mem_tbl_5906; |
8819 | else | 8819 | else |
8820 | mem_tbl = mem_tbl_5705; | 8820 | mem_tbl = mem_tbl_5705; |
8821 | } else | 8821 | } else |
8822 | mem_tbl = mem_tbl_570x; | 8822 | mem_tbl = mem_tbl_570x; |
8823 | 8823 | ||
8824 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { | 8824 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { |
8825 | if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, | 8825 | if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, |
8826 | mem_tbl[i].len)) != 0) | 8826 | mem_tbl[i].len)) != 0) |
8827 | break; | 8827 | break; |
8828 | } | 8828 | } |
8829 | 8829 | ||
8830 | return err; | 8830 | return err; |
8831 | } | 8831 | } |
8832 | 8832 | ||
8833 | #define TG3_MAC_LOOPBACK 0 | 8833 | #define TG3_MAC_LOOPBACK 0 |
8834 | #define TG3_PHY_LOOPBACK 1 | 8834 | #define TG3_PHY_LOOPBACK 1 |
8835 | 8835 | ||
8836 | static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | 8836 | static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) |
8837 | { | 8837 | { |
8838 | u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; | 8838 | u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; |
8839 | u32 desc_idx; | 8839 | u32 desc_idx; |
8840 | struct sk_buff *skb, *rx_skb; | 8840 | struct sk_buff *skb, *rx_skb; |
8841 | u8 *tx_data; | 8841 | u8 *tx_data; |
8842 | dma_addr_t map; | 8842 | dma_addr_t map; |
8843 | int num_pkts, tx_len, rx_len, i, err; | 8843 | int num_pkts, tx_len, rx_len, i, err; |
8844 | struct tg3_rx_buffer_desc *desc; | 8844 | struct tg3_rx_buffer_desc *desc; |
8845 | 8845 | ||
8846 | if (loopback_mode == TG3_MAC_LOOPBACK) { | 8846 | if (loopback_mode == TG3_MAC_LOOPBACK) { |
8847 | /* HW errata - mac loopback fails in some cases on 5780. | 8847 | /* HW errata - mac loopback fails in some cases on 5780. |
8848 | * Normal traffic and PHY loopback are not affected by | 8848 | * Normal traffic and PHY loopback are not affected by |
8849 | * errata. | 8849 | * errata. |
8850 | */ | 8850 | */ |
8851 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) | 8851 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) |
8852 | return 0; | 8852 | return 0; |
8853 | 8853 | ||
8854 | mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | | 8854 | mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | |
8855 | MAC_MODE_PORT_INT_LPBACK; | 8855 | MAC_MODE_PORT_INT_LPBACK; |
8856 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 8856 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
8857 | mac_mode |= MAC_MODE_LINK_POLARITY; | 8857 | mac_mode |= MAC_MODE_LINK_POLARITY; |
8858 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 8858 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) |
8859 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 8859 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
8860 | else | 8860 | else |
8861 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 8861 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
8862 | tw32(MAC_MODE, mac_mode); | 8862 | tw32(MAC_MODE, mac_mode); |
8863 | } else if (loopback_mode == TG3_PHY_LOOPBACK) { | 8863 | } else if (loopback_mode == TG3_PHY_LOOPBACK) { |
8864 | u32 val; | 8864 | u32 val; |
8865 | 8865 | ||
8866 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 8866 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
8867 | u32 phytest; | 8867 | u32 phytest; |
8868 | 8868 | ||
8869 | if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) { | 8869 | if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) { |
8870 | u32 phy; | 8870 | u32 phy; |
8871 | 8871 | ||
8872 | tg3_writephy(tp, MII_TG3_EPHY_TEST, | 8872 | tg3_writephy(tp, MII_TG3_EPHY_TEST, |
8873 | phytest | MII_TG3_EPHY_SHADOW_EN); | 8873 | phytest | MII_TG3_EPHY_SHADOW_EN); |
8874 | if (!tg3_readphy(tp, 0x1b, &phy)) | 8874 | if (!tg3_readphy(tp, 0x1b, &phy)) |
8875 | tg3_writephy(tp, 0x1b, phy & ~0x20); | 8875 | tg3_writephy(tp, 0x1b, phy & ~0x20); |
8876 | tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); | 8876 | tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); |
8877 | } | 8877 | } |
8878 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; | 8878 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; |
8879 | } else | 8879 | } else |
8880 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; | 8880 | val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; |
8881 | 8881 | ||
8882 | tg3_phy_toggle_automdix(tp, 0); | 8882 | tg3_phy_toggle_automdix(tp, 0); |
8883 | 8883 | ||
8884 | tg3_writephy(tp, MII_BMCR, val); | 8884 | tg3_writephy(tp, MII_BMCR, val); |
8885 | udelay(40); | 8885 | udelay(40); |
8886 | 8886 | ||
8887 | mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 8887 | mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; |
8888 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 8888 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
8889 | tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); | 8889 | tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); |
8890 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 8890 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
8891 | } else | 8891 | } else |
8892 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 8892 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
8893 | 8893 | ||
8894 | /* reset to prevent losing 1st rx packet intermittently */ | 8894 | /* reset to prevent losing 1st rx packet intermittently */ |
8895 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 8895 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { |
8896 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 8896 | tw32_f(MAC_RX_MODE, RX_MODE_RESET); |
8897 | udelay(10); | 8897 | udelay(10); |
8898 | tw32_f(MAC_RX_MODE, tp->rx_mode); | 8898 | tw32_f(MAC_RX_MODE, tp->rx_mode); |
8899 | } | 8899 | } |
8900 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { | 8900 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { |
8901 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) | 8901 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) |
8902 | mac_mode &= ~MAC_MODE_LINK_POLARITY; | 8902 | mac_mode &= ~MAC_MODE_LINK_POLARITY; |
8903 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) | 8903 | else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) |
8904 | mac_mode |= MAC_MODE_LINK_POLARITY; | 8904 | mac_mode |= MAC_MODE_LINK_POLARITY; |
8905 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 8905 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
8906 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 8906 | MII_TG3_EXT_CTRL_LNK3_LED_MODE); |
8907 | } | 8907 | } |
8908 | tw32(MAC_MODE, mac_mode); | 8908 | tw32(MAC_MODE, mac_mode); |
8909 | } | 8909 | } |
8910 | else | 8910 | else |
8911 | return -EINVAL; | 8911 | return -EINVAL; |
8912 | 8912 | ||
8913 | err = -EIO; | 8913 | err = -EIO; |
8914 | 8914 | ||
8915 | tx_len = 1514; | 8915 | tx_len = 1514; |
8916 | skb = netdev_alloc_skb(tp->dev, tx_len); | 8916 | skb = netdev_alloc_skb(tp->dev, tx_len); |
8917 | if (!skb) | 8917 | if (!skb) |
8918 | return -ENOMEM; | 8918 | return -ENOMEM; |
8919 | 8919 | ||
8920 | tx_data = skb_put(skb, tx_len); | 8920 | tx_data = skb_put(skb, tx_len); |
8921 | memcpy(tx_data, tp->dev->dev_addr, 6); | 8921 | memcpy(tx_data, tp->dev->dev_addr, 6); |
8922 | memset(tx_data + 6, 0x0, 8); | 8922 | memset(tx_data + 6, 0x0, 8); |
8923 | 8923 | ||
8924 | tw32(MAC_RX_MTU_SIZE, tx_len + 4); | 8924 | tw32(MAC_RX_MTU_SIZE, tx_len + 4); |
8925 | 8925 | ||
8926 | for (i = 14; i < tx_len; i++) | 8926 | for (i = 14; i < tx_len; i++) |
8927 | tx_data[i] = (u8) (i & 0xff); | 8927 | tx_data[i] = (u8) (i & 0xff); |
8928 | 8928 | ||
8929 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); | 8929 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); |
8930 | 8930 | ||
8931 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 8931 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
8932 | HOSTCC_MODE_NOW); | 8932 | HOSTCC_MODE_NOW); |
8933 | 8933 | ||
8934 | udelay(10); | 8934 | udelay(10); |
8935 | 8935 | ||
8936 | rx_start_idx = tp->hw_status->idx[0].rx_producer; | 8936 | rx_start_idx = tp->hw_status->idx[0].rx_producer; |
8937 | 8937 | ||
8938 | num_pkts = 0; | 8938 | num_pkts = 0; |
8939 | 8939 | ||
8940 | tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1); | 8940 | tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1); |
8941 | 8941 | ||
8942 | tp->tx_prod++; | 8942 | tp->tx_prod++; |
8943 | num_pkts++; | 8943 | num_pkts++; |
8944 | 8944 | ||
8945 | tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, | 8945 | tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, |
8946 | tp->tx_prod); | 8946 | tp->tx_prod); |
8947 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); | 8947 | tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); |
8948 | 8948 | ||
8949 | udelay(10); | 8949 | udelay(10); |
8950 | 8950 | ||
8951 | /* 250 usec to allow enough time on some 10/100 Mbps devices. */ | 8951 | /* 250 usec to allow enough time on some 10/100 Mbps devices. */ |
8952 | for (i = 0; i < 25; i++) { | 8952 | for (i = 0; i < 25; i++) { |
8953 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 8953 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
8954 | HOSTCC_MODE_NOW); | 8954 | HOSTCC_MODE_NOW); |
8955 | 8955 | ||
8956 | udelay(10); | 8956 | udelay(10); |
8957 | 8957 | ||
8958 | tx_idx = tp->hw_status->idx[0].tx_consumer; | 8958 | tx_idx = tp->hw_status->idx[0].tx_consumer; |
8959 | rx_idx = tp->hw_status->idx[0].rx_producer; | 8959 | rx_idx = tp->hw_status->idx[0].rx_producer; |
8960 | if ((tx_idx == tp->tx_prod) && | 8960 | if ((tx_idx == tp->tx_prod) && |
8961 | (rx_idx == (rx_start_idx + num_pkts))) | 8961 | (rx_idx == (rx_start_idx + num_pkts))) |
8962 | break; | 8962 | break; |
8963 | } | 8963 | } |
8964 | 8964 | ||
8965 | pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); | 8965 | pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); |
8966 | dev_kfree_skb(skb); | 8966 | dev_kfree_skb(skb); |
8967 | 8967 | ||
8968 | if (tx_idx != tp->tx_prod) | 8968 | if (tx_idx != tp->tx_prod) |
8969 | goto out; | 8969 | goto out; |
8970 | 8970 | ||
8971 | if (rx_idx != rx_start_idx + num_pkts) | 8971 | if (rx_idx != rx_start_idx + num_pkts) |
8972 | goto out; | 8972 | goto out; |
8973 | 8973 | ||
8974 | desc = &tp->rx_rcb[rx_start_idx]; | 8974 | desc = &tp->rx_rcb[rx_start_idx]; |
8975 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 8975 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
8976 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 8976 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
8977 | if (opaque_key != RXD_OPAQUE_RING_STD) | 8977 | if (opaque_key != RXD_OPAQUE_RING_STD) |
8978 | goto out; | 8978 | goto out; |
8979 | 8979 | ||
8980 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 8980 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
8981 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) | 8981 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) |
8982 | goto out; | 8982 | goto out; |
8983 | 8983 | ||
8984 | rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; | 8984 | rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; |
8985 | if (rx_len != tx_len) | 8985 | if (rx_len != tx_len) |
8986 | goto out; | 8986 | goto out; |
8987 | 8987 | ||
8988 | rx_skb = tp->rx_std_buffers[desc_idx].skb; | 8988 | rx_skb = tp->rx_std_buffers[desc_idx].skb; |
8989 | 8989 | ||
8990 | map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); | 8990 | map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); |
8991 | pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); | 8991 | pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); |
8992 | 8992 | ||
8993 | for (i = 14; i < tx_len; i++) { | 8993 | for (i = 14; i < tx_len; i++) { |
8994 | if (*(rx_skb->data + i) != (u8) (i & 0xff)) | 8994 | if (*(rx_skb->data + i) != (u8) (i & 0xff)) |
8995 | goto out; | 8995 | goto out; |
8996 | } | 8996 | } |
8997 | err = 0; | 8997 | err = 0; |
8998 | 8998 | ||
8999 | /* tg3_free_rings will unmap and free the rx_skb */ | 8999 | /* tg3_free_rings will unmap and free the rx_skb */ |
9000 | out: | 9000 | out: |
9001 | return err; | 9001 | return err; |
9002 | } | 9002 | } |
9003 | 9003 | ||
9004 | #define TG3_MAC_LOOPBACK_FAILED 1 | 9004 | #define TG3_MAC_LOOPBACK_FAILED 1 |
9005 | #define TG3_PHY_LOOPBACK_FAILED 2 | 9005 | #define TG3_PHY_LOOPBACK_FAILED 2 |
9006 | #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ | 9006 | #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ |
9007 | TG3_PHY_LOOPBACK_FAILED) | 9007 | TG3_PHY_LOOPBACK_FAILED) |
9008 | 9008 | ||
9009 | static int tg3_test_loopback(struct tg3 *tp) | 9009 | static int tg3_test_loopback(struct tg3 *tp) |
9010 | { | 9010 | { |
9011 | int err = 0; | 9011 | int err = 0; |
9012 | 9012 | ||
9013 | if (!netif_running(tp->dev)) | 9013 | if (!netif_running(tp->dev)) |
9014 | return TG3_LOOPBACK_FAILED; | 9014 | return TG3_LOOPBACK_FAILED; |
9015 | 9015 | ||
9016 | err = tg3_reset_hw(tp, 1); | 9016 | err = tg3_reset_hw(tp, 1); |
9017 | if (err) | 9017 | if (err) |
9018 | return TG3_LOOPBACK_FAILED; | 9018 | return TG3_LOOPBACK_FAILED; |
9019 | 9019 | ||
9020 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 9020 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) |
9021 | err |= TG3_MAC_LOOPBACK_FAILED; | 9021 | err |= TG3_MAC_LOOPBACK_FAILED; |
9022 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 9022 | if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { |
9023 | if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) | 9023 | if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) |
9024 | err |= TG3_PHY_LOOPBACK_FAILED; | 9024 | err |= TG3_PHY_LOOPBACK_FAILED; |
9025 | } | 9025 | } |
9026 | 9026 | ||
9027 | return err; | 9027 | return err; |
9028 | } | 9028 | } |
9029 | 9029 | ||
9030 | static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | 9030 | static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, |
9031 | u64 *data) | 9031 | u64 *data) |
9032 | { | 9032 | { |
9033 | struct tg3 *tp = netdev_priv(dev); | 9033 | struct tg3 *tp = netdev_priv(dev); |
9034 | 9034 | ||
9035 | if (tp->link_config.phy_is_low_power) | 9035 | if (tp->link_config.phy_is_low_power) |
9036 | tg3_set_power_state(tp, PCI_D0); | 9036 | tg3_set_power_state(tp, PCI_D0); |
9037 | 9037 | ||
9038 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); | 9038 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); |
9039 | 9039 | ||
9040 | if (tg3_test_nvram(tp) != 0) { | 9040 | if (tg3_test_nvram(tp) != 0) { |
9041 | etest->flags |= ETH_TEST_FL_FAILED; | 9041 | etest->flags |= ETH_TEST_FL_FAILED; |
9042 | data[0] = 1; | 9042 | data[0] = 1; |
9043 | } | 9043 | } |
9044 | if (tg3_test_link(tp) != 0) { | 9044 | if (tg3_test_link(tp) != 0) { |
9045 | etest->flags |= ETH_TEST_FL_FAILED; | 9045 | etest->flags |= ETH_TEST_FL_FAILED; |
9046 | data[1] = 1; | 9046 | data[1] = 1; |
9047 | } | 9047 | } |
9048 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 9048 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
9049 | int err, irq_sync = 0; | 9049 | int err, irq_sync = 0; |
9050 | 9050 | ||
9051 | if (netif_running(dev)) { | 9051 | if (netif_running(dev)) { |
9052 | tg3_netif_stop(tp); | 9052 | tg3_netif_stop(tp); |
9053 | irq_sync = 1; | 9053 | irq_sync = 1; |
9054 | } | 9054 | } |
9055 | 9055 | ||
9056 | tg3_full_lock(tp, irq_sync); | 9056 | tg3_full_lock(tp, irq_sync); |
9057 | 9057 | ||
9058 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); | 9058 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); |
9059 | err = tg3_nvram_lock(tp); | 9059 | err = tg3_nvram_lock(tp); |
9060 | tg3_halt_cpu(tp, RX_CPU_BASE); | 9060 | tg3_halt_cpu(tp, RX_CPU_BASE); |
9061 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 9061 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
9062 | tg3_halt_cpu(tp, TX_CPU_BASE); | 9062 | tg3_halt_cpu(tp, TX_CPU_BASE); |
9063 | if (!err) | 9063 | if (!err) |
9064 | tg3_nvram_unlock(tp); | 9064 | tg3_nvram_unlock(tp); |
9065 | 9065 | ||
9066 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 9066 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) |
9067 | tg3_phy_reset(tp); | 9067 | tg3_phy_reset(tp); |
9068 | 9068 | ||
9069 | if (tg3_test_registers(tp) != 0) { | 9069 | if (tg3_test_registers(tp) != 0) { |
9070 | etest->flags |= ETH_TEST_FL_FAILED; | 9070 | etest->flags |= ETH_TEST_FL_FAILED; |
9071 | data[2] = 1; | 9071 | data[2] = 1; |
9072 | } | 9072 | } |
9073 | if (tg3_test_memory(tp) != 0) { | 9073 | if (tg3_test_memory(tp) != 0) { |
9074 | etest->flags |= ETH_TEST_FL_FAILED; | 9074 | etest->flags |= ETH_TEST_FL_FAILED; |
9075 | data[3] = 1; | 9075 | data[3] = 1; |
9076 | } | 9076 | } |
9077 | if ((data[4] = tg3_test_loopback(tp)) != 0) | 9077 | if ((data[4] = tg3_test_loopback(tp)) != 0) |
9078 | etest->flags |= ETH_TEST_FL_FAILED; | 9078 | etest->flags |= ETH_TEST_FL_FAILED; |
9079 | 9079 | ||
9080 | tg3_full_unlock(tp); | 9080 | tg3_full_unlock(tp); |
9081 | 9081 | ||
9082 | if (tg3_test_interrupt(tp) != 0) { | 9082 | if (tg3_test_interrupt(tp) != 0) { |
9083 | etest->flags |= ETH_TEST_FL_FAILED; | 9083 | etest->flags |= ETH_TEST_FL_FAILED; |
9084 | data[5] = 1; | 9084 | data[5] = 1; |
9085 | } | 9085 | } |
9086 | 9086 | ||
9087 | tg3_full_lock(tp, 0); | 9087 | tg3_full_lock(tp, 0); |
9088 | 9088 | ||
9089 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 9089 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
9090 | if (netif_running(dev)) { | 9090 | if (netif_running(dev)) { |
9091 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 9091 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
9092 | if (!tg3_restart_hw(tp, 1)) | 9092 | if (!tg3_restart_hw(tp, 1)) |
9093 | tg3_netif_start(tp); | 9093 | tg3_netif_start(tp); |
9094 | } | 9094 | } |
9095 | 9095 | ||
9096 | tg3_full_unlock(tp); | 9096 | tg3_full_unlock(tp); |
9097 | } | 9097 | } |
9098 | if (tp->link_config.phy_is_low_power) | 9098 | if (tp->link_config.phy_is_low_power) |
9099 | tg3_set_power_state(tp, PCI_D3hot); | 9099 | tg3_set_power_state(tp, PCI_D3hot); |
9100 | 9100 | ||
9101 | } | 9101 | } |
9102 | 9102 | ||
9103 | static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 9103 | static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
9104 | { | 9104 | { |
9105 | struct mii_ioctl_data *data = if_mii(ifr); | 9105 | struct mii_ioctl_data *data = if_mii(ifr); |
9106 | struct tg3 *tp = netdev_priv(dev); | 9106 | struct tg3 *tp = netdev_priv(dev); |
9107 | int err; | 9107 | int err; |
9108 | 9108 | ||
9109 | switch(cmd) { | 9109 | switch(cmd) { |
9110 | case SIOCGMIIPHY: | 9110 | case SIOCGMIIPHY: |
9111 | data->phy_id = PHY_ADDR; | 9111 | data->phy_id = PHY_ADDR; |
9112 | 9112 | ||
9113 | /* fallthru */ | 9113 | /* fallthru */ |
9114 | case SIOCGMIIREG: { | 9114 | case SIOCGMIIREG: { |
9115 | u32 mii_regval; | 9115 | u32 mii_regval; |
9116 | 9116 | ||
9117 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 9117 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) |
9118 | break; /* We have no PHY */ | 9118 | break; /* We have no PHY */ |
9119 | 9119 | ||
9120 | if (tp->link_config.phy_is_low_power) | 9120 | if (tp->link_config.phy_is_low_power) |
9121 | return -EAGAIN; | 9121 | return -EAGAIN; |
9122 | 9122 | ||
9123 | spin_lock_bh(&tp->lock); | 9123 | spin_lock_bh(&tp->lock); |
9124 | err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); | 9124 | err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); |
9125 | spin_unlock_bh(&tp->lock); | 9125 | spin_unlock_bh(&tp->lock); |
9126 | 9126 | ||
9127 | data->val_out = mii_regval; | 9127 | data->val_out = mii_regval; |
9128 | 9128 | ||
9129 | return err; | 9129 | return err; |
9130 | } | 9130 | } |
9131 | 9131 | ||
9132 | case SIOCSMIIREG: | 9132 | case SIOCSMIIREG: |
9133 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 9133 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) |
9134 | break; /* We have no PHY */ | 9134 | break; /* We have no PHY */ |
9135 | 9135 | ||
9136 | if (!capable(CAP_NET_ADMIN)) | 9136 | if (!capable(CAP_NET_ADMIN)) |
9137 | return -EPERM; | 9137 | return -EPERM; |
9138 | 9138 | ||
9139 | if (tp->link_config.phy_is_low_power) | 9139 | if (tp->link_config.phy_is_low_power) |
9140 | return -EAGAIN; | 9140 | return -EAGAIN; |
9141 | 9141 | ||
9142 | spin_lock_bh(&tp->lock); | 9142 | spin_lock_bh(&tp->lock); |
9143 | err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); | 9143 | err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); |
9144 | spin_unlock_bh(&tp->lock); | 9144 | spin_unlock_bh(&tp->lock); |
9145 | 9145 | ||
9146 | return err; | 9146 | return err; |
9147 | 9147 | ||
9148 | default: | 9148 | default: |
9149 | /* do nothing */ | 9149 | /* do nothing */ |
9150 | break; | 9150 | break; |
9151 | } | 9151 | } |
9152 | return -EOPNOTSUPP; | 9152 | return -EOPNOTSUPP; |
9153 | } | 9153 | } |
9154 | 9154 | ||
9155 | #if TG3_VLAN_TAG_USED | 9155 | #if TG3_VLAN_TAG_USED |
9156 | static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 9156 | static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) |
9157 | { | 9157 | { |
9158 | struct tg3 *tp = netdev_priv(dev); | 9158 | struct tg3 *tp = netdev_priv(dev); |
9159 | 9159 | ||
9160 | if (netif_running(dev)) | 9160 | if (netif_running(dev)) |
9161 | tg3_netif_stop(tp); | 9161 | tg3_netif_stop(tp); |
9162 | 9162 | ||
9163 | tg3_full_lock(tp, 0); | 9163 | tg3_full_lock(tp, 0); |
9164 | 9164 | ||
9165 | tp->vlgrp = grp; | 9165 | tp->vlgrp = grp; |
9166 | 9166 | ||
9167 | /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ | 9167 | /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ |
9168 | __tg3_set_rx_mode(dev); | 9168 | __tg3_set_rx_mode(dev); |
9169 | 9169 | ||
9170 | if (netif_running(dev)) | 9170 | if (netif_running(dev)) |
9171 | tg3_netif_start(tp); | 9171 | tg3_netif_start(tp); |
9172 | 9172 | ||
9173 | tg3_full_unlock(tp); | 9173 | tg3_full_unlock(tp); |
9174 | } | 9174 | } |
9175 | #endif | 9175 | #endif |
9176 | 9176 | ||
9177 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 9177 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) |
9178 | { | 9178 | { |
9179 | struct tg3 *tp = netdev_priv(dev); | 9179 | struct tg3 *tp = netdev_priv(dev); |
9180 | 9180 | ||
9181 | memcpy(ec, &tp->coal, sizeof(*ec)); | 9181 | memcpy(ec, &tp->coal, sizeof(*ec)); |
9182 | return 0; | 9182 | return 0; |
9183 | } | 9183 | } |
9184 | 9184 | ||
9185 | static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 9185 | static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) |
9186 | { | 9186 | { |
9187 | struct tg3 *tp = netdev_priv(dev); | 9187 | struct tg3 *tp = netdev_priv(dev); |
9188 | u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; | 9188 | u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; |
9189 | u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; | 9189 | u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; |
9190 | 9190 | ||
9191 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 9191 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { |
9192 | max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; | 9192 | max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; |
9193 | max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; | 9193 | max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; |
9194 | max_stat_coal_ticks = MAX_STAT_COAL_TICKS; | 9194 | max_stat_coal_ticks = MAX_STAT_COAL_TICKS; |
9195 | min_stat_coal_ticks = MIN_STAT_COAL_TICKS; | 9195 | min_stat_coal_ticks = MIN_STAT_COAL_TICKS; |
9196 | } | 9196 | } |
9197 | 9197 | ||
9198 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || | 9198 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || |
9199 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || | 9199 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || |
9200 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || | 9200 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || |
9201 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || | 9201 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || |
9202 | (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || | 9202 | (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || |
9203 | (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || | 9203 | (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || |
9204 | (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || | 9204 | (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || |
9205 | (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || | 9205 | (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || |
9206 | (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || | 9206 | (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || |
9207 | (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) | 9207 | (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) |
9208 | return -EINVAL; | 9208 | return -EINVAL; |
9209 | 9209 | ||
9210 | /* No rx interrupts will be generated if both are zero */ | 9210 | /* No rx interrupts will be generated if both are zero */ |
9211 | if ((ec->rx_coalesce_usecs == 0) && | 9211 | if ((ec->rx_coalesce_usecs == 0) && |
9212 | (ec->rx_max_coalesced_frames == 0)) | 9212 | (ec->rx_max_coalesced_frames == 0)) |
9213 | return -EINVAL; | 9213 | return -EINVAL; |
9214 | 9214 | ||
9215 | /* No tx interrupts will be generated if both are zero */ | 9215 | /* No tx interrupts will be generated if both are zero */ |
9216 | if ((ec->tx_coalesce_usecs == 0) && | 9216 | if ((ec->tx_coalesce_usecs == 0) && |
9217 | (ec->tx_max_coalesced_frames == 0)) | 9217 | (ec->tx_max_coalesced_frames == 0)) |
9218 | return -EINVAL; | 9218 | return -EINVAL; |
9219 | 9219 | ||
9220 | /* Only copy relevant parameters, ignore all others. */ | 9220 | /* Only copy relevant parameters, ignore all others. */ |
9221 | tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; | 9221 | tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; |
9222 | tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; | 9222 | tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; |
9223 | tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; | 9223 | tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; |
9224 | tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; | 9224 | tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; |
9225 | tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; | 9225 | tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; |
9226 | tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; | 9226 | tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; |
9227 | tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; | 9227 | tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; |
9228 | tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; | 9228 | tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; |
9229 | tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; | 9229 | tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; |
9230 | 9230 | ||
9231 | if (netif_running(dev)) { | 9231 | if (netif_running(dev)) { |
9232 | tg3_full_lock(tp, 0); | 9232 | tg3_full_lock(tp, 0); |
9233 | __tg3_set_coalesce(tp, &tp->coal); | 9233 | __tg3_set_coalesce(tp, &tp->coal); |
9234 | tg3_full_unlock(tp); | 9234 | tg3_full_unlock(tp); |
9235 | } | 9235 | } |
9236 | return 0; | 9236 | return 0; |
9237 | } | 9237 | } |
9238 | 9238 | ||
9239 | static const struct ethtool_ops tg3_ethtool_ops = { | 9239 | static const struct ethtool_ops tg3_ethtool_ops = { |
9240 | .get_settings = tg3_get_settings, | 9240 | .get_settings = tg3_get_settings, |
9241 | .set_settings = tg3_set_settings, | 9241 | .set_settings = tg3_set_settings, |
9242 | .get_drvinfo = tg3_get_drvinfo, | 9242 | .get_drvinfo = tg3_get_drvinfo, |
9243 | .get_regs_len = tg3_get_regs_len, | 9243 | .get_regs_len = tg3_get_regs_len, |
9244 | .get_regs = tg3_get_regs, | 9244 | .get_regs = tg3_get_regs, |
9245 | .get_wol = tg3_get_wol, | 9245 | .get_wol = tg3_get_wol, |
9246 | .set_wol = tg3_set_wol, | 9246 | .set_wol = tg3_set_wol, |
9247 | .get_msglevel = tg3_get_msglevel, | 9247 | .get_msglevel = tg3_get_msglevel, |
9248 | .set_msglevel = tg3_set_msglevel, | 9248 | .set_msglevel = tg3_set_msglevel, |
9249 | .nway_reset = tg3_nway_reset, | 9249 | .nway_reset = tg3_nway_reset, |
9250 | .get_link = ethtool_op_get_link, | 9250 | .get_link = ethtool_op_get_link, |
9251 | .get_eeprom_len = tg3_get_eeprom_len, | 9251 | .get_eeprom_len = tg3_get_eeprom_len, |
9252 | .get_eeprom = tg3_get_eeprom, | 9252 | .get_eeprom = tg3_get_eeprom, |
9253 | .set_eeprom = tg3_set_eeprom, | 9253 | .set_eeprom = tg3_set_eeprom, |
9254 | .get_ringparam = tg3_get_ringparam, | 9254 | .get_ringparam = tg3_get_ringparam, |
9255 | .set_ringparam = tg3_set_ringparam, | 9255 | .set_ringparam = tg3_set_ringparam, |
9256 | .get_pauseparam = tg3_get_pauseparam, | 9256 | .get_pauseparam = tg3_get_pauseparam, |
9257 | .set_pauseparam = tg3_set_pauseparam, | 9257 | .set_pauseparam = tg3_set_pauseparam, |
9258 | .get_rx_csum = tg3_get_rx_csum, | 9258 | .get_rx_csum = tg3_get_rx_csum, |
9259 | .set_rx_csum = tg3_set_rx_csum, | 9259 | .set_rx_csum = tg3_set_rx_csum, |
9260 | .get_tx_csum = ethtool_op_get_tx_csum, | 9260 | .get_tx_csum = ethtool_op_get_tx_csum, |
9261 | .set_tx_csum = tg3_set_tx_csum, | 9261 | .set_tx_csum = tg3_set_tx_csum, |
9262 | .get_sg = ethtool_op_get_sg, | 9262 | .get_sg = ethtool_op_get_sg, |
9263 | .set_sg = ethtool_op_set_sg, | 9263 | .set_sg = ethtool_op_set_sg, |
9264 | .get_tso = ethtool_op_get_tso, | 9264 | .get_tso = ethtool_op_get_tso, |
9265 | .set_tso = tg3_set_tso, | 9265 | .set_tso = tg3_set_tso, |
9266 | .self_test_count = tg3_get_test_count, | 9266 | .self_test_count = tg3_get_test_count, |
9267 | .self_test = tg3_self_test, | 9267 | .self_test = tg3_self_test, |
9268 | .get_strings = tg3_get_strings, | 9268 | .get_strings = tg3_get_strings, |
9269 | .phys_id = tg3_phys_id, | 9269 | .phys_id = tg3_phys_id, |
9270 | .get_stats_count = tg3_get_stats_count, | 9270 | .get_stats_count = tg3_get_stats_count, |
9271 | .get_ethtool_stats = tg3_get_ethtool_stats, | 9271 | .get_ethtool_stats = tg3_get_ethtool_stats, |
9272 | .get_coalesce = tg3_get_coalesce, | 9272 | .get_coalesce = tg3_get_coalesce, |
9273 | .set_coalesce = tg3_set_coalesce, | 9273 | .set_coalesce = tg3_set_coalesce, |
9274 | .get_perm_addr = ethtool_op_get_perm_addr, | 9274 | .get_perm_addr = ethtool_op_get_perm_addr, |
9275 | }; | 9275 | }; |
9276 | 9276 | ||
9277 | static void __devinit tg3_get_eeprom_size(struct tg3 *tp) | 9277 | static void __devinit tg3_get_eeprom_size(struct tg3 *tp) |
9278 | { | 9278 | { |
9279 | u32 cursize, val, magic; | 9279 | u32 cursize, val, magic; |
9280 | 9280 | ||
9281 | tp->nvram_size = EEPROM_CHIP_SIZE; | 9281 | tp->nvram_size = EEPROM_CHIP_SIZE; |
9282 | 9282 | ||
9283 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) | 9283 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) |
9284 | return; | 9284 | return; |
9285 | 9285 | ||
9286 | if ((magic != TG3_EEPROM_MAGIC) && | 9286 | if ((magic != TG3_EEPROM_MAGIC) && |
9287 | ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && | 9287 | ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && |
9288 | ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) | 9288 | ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) |
9289 | return; | 9289 | return; |
9290 | 9290 | ||
9291 | /* | 9291 | /* |
9292 | * Size the chip by reading offsets at increasing powers of two. | 9292 | * Size the chip by reading offsets at increasing powers of two. |
9293 | * When we encounter our validation signature, we know the addressing | 9293 | * When we encounter our validation signature, we know the addressing |
9294 | * has wrapped around, and thus have our chip size. | 9294 | * has wrapped around, and thus have our chip size. |
9295 | */ | 9295 | */ |
9296 | cursize = 0x10; | 9296 | cursize = 0x10; |
9297 | 9297 | ||
9298 | while (cursize < tp->nvram_size) { | 9298 | while (cursize < tp->nvram_size) { |
9299 | if (tg3_nvram_read_swab(tp, cursize, &val) != 0) | 9299 | if (tg3_nvram_read_swab(tp, cursize, &val) != 0) |
9300 | return; | 9300 | return; |
9301 | 9301 | ||
9302 | if (val == magic) | 9302 | if (val == magic) |
9303 | break; | 9303 | break; |
9304 | 9304 | ||
9305 | cursize <<= 1; | 9305 | cursize <<= 1; |
9306 | } | 9306 | } |
9307 | 9307 | ||
9308 | tp->nvram_size = cursize; | 9308 | tp->nvram_size = cursize; |
9309 | } | 9309 | } |
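The sizing loop above depends on address wrap-around: once reads go past the end of the part they alias back to offset 0, the probe sees the magic signature again, and the current offset is the chip size. A minimal standalone sketch of that idea, with a hypothetical read callback standing in for tg3_nvram_read_swab():

/* Illustrative sketch, not driver code: size a device by doubling the probe
 * offset until the read wraps around and returns the signature from offset 0.
 */
#include <stdint.h>

typedef int (*read_word_fn)(uint32_t offset, uint32_t *val);	/* hypothetical */

static uint32_t probe_size(read_word_fn read_word, uint32_t max_size)
{
	uint32_t magic, val, cursize = 0x10;

	if (read_word(0, &magic) != 0)
		return 0;

	while (cursize < max_size) {
		if (read_word(cursize, &val) != 0)
			return 0;
		if (val == magic)	/* wrapped around: cursize is the size */
			break;
		cursize <<= 1;
	}
	return cursize;
}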
9310 | 9310 | ||
9311 | static void __devinit tg3_get_nvram_size(struct tg3 *tp) | 9311 | static void __devinit tg3_get_nvram_size(struct tg3 *tp) |
9312 | { | 9312 | { |
9313 | u32 val; | 9313 | u32 val; |
9314 | 9314 | ||
9315 | if (tg3_nvram_read_swab(tp, 0, &val) != 0) | 9315 | if (tg3_nvram_read_swab(tp, 0, &val) != 0) |
9316 | return; | 9316 | return; |
9317 | 9317 | ||
9318 | /* Selfboot format */ | 9318 | /* Selfboot format */ |
9319 | if (val != TG3_EEPROM_MAGIC) { | 9319 | if (val != TG3_EEPROM_MAGIC) { |
9320 | tg3_get_eeprom_size(tp); | 9320 | tg3_get_eeprom_size(tp); |
9321 | return; | 9321 | return; |
9322 | } | 9322 | } |
9323 | 9323 | ||
9324 | if (tg3_nvram_read(tp, 0xf0, &val) == 0) { | 9324 | if (tg3_nvram_read(tp, 0xf0, &val) == 0) { |
9325 | if (val != 0) { | 9325 | if (val != 0) { |
9326 | tp->nvram_size = (val >> 16) * 1024; | 9326 | tp->nvram_size = (val >> 16) * 1024; |
9327 | return; | 9327 | return; |
9328 | } | 9328 | } |
9329 | } | 9329 | } |
9330 | tp->nvram_size = 0x80000; | 9330 | tp->nvram_size = 0x80000; |
9331 | } | 9331 | } |
9332 | 9332 | ||
9333 | static void __devinit tg3_get_nvram_info(struct tg3 *tp) | 9333 | static void __devinit tg3_get_nvram_info(struct tg3 *tp) |
9334 | { | 9334 | { |
9335 | u32 nvcfg1; | 9335 | u32 nvcfg1; |
9336 | 9336 | ||
9337 | nvcfg1 = tr32(NVRAM_CFG1); | 9337 | nvcfg1 = tr32(NVRAM_CFG1); |
9338 | if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { | 9338 | if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { |
9339 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9339 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9340 | } | 9340 | } |
9341 | else { | 9341 | else { |
9342 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 9342 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
9343 | tw32(NVRAM_CFG1, nvcfg1); | 9343 | tw32(NVRAM_CFG1, nvcfg1); |
9344 | } | 9344 | } |
9345 | 9345 | ||
9346 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || | 9346 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || |
9347 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 9347 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { |
9348 | switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { | 9348 | switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { |
9349 | case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: | 9349 | case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: |
9350 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9350 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9351 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 9351 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; |
9352 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9352 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9353 | break; | 9353 | break; |
9354 | case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: | 9354 | case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: |
9355 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9355 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9356 | tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; | 9356 | tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; |
9357 | break; | 9357 | break; |
9358 | case FLASH_VENDOR_ATMEL_EEPROM: | 9358 | case FLASH_VENDOR_ATMEL_EEPROM: |
9359 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9359 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9360 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 9360 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
9361 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9361 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9362 | break; | 9362 | break; |
9363 | case FLASH_VENDOR_ST: | 9363 | case FLASH_VENDOR_ST: |
9364 | tp->nvram_jedecnum = JEDEC_ST; | 9364 | tp->nvram_jedecnum = JEDEC_ST; |
9365 | tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; | 9365 | tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; |
9366 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9366 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9367 | break; | 9367 | break; |
9368 | case FLASH_VENDOR_SAIFUN: | 9368 | case FLASH_VENDOR_SAIFUN: |
9369 | tp->nvram_jedecnum = JEDEC_SAIFUN; | 9369 | tp->nvram_jedecnum = JEDEC_SAIFUN; |
9370 | tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; | 9370 | tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; |
9371 | break; | 9371 | break; |
9372 | case FLASH_VENDOR_SST_SMALL: | 9372 | case FLASH_VENDOR_SST_SMALL: |
9373 | case FLASH_VENDOR_SST_LARGE: | 9373 | case FLASH_VENDOR_SST_LARGE: |
9374 | tp->nvram_jedecnum = JEDEC_SST; | 9374 | tp->nvram_jedecnum = JEDEC_SST; |
9375 | tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; | 9375 | tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; |
9376 | break; | 9376 | break; |
9377 | } | 9377 | } |
9378 | } | 9378 | } |
9379 | else { | 9379 | else { |
9380 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9380 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9381 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 9381 | tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; |
9382 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9382 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9383 | } | 9383 | } |
9384 | } | 9384 | } |
9385 | 9385 | ||
9386 | static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | 9386 | static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) |
9387 | { | 9387 | { |
9388 | u32 nvcfg1; | 9388 | u32 nvcfg1; |
9389 | 9389 | ||
9390 | nvcfg1 = tr32(NVRAM_CFG1); | 9390 | nvcfg1 = tr32(NVRAM_CFG1); |
9391 | 9391 | ||
9392 | /* NVRAM protection for TPM */ | 9392 | /* NVRAM protection for TPM */ |
9393 | if (nvcfg1 & (1 << 27)) | 9393 | if (nvcfg1 & (1 << 27)) |
9394 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 9394 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; |
9395 | 9395 | ||
9396 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 9396 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
9397 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: | 9397 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: |
9398 | case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: | 9398 | case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: |
9399 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9399 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9400 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9400 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9401 | break; | 9401 | break; |
9402 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 9402 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: |
9403 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9403 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9404 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9404 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9405 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9405 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9406 | break; | 9406 | break; |
9407 | case FLASH_5752VENDOR_ST_M45PE10: | 9407 | case FLASH_5752VENDOR_ST_M45PE10: |
9408 | case FLASH_5752VENDOR_ST_M45PE20: | 9408 | case FLASH_5752VENDOR_ST_M45PE20: |
9409 | case FLASH_5752VENDOR_ST_M45PE40: | 9409 | case FLASH_5752VENDOR_ST_M45PE40: |
9410 | tp->nvram_jedecnum = JEDEC_ST; | 9410 | tp->nvram_jedecnum = JEDEC_ST; |
9411 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9411 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9412 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9412 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9413 | break; | 9413 | break; |
9414 | } | 9414 | } |
9415 | 9415 | ||
9416 | if (tp->tg3_flags2 & TG3_FLG2_FLASH) { | 9416 | if (tp->tg3_flags2 & TG3_FLG2_FLASH) { |
9417 | switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { | 9417 | switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { |
9418 | case FLASH_5752PAGE_SIZE_256: | 9418 | case FLASH_5752PAGE_SIZE_256: |
9419 | tp->nvram_pagesize = 256; | 9419 | tp->nvram_pagesize = 256; |
9420 | break; | 9420 | break; |
9421 | case FLASH_5752PAGE_SIZE_512: | 9421 | case FLASH_5752PAGE_SIZE_512: |
9422 | tp->nvram_pagesize = 512; | 9422 | tp->nvram_pagesize = 512; |
9423 | break; | 9423 | break; |
9424 | case FLASH_5752PAGE_SIZE_1K: | 9424 | case FLASH_5752PAGE_SIZE_1K: |
9425 | tp->nvram_pagesize = 1024; | 9425 | tp->nvram_pagesize = 1024; |
9426 | break; | 9426 | break; |
9427 | case FLASH_5752PAGE_SIZE_2K: | 9427 | case FLASH_5752PAGE_SIZE_2K: |
9428 | tp->nvram_pagesize = 2048; | 9428 | tp->nvram_pagesize = 2048; |
9429 | break; | 9429 | break; |
9430 | case FLASH_5752PAGE_SIZE_4K: | 9430 | case FLASH_5752PAGE_SIZE_4K: |
9431 | tp->nvram_pagesize = 4096; | 9431 | tp->nvram_pagesize = 4096; |
9432 | break; | 9432 | break; |
9433 | case FLASH_5752PAGE_SIZE_264: | 9433 | case FLASH_5752PAGE_SIZE_264: |
9434 | tp->nvram_pagesize = 264; | 9434 | tp->nvram_pagesize = 264; |
9435 | break; | 9435 | break; |
9436 | } | 9436 | } |
9437 | } | 9437 | } |
9438 | else { | 9438 | else { |
9439 | /* For eeprom, set pagesize to maximum eeprom size */ | 9439 | /* For eeprom, set pagesize to maximum eeprom size */ |
9440 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 9440 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
9441 | 9441 | ||
9442 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 9442 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
9443 | tw32(NVRAM_CFG1, nvcfg1); | 9443 | tw32(NVRAM_CFG1, nvcfg1); |
9444 | } | 9444 | } |
9445 | } | 9445 | } |
9446 | 9446 | ||
9447 | static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | 9447 | static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) |
9448 | { | 9448 | { |
9449 | u32 nvcfg1, protect = 0; | 9449 | u32 nvcfg1, protect = 0; |
9450 | 9450 | ||
9451 | nvcfg1 = tr32(NVRAM_CFG1); | 9451 | nvcfg1 = tr32(NVRAM_CFG1); |
9452 | 9452 | ||
9453 | /* NVRAM protection for TPM */ | 9453 | /* NVRAM protection for TPM */ |
9454 | if (nvcfg1 & (1 << 27)) { | 9454 | if (nvcfg1 & (1 << 27)) { |
9455 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 9455 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; |
9456 | protect = 1; | 9456 | protect = 1; |
9457 | } | 9457 | } |
9458 | 9458 | ||
9459 | nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; | 9459 | nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; |
9460 | switch (nvcfg1) { | 9460 | switch (nvcfg1) { |
9461 | case FLASH_5755VENDOR_ATMEL_FLASH_1: | 9461 | case FLASH_5755VENDOR_ATMEL_FLASH_1: |
9462 | case FLASH_5755VENDOR_ATMEL_FLASH_2: | 9462 | case FLASH_5755VENDOR_ATMEL_FLASH_2: |
9463 | case FLASH_5755VENDOR_ATMEL_FLASH_3: | 9463 | case FLASH_5755VENDOR_ATMEL_FLASH_3: |
9464 | case FLASH_5755VENDOR_ATMEL_FLASH_5: | 9464 | case FLASH_5755VENDOR_ATMEL_FLASH_5: |
9465 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9465 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9466 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9466 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9467 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9467 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9468 | tp->nvram_pagesize = 264; | 9468 | tp->nvram_pagesize = 264; |
9469 | if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || | 9469 | if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || |
9470 | nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) | 9470 | nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) |
9471 | tp->nvram_size = (protect ? 0x3e200 : 0x80000); | 9471 | tp->nvram_size = (protect ? 0x3e200 : 0x80000); |
9472 | else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) | 9472 | else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) |
9473 | tp->nvram_size = (protect ? 0x1f200 : 0x40000); | 9473 | tp->nvram_size = (protect ? 0x1f200 : 0x40000); |
9474 | else | 9474 | else |
9475 | tp->nvram_size = (protect ? 0x1f200 : 0x20000); | 9475 | tp->nvram_size = (protect ? 0x1f200 : 0x20000); |
9476 | break; | 9476 | break; |
9477 | case FLASH_5752VENDOR_ST_M45PE10: | 9477 | case FLASH_5752VENDOR_ST_M45PE10: |
9478 | case FLASH_5752VENDOR_ST_M45PE20: | 9478 | case FLASH_5752VENDOR_ST_M45PE20: |
9479 | case FLASH_5752VENDOR_ST_M45PE40: | 9479 | case FLASH_5752VENDOR_ST_M45PE40: |
9480 | tp->nvram_jedecnum = JEDEC_ST; | 9480 | tp->nvram_jedecnum = JEDEC_ST; |
9481 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9481 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9482 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9482 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9483 | tp->nvram_pagesize = 256; | 9483 | tp->nvram_pagesize = 256; |
9484 | if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) | 9484 | if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) |
9485 | tp->nvram_size = (protect ? 0x10000 : 0x20000); | 9485 | tp->nvram_size = (protect ? 0x10000 : 0x20000); |
9486 | else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) | 9486 | else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) |
9487 | tp->nvram_size = (protect ? 0x10000 : 0x40000); | 9487 | tp->nvram_size = (protect ? 0x10000 : 0x40000); |
9488 | else | 9488 | else |
9489 | tp->nvram_size = (protect ? 0x20000 : 0x80000); | 9489 | tp->nvram_size = (protect ? 0x20000 : 0x80000); |
9490 | break; | 9490 | break; |
9491 | } | 9491 | } |
9492 | } | 9492 | } |
9493 | 9493 | ||
9494 | static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) | 9494 | static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) |
9495 | { | 9495 | { |
9496 | u32 nvcfg1; | 9496 | u32 nvcfg1; |
9497 | 9497 | ||
9498 | nvcfg1 = tr32(NVRAM_CFG1); | 9498 | nvcfg1 = tr32(NVRAM_CFG1); |
9499 | 9499 | ||
9500 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 9500 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
9501 | case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: | 9501 | case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: |
9502 | case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: | 9502 | case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: |
9503 | case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: | 9503 | case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: |
9504 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | 9504 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: |
9505 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9505 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9506 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9506 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9507 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 9507 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
9508 | 9508 | ||
9509 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 9509 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; |
9510 | tw32(NVRAM_CFG1, nvcfg1); | 9510 | tw32(NVRAM_CFG1, nvcfg1); |
9511 | break; | 9511 | break; |
9512 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 9512 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: |
9513 | case FLASH_5755VENDOR_ATMEL_FLASH_1: | 9513 | case FLASH_5755VENDOR_ATMEL_FLASH_1: |
9514 | case FLASH_5755VENDOR_ATMEL_FLASH_2: | 9514 | case FLASH_5755VENDOR_ATMEL_FLASH_2: |
9515 | case FLASH_5755VENDOR_ATMEL_FLASH_3: | 9515 | case FLASH_5755VENDOR_ATMEL_FLASH_3: |
9516 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9516 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9517 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9517 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9518 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9518 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9519 | tp->nvram_pagesize = 264; | 9519 | tp->nvram_pagesize = 264; |
9520 | break; | 9520 | break; |
9521 | case FLASH_5752VENDOR_ST_M45PE10: | 9521 | case FLASH_5752VENDOR_ST_M45PE10: |
9522 | case FLASH_5752VENDOR_ST_M45PE20: | 9522 | case FLASH_5752VENDOR_ST_M45PE20: |
9523 | case FLASH_5752VENDOR_ST_M45PE40: | 9523 | case FLASH_5752VENDOR_ST_M45PE40: |
9524 | tp->nvram_jedecnum = JEDEC_ST; | 9524 | tp->nvram_jedecnum = JEDEC_ST; |
9525 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9525 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9526 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | 9526 | tp->tg3_flags2 |= TG3_FLG2_FLASH; |
9527 | tp->nvram_pagesize = 256; | 9527 | tp->nvram_pagesize = 256; |
9528 | break; | 9528 | break; |
9529 | } | 9529 | } |
9530 | } | 9530 | } |
9531 | 9531 | ||
9532 | static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) | 9532 | static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) |
9533 | { | 9533 | { |
9534 | tp->nvram_jedecnum = JEDEC_ATMEL; | 9534 | tp->nvram_jedecnum = JEDEC_ATMEL; |
9535 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 9535 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; |
9536 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 9536 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; |
9537 | } | 9537 | } |
9538 | 9538 | ||
9539 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ | 9539 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ |
9540 | static void __devinit tg3_nvram_init(struct tg3 *tp) | 9540 | static void __devinit tg3_nvram_init(struct tg3 *tp) |
9541 | { | 9541 | { |
9542 | tw32_f(GRC_EEPROM_ADDR, | 9542 | tw32_f(GRC_EEPROM_ADDR, |
9543 | (EEPROM_ADDR_FSM_RESET | | 9543 | (EEPROM_ADDR_FSM_RESET | |
9544 | (EEPROM_DEFAULT_CLOCK_PERIOD << | 9544 | (EEPROM_DEFAULT_CLOCK_PERIOD << |
9545 | EEPROM_ADDR_CLKPERD_SHIFT))); | 9545 | EEPROM_ADDR_CLKPERD_SHIFT))); |
9546 | 9546 | ||
9547 | msleep(1); | 9547 | msleep(1); |
9548 | 9548 | ||
9549 | /* Enable seeprom accesses. */ | 9549 | /* Enable seeprom accesses. */ |
9550 | tw32_f(GRC_LOCAL_CTRL, | 9550 | tw32_f(GRC_LOCAL_CTRL, |
9551 | tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); | 9551 | tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); |
9552 | udelay(100); | 9552 | udelay(100); |
9553 | 9553 | ||
9554 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 9554 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
9555 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 9555 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { |
9556 | tp->tg3_flags |= TG3_FLAG_NVRAM; | 9556 | tp->tg3_flags |= TG3_FLAG_NVRAM; |
9557 | 9557 | ||
9558 | if (tg3_nvram_lock(tp)) { | 9558 | if (tg3_nvram_lock(tp)) { |
9559 | printk(KERN_WARNING PFX "%s: Cannot get nvram lock, " | 9559 | printk(KERN_WARNING PFX "%s: Cannot get nvram lock, " |
9560 | "tg3_nvram_init failed.\n", tp->dev->name); | 9560 | "tg3_nvram_init failed.\n", tp->dev->name); |
9561 | return; | 9561 | return; |
9562 | } | 9562 | } |
9563 | tg3_enable_nvram_access(tp); | 9563 | tg3_enable_nvram_access(tp); |
9564 | 9564 | ||
9565 | tp->nvram_size = 0; | 9565 | tp->nvram_size = 0; |
9566 | 9566 | ||
9567 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 9567 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) |
9568 | tg3_get_5752_nvram_info(tp); | 9568 | tg3_get_5752_nvram_info(tp); |
9569 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 9569 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
9570 | tg3_get_5755_nvram_info(tp); | 9570 | tg3_get_5755_nvram_info(tp); |
9571 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 9571 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
9572 | tg3_get_5787_nvram_info(tp); | 9572 | tg3_get_5787_nvram_info(tp); |
9573 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 9573 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
9574 | tg3_get_5906_nvram_info(tp); | 9574 | tg3_get_5906_nvram_info(tp); |
9575 | else | 9575 | else |
9576 | tg3_get_nvram_info(tp); | 9576 | tg3_get_nvram_info(tp); |
9577 | 9577 | ||
9578 | if (tp->nvram_size == 0) | 9578 | if (tp->nvram_size == 0) |
9579 | tg3_get_nvram_size(tp); | 9579 | tg3_get_nvram_size(tp); |
9580 | 9580 | ||
9581 | tg3_disable_nvram_access(tp); | 9581 | tg3_disable_nvram_access(tp); |
9582 | tg3_nvram_unlock(tp); | 9582 | tg3_nvram_unlock(tp); |
9583 | 9583 | ||
9584 | } else { | 9584 | } else { |
9585 | tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); | 9585 | tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); |
9586 | 9586 | ||
9587 | tg3_get_eeprom_size(tp); | 9587 | tg3_get_eeprom_size(tp); |
9588 | } | 9588 | } |
9589 | } | 9589 | } |
9590 | 9590 | ||
9591 | static int tg3_nvram_read_using_eeprom(struct tg3 *tp, | 9591 | static int tg3_nvram_read_using_eeprom(struct tg3 *tp, |
9592 | u32 offset, u32 *val) | 9592 | u32 offset, u32 *val) |
9593 | { | 9593 | { |
9594 | u32 tmp; | 9594 | u32 tmp; |
9595 | int i; | 9595 | int i; |
9596 | 9596 | ||
9597 | if (offset > EEPROM_ADDR_ADDR_MASK || | 9597 | if (offset > EEPROM_ADDR_ADDR_MASK || |
9598 | (offset % 4) != 0) | 9598 | (offset % 4) != 0) |
9599 | return -EINVAL; | 9599 | return -EINVAL; |
9600 | 9600 | ||
9601 | tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | | 9601 | tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | |
9602 | EEPROM_ADDR_DEVID_MASK | | 9602 | EEPROM_ADDR_DEVID_MASK | |
9603 | EEPROM_ADDR_READ); | 9603 | EEPROM_ADDR_READ); |
9604 | tw32(GRC_EEPROM_ADDR, | 9604 | tw32(GRC_EEPROM_ADDR, |
9605 | tmp | | 9605 | tmp | |
9606 | (0 << EEPROM_ADDR_DEVID_SHIFT) | | 9606 | (0 << EEPROM_ADDR_DEVID_SHIFT) | |
9607 | ((offset << EEPROM_ADDR_ADDR_SHIFT) & | 9607 | ((offset << EEPROM_ADDR_ADDR_SHIFT) & |
9608 | EEPROM_ADDR_ADDR_MASK) | | 9608 | EEPROM_ADDR_ADDR_MASK) | |
9609 | EEPROM_ADDR_READ | EEPROM_ADDR_START); | 9609 | EEPROM_ADDR_READ | EEPROM_ADDR_START); |
9610 | 9610 | ||
9611 | for (i = 0; i < 1000; i++) { | 9611 | for (i = 0; i < 1000; i++) { |
9612 | tmp = tr32(GRC_EEPROM_ADDR); | 9612 | tmp = tr32(GRC_EEPROM_ADDR); |
9613 | 9613 | ||
9614 | if (tmp & EEPROM_ADDR_COMPLETE) | 9614 | if (tmp & EEPROM_ADDR_COMPLETE) |
9615 | break; | 9615 | break; |
9616 | msleep(1); | 9616 | msleep(1); |
9617 | } | 9617 | } |
9618 | if (!(tmp & EEPROM_ADDR_COMPLETE)) | 9618 | if (!(tmp & EEPROM_ADDR_COMPLETE)) |
9619 | return -EBUSY; | 9619 | return -EBUSY; |
9620 | 9620 | ||
9621 | *val = tr32(GRC_EEPROM_DATA); | 9621 | *val = tr32(GRC_EEPROM_DATA); |
9622 | return 0; | 9622 | return 0; |
9623 | } | 9623 | } |
9624 | 9624 | ||
9625 | #define NVRAM_CMD_TIMEOUT 10000 | 9625 | #define NVRAM_CMD_TIMEOUT 10000 |
9626 | 9626 | ||
9627 | static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) | 9627 | static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) |
9628 | { | 9628 | { |
9629 | int i; | 9629 | int i; |
9630 | 9630 | ||
9631 | tw32(NVRAM_CMD, nvram_cmd); | 9631 | tw32(NVRAM_CMD, nvram_cmd); |
9632 | for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { | 9632 | for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { |
9633 | udelay(10); | 9633 | udelay(10); |
9634 | if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { | 9634 | if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { |
9635 | udelay(10); | 9635 | udelay(10); |
9636 | break; | 9636 | break; |
9637 | } | 9637 | } |
9638 | } | 9638 | } |
9639 | if (i == NVRAM_CMD_TIMEOUT) { | 9639 | if (i == NVRAM_CMD_TIMEOUT) { |
9640 | return -EBUSY; | 9640 | return -EBUSY; |
9641 | } | 9641 | } |
9642 | return 0; | 9642 | return 0; |
9643 | } | 9643 | } |
9644 | 9644 | ||
9645 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) | 9645 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) |
9646 | { | 9646 | { |
9647 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 9647 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && |
9648 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 9648 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && |
9649 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 9649 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && |
9650 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | 9650 | (tp->nvram_jedecnum == JEDEC_ATMEL)) |
9651 | 9651 | ||
9652 | addr = ((addr / tp->nvram_pagesize) << | 9652 | addr = ((addr / tp->nvram_pagesize) << |
9653 | ATMEL_AT45DB0X1B_PAGE_POS) + | 9653 | ATMEL_AT45DB0X1B_PAGE_POS) + |
9654 | (addr % tp->nvram_pagesize); | 9654 | (addr % tp->nvram_pagesize); |
9655 | 9655 | ||
9656 | return addr; | 9656 | return addr; |
9657 | } | 9657 | } |
9658 | 9658 | ||
9659 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) | 9659 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) |
9660 | { | 9660 | { |
9661 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 9661 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && |
9662 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 9662 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && |
9663 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 9663 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && |
9664 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | 9664 | (tp->nvram_jedecnum == JEDEC_ATMEL)) |
9665 | 9665 | ||
9666 | addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * | 9666 | addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * |
9667 | tp->nvram_pagesize) + | 9667 | tp->nvram_pagesize) + |
9668 | (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); | 9668 | (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); |
9669 | 9669 | ||
9670 | return addr; | 9670 | return addr; |
9671 | } | 9671 | } |
9672 | 9672 | ||
9673 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | 9673 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) |
9674 | { | 9674 | { |
9675 | int ret; | 9675 | int ret; |
9676 | 9676 | ||
9677 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) | 9677 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) |
9678 | return tg3_nvram_read_using_eeprom(tp, offset, val); | 9678 | return tg3_nvram_read_using_eeprom(tp, offset, val); |
9679 | 9679 | ||
9680 | offset = tg3_nvram_phys_addr(tp, offset); | 9680 | offset = tg3_nvram_phys_addr(tp, offset); |
9681 | 9681 | ||
9682 | if (offset > NVRAM_ADDR_MSK) | 9682 | if (offset > NVRAM_ADDR_MSK) |
9683 | return -EINVAL; | 9683 | return -EINVAL; |
9684 | 9684 | ||
9685 | ret = tg3_nvram_lock(tp); | 9685 | ret = tg3_nvram_lock(tp); |
9686 | if (ret) | 9686 | if (ret) |
9687 | return ret; | 9687 | return ret; |
9688 | 9688 | ||
9689 | tg3_enable_nvram_access(tp); | 9689 | tg3_enable_nvram_access(tp); |
9690 | 9690 | ||
9691 | tw32(NVRAM_ADDR, offset); | 9691 | tw32(NVRAM_ADDR, offset); |
9692 | ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | | 9692 | ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | |
9693 | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); | 9693 | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); |
9694 | 9694 | ||
9695 | if (ret == 0) | 9695 | if (ret == 0) |
9696 | *val = swab32(tr32(NVRAM_RDDATA)); | 9696 | *val = swab32(tr32(NVRAM_RDDATA)); |
9697 | 9697 | ||
9698 | tg3_disable_nvram_access(tp); | 9698 | tg3_disable_nvram_access(tp); |
9699 | 9699 | ||
9700 | tg3_nvram_unlock(tp); | 9700 | tg3_nvram_unlock(tp); |
9701 | 9701 | ||
9702 | return ret; | 9702 | return ret; |
9703 | } | 9703 | } |
9704 | 9704 | ||
9705 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) | 9705 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) |
9706 | { | 9706 | { |
9707 | int err; | 9707 | int err; |
9708 | u32 tmp; | 9708 | u32 tmp; |
9709 | 9709 | ||
9710 | err = tg3_nvram_read(tp, offset, &tmp); | 9710 | err = tg3_nvram_read(tp, offset, &tmp); |
9711 | *val = swab32(tmp); | 9711 | *val = swab32(tmp); |
9712 | return err; | 9712 | return err; |
9713 | } | 9713 | } |
9714 | 9714 | ||
9715 | static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, | 9715 | static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, |
9716 | u32 offset, u32 len, u8 *buf) | 9716 | u32 offset, u32 len, u8 *buf) |
9717 | { | 9717 | { |
9718 | int i, j, rc = 0; | 9718 | int i, j, rc = 0; |
9719 | u32 val; | 9719 | u32 val; |
9720 | 9720 | ||
9721 | for (i = 0; i < len; i += 4) { | 9721 | for (i = 0; i < len; i += 4) { |
9722 | u32 addr, data; | 9722 | u32 addr, data; |
9723 | 9723 | ||
9724 | addr = offset + i; | 9724 | addr = offset + i; |
9725 | 9725 | ||
9726 | memcpy(&data, buf + i, 4); | 9726 | memcpy(&data, buf + i, 4); |
9727 | 9727 | ||
9728 | tw32(GRC_EEPROM_DATA, cpu_to_le32(data)); | 9728 | tw32(GRC_EEPROM_DATA, cpu_to_le32(data)); |
9729 | 9729 | ||
9730 | val = tr32(GRC_EEPROM_ADDR); | 9730 | val = tr32(GRC_EEPROM_ADDR); |
9731 | tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); | 9731 | tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); |
9732 | 9732 | ||
9733 | val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | | 9733 | val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | |
9734 | EEPROM_ADDR_READ); | 9734 | EEPROM_ADDR_READ); |
9735 | tw32(GRC_EEPROM_ADDR, val | | 9735 | tw32(GRC_EEPROM_ADDR, val | |
9736 | (0 << EEPROM_ADDR_DEVID_SHIFT) | | 9736 | (0 << EEPROM_ADDR_DEVID_SHIFT) | |
9737 | (addr & EEPROM_ADDR_ADDR_MASK) | | 9737 | (addr & EEPROM_ADDR_ADDR_MASK) | |
9738 | EEPROM_ADDR_START | | 9738 | EEPROM_ADDR_START | |
9739 | EEPROM_ADDR_WRITE); | 9739 | EEPROM_ADDR_WRITE); |
9740 | 9740 | ||
9741 | for (j = 0; j < 1000; j++) { | 9741 | for (j = 0; j < 1000; j++) { |
9742 | val = tr32(GRC_EEPROM_ADDR); | 9742 | val = tr32(GRC_EEPROM_ADDR); |
9743 | 9743 | ||
9744 | if (val & EEPROM_ADDR_COMPLETE) | 9744 | if (val & EEPROM_ADDR_COMPLETE) |
9745 | break; | 9745 | break; |
9746 | msleep(1); | 9746 | msleep(1); |
9747 | } | 9747 | } |
9748 | if (!(val & EEPROM_ADDR_COMPLETE)) { | 9748 | if (!(val & EEPROM_ADDR_COMPLETE)) { |
9749 | rc = -EBUSY; | 9749 | rc = -EBUSY; |
9750 | break; | 9750 | break; |
9751 | } | 9751 | } |
9752 | } | 9752 | } |
9753 | 9753 | ||
9754 | return rc; | 9754 | return rc; |
9755 | } | 9755 | } |
9756 | 9756 | ||
9757 | /* offset and length are dword aligned */ | 9757 | /* offset and length are dword aligned */ |
9758 | static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, | 9758 | static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, |
9759 | u8 *buf) | 9759 | u8 *buf) |
9760 | { | 9760 | { |
9761 | int ret = 0; | 9761 | int ret = 0; |
9762 | u32 pagesize = tp->nvram_pagesize; | 9762 | u32 pagesize = tp->nvram_pagesize; |
9763 | u32 pagemask = pagesize - 1; | 9763 | u32 pagemask = pagesize - 1; |
9764 | u32 nvram_cmd; | 9764 | u32 nvram_cmd; |
9765 | u8 *tmp; | 9765 | u8 *tmp; |
9766 | 9766 | ||
9767 | tmp = kmalloc(pagesize, GFP_KERNEL); | 9767 | tmp = kmalloc(pagesize, GFP_KERNEL); |
9768 | if (tmp == NULL) | 9768 | if (tmp == NULL) |
9769 | return -ENOMEM; | 9769 | return -ENOMEM; |
9770 | 9770 | ||
9771 | while (len) { | 9771 | while (len) { |
9772 | int j; | 9772 | int j; |
9773 | u32 phy_addr, page_off, size; | 9773 | u32 phy_addr, page_off, size; |
9774 | 9774 | ||
9775 | phy_addr = offset & ~pagemask; | 9775 | phy_addr = offset & ~pagemask; |
9776 | 9776 | ||
9777 | for (j = 0; j < pagesize; j += 4) { | 9777 | for (j = 0; j < pagesize; j += 4) { |
9778 | if ((ret = tg3_nvram_read(tp, phy_addr + j, | 9778 | if ((ret = tg3_nvram_read(tp, phy_addr + j, |
9779 | (u32 *) (tmp + j)))) | 9779 | (u32 *) (tmp + j)))) |
9780 | break; | 9780 | break; |
9781 | } | 9781 | } |
9782 | if (ret) | 9782 | if (ret) |
9783 | break; | 9783 | break; |
9784 | 9784 | ||
9785 | page_off = offset & pagemask; | 9785 | page_off = offset & pagemask; |
9786 | size = pagesize; | 9786 | size = pagesize; |
9787 | if (len < size) | 9787 | if (len < size) |
9788 | size = len; | 9788 | size = len; |
9789 | 9789 | ||
9790 | len -= size; | 9790 | len -= size; |
9791 | 9791 | ||
9792 | memcpy(tmp + page_off, buf, size); | 9792 | memcpy(tmp + page_off, buf, size); |
9793 | 9793 | ||
9794 | offset = offset + (pagesize - page_off); | 9794 | offset = offset + (pagesize - page_off); |
9795 | 9795 | ||
9796 | tg3_enable_nvram_access(tp); | 9796 | tg3_enable_nvram_access(tp); |
9797 | 9797 | ||
9798 | /* | 9798 | /* |
9799 | * Before we can erase the flash page, we need | 9799 | * Before we can erase the flash page, we need |
9800 | * to issue a special "write enable" command. | 9800 | * to issue a special "write enable" command. |
9801 | */ | 9801 | */ |
9802 | nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 9802 | nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; |
9803 | 9803 | ||
9804 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 9804 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) |
9805 | break; | 9805 | break; |
9806 | 9806 | ||
9807 | /* Erase the target page */ | 9807 | /* Erase the target page */ |
9808 | tw32(NVRAM_ADDR, phy_addr); | 9808 | tw32(NVRAM_ADDR, phy_addr); |
9809 | 9809 | ||
9810 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | | 9810 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | |
9811 | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; | 9811 | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; |
9812 | 9812 | ||
9813 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 9813 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) |
9814 | break; | 9814 | break; |
9815 | 9815 | ||
9816 | /* Issue another write enable to start the write. */ | 9816 | /* Issue another write enable to start the write. */ |
9817 | nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 9817 | nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; |
9818 | 9818 | ||
9819 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 9819 | if (tg3_nvram_exec_cmd(tp, nvram_cmd)) |
9820 | break; | 9820 | break; |
9821 | 9821 | ||
9822 | for (j = 0; j < pagesize; j += 4) { | 9822 | for (j = 0; j < pagesize; j += 4) { |
9823 | u32 data; | 9823 | u32 data; |
9824 | 9824 | ||
9825 | data = *((u32 *) (tmp + j)); | 9825 | data = *((u32 *) (tmp + j)); |
9826 | tw32(NVRAM_WRDATA, cpu_to_be32(data)); | 9826 | tw32(NVRAM_WRDATA, cpu_to_be32(data)); |
9827 | 9827 | ||
9828 | tw32(NVRAM_ADDR, phy_addr + j); | 9828 | tw32(NVRAM_ADDR, phy_addr + j); |
9829 | 9829 | ||
9830 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | | 9830 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | |
9831 | NVRAM_CMD_WR; | 9831 | NVRAM_CMD_WR; |
9832 | 9832 | ||
9833 | if (j == 0) | 9833 | if (j == 0) |
9834 | nvram_cmd |= NVRAM_CMD_FIRST; | 9834 | nvram_cmd |= NVRAM_CMD_FIRST; |
9835 | else if (j == (pagesize - 4)) | 9835 | else if (j == (pagesize - 4)) |
9836 | nvram_cmd |= NVRAM_CMD_LAST; | 9836 | nvram_cmd |= NVRAM_CMD_LAST; |
9837 | 9837 | ||
9838 | if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) | 9838 | if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) |
9839 | break; | 9839 | break; |
9840 | } | 9840 | } |
9841 | if (ret) | 9841 | if (ret) |
9842 | break; | 9842 | break; |
9843 | } | 9843 | } |
9844 | 9844 | ||
9845 | nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 9845 | nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; |
9846 | tg3_nvram_exec_cmd(tp, nvram_cmd); | 9846 | tg3_nvram_exec_cmd(tp, nvram_cmd); |
9847 | 9847 | ||
9848 | kfree(tmp); | 9848 | kfree(tmp); |
9849 | 9849 | ||
9850 | return ret; | 9850 | return ret; |
9851 | } | 9851 | } |
9852 | 9852 | ||
9853 | /* offset and length are dword aligned */ | 9853 | /* offset and length are dword aligned */ |
9854 | static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | 9854 | static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, |
9855 | u8 *buf) | 9855 | u8 *buf) |
9856 | { | 9856 | { |
9857 | int i, ret = 0; | 9857 | int i, ret = 0; |
9858 | 9858 | ||
9859 | for (i = 0; i < len; i += 4, offset += 4) { | 9859 | for (i = 0; i < len; i += 4, offset += 4) { |
9860 | u32 data, page_off, phy_addr, nvram_cmd; | 9860 | u32 data, page_off, phy_addr, nvram_cmd; |
9861 | 9861 | ||
9862 | memcpy(&data, buf + i, 4); | 9862 | memcpy(&data, buf + i, 4); |
9863 | tw32(NVRAM_WRDATA, cpu_to_be32(data)); | 9863 | tw32(NVRAM_WRDATA, cpu_to_be32(data)); |
9864 | 9864 | ||
9865 | page_off = offset % tp->nvram_pagesize; | 9865 | page_off = offset % tp->nvram_pagesize; |
9866 | 9866 | ||
9867 | phy_addr = tg3_nvram_phys_addr(tp, offset); | 9867 | phy_addr = tg3_nvram_phys_addr(tp, offset); |
9868 | 9868 | ||
9869 | tw32(NVRAM_ADDR, phy_addr); | 9869 | tw32(NVRAM_ADDR, phy_addr); |
9870 | 9870 | ||
9871 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; | 9871 | nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; |
9872 | 9872 | ||
9873 | if ((page_off == 0) || (i == 0)) | 9873 | if ((page_off == 0) || (i == 0)) |
9874 | nvram_cmd |= NVRAM_CMD_FIRST; | 9874 | nvram_cmd |= NVRAM_CMD_FIRST; |
9875 | if (page_off == (tp->nvram_pagesize - 4)) | 9875 | if (page_off == (tp->nvram_pagesize - 4)) |
9876 | nvram_cmd |= NVRAM_CMD_LAST; | 9876 | nvram_cmd |= NVRAM_CMD_LAST; |
9877 | 9877 | ||
9878 | if (i == (len - 4)) | 9878 | if (i == (len - 4)) |
9879 | nvram_cmd |= NVRAM_CMD_LAST; | 9879 | nvram_cmd |= NVRAM_CMD_LAST; |
9880 | 9880 | ||
9881 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && | 9881 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && |
9882 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && | 9882 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && |
9883 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && | 9883 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && |
9884 | (tp->nvram_jedecnum == JEDEC_ST) && | 9884 | (tp->nvram_jedecnum == JEDEC_ST) && |
9885 | (nvram_cmd & NVRAM_CMD_FIRST)) { | 9885 | (nvram_cmd & NVRAM_CMD_FIRST)) { |
9886 | 9886 | ||
9887 | if ((ret = tg3_nvram_exec_cmd(tp, | 9887 | if ((ret = tg3_nvram_exec_cmd(tp, |
9888 | NVRAM_CMD_WREN | NVRAM_CMD_GO | | 9888 | NVRAM_CMD_WREN | NVRAM_CMD_GO | |
9889 | NVRAM_CMD_DONE))) | 9889 | NVRAM_CMD_DONE))) |
9890 | 9890 | ||
9891 | break; | 9891 | break; |
9892 | } | 9892 | } |
9893 | if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | 9893 | if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { |
9894 | /* We always do complete word writes to eeprom. */ | 9894 | /* We always do complete word writes to eeprom. */ |
9895 | nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); | 9895 | nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); |
9896 | } | 9896 | } |
9897 | 9897 | ||
9898 | if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) | 9898 | if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) |
9899 | break; | 9899 | break; |
9900 | } | 9900 | } |
9901 | return ret; | 9901 | return ret; |
9902 | } | 9902 | } |
9903 | 9903 | ||
9904 | /* offset and length are dword aligned */ | 9904 | /* offset and length are dword aligned */ |
9905 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | 9905 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) |
9906 | { | 9906 | { |
9907 | int ret; | 9907 | int ret; |
9908 | 9908 | ||
9909 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 9909 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { |
9910 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & | 9910 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & |
9911 | ~GRC_LCLCTRL_GPIO_OUTPUT1); | 9911 | ~GRC_LCLCTRL_GPIO_OUTPUT1); |
9912 | udelay(40); | 9912 | udelay(40); |
9913 | } | 9913 | } |
9914 | 9914 | ||
9915 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { | 9915 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { |
9916 | ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); | 9916 | ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); |
9917 | } | 9917 | } |
9918 | else { | 9918 | else { |
9919 | u32 grc_mode; | 9919 | u32 grc_mode; |
9920 | 9920 | ||
9921 | ret = tg3_nvram_lock(tp); | 9921 | ret = tg3_nvram_lock(tp); |
9922 | if (ret) | 9922 | if (ret) |
9923 | return ret; | 9923 | return ret; |
9924 | 9924 | ||
9925 | tg3_enable_nvram_access(tp); | 9925 | tg3_enable_nvram_access(tp); |
9926 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 9926 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
9927 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) | 9927 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) |
9928 | tw32(NVRAM_WRITE1, 0x406); | 9928 | tw32(NVRAM_WRITE1, 0x406); |
9929 | 9929 | ||
9930 | grc_mode = tr32(GRC_MODE); | 9930 | grc_mode = tr32(GRC_MODE); |
9931 | tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); | 9931 | tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); |
9932 | 9932 | ||
9933 | if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || | 9933 | if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || |
9934 | !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | 9934 | !(tp->tg3_flags2 & TG3_FLG2_FLASH)) { |
9935 | 9935 | ||
9936 | ret = tg3_nvram_write_block_buffered(tp, offset, len, | 9936 | ret = tg3_nvram_write_block_buffered(tp, offset, len, |
9937 | buf); | 9937 | buf); |
9938 | } | 9938 | } |
9939 | else { | 9939 | else { |
9940 | ret = tg3_nvram_write_block_unbuffered(tp, offset, len, | 9940 | ret = tg3_nvram_write_block_unbuffered(tp, offset, len, |
9941 | buf); | 9941 | buf); |
9942 | } | 9942 | } |
9943 | 9943 | ||
9944 | grc_mode = tr32(GRC_MODE); | 9944 | grc_mode = tr32(GRC_MODE); |
9945 | tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); | 9945 | tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); |
9946 | 9946 | ||
9947 | tg3_disable_nvram_access(tp); | 9947 | tg3_disable_nvram_access(tp); |
9948 | tg3_nvram_unlock(tp); | 9948 | tg3_nvram_unlock(tp); |
9949 | } | 9949 | } |
9950 | 9950 | ||
9951 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 9951 | if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { |
9952 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 9952 | tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); |
9953 | udelay(40); | 9953 | udelay(40); |
9954 | } | 9954 | } |
9955 | 9955 | ||
9956 | return ret; | 9956 | return ret; |
9957 | } | 9957 | } |
9958 | 9958 | ||
9959 | struct subsys_tbl_ent { | 9959 | struct subsys_tbl_ent { |
9960 | u16 subsys_vendor, subsys_devid; | 9960 | u16 subsys_vendor, subsys_devid; |
9961 | u32 phy_id; | 9961 | u32 phy_id; |
9962 | }; | 9962 | }; |
9963 | 9963 | ||
9964 | static struct subsys_tbl_ent subsys_id_to_phy_id[] = { | 9964 | static struct subsys_tbl_ent subsys_id_to_phy_id[] = { |
9965 | /* Broadcom boards. */ | 9965 | /* Broadcom boards. */ |
9966 | { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */ | 9966 | { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */ |
9967 | { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */ | 9967 | { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */ |
9968 | { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */ | 9968 | { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */ |
9969 | { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */ | 9969 | { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */ |
9970 | { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */ | 9970 | { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */ |
9971 | { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */ | 9971 | { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */ |
9972 | { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */ | 9972 | { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */ |
9973 | { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */ | 9973 | { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */ |
9974 | { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */ | 9974 | { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */ |
9975 | { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */ | 9975 | { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */ |
9976 | { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */ | 9976 | { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */ |
9977 | 9977 | ||
9978 | /* 3com boards. */ | 9978 | /* 3com boards. */ |
9979 | { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */ | 9979 | { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */ |
9980 | { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */ | 9980 | { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */ |
9981 | { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */ | 9981 | { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */ |
9982 | { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */ | 9982 | { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */ |
9983 | { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */ | 9983 | { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */ |
9984 | 9984 | ||
9985 | /* DELL boards. */ | 9985 | /* DELL boards. */ |
9986 | { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */ | 9986 | { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */ |
9987 | { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */ | 9987 | { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */ |
9988 | { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */ | 9988 | { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */ |
9989 | { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */ | 9989 | { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */ |
9990 | 9990 | ||
9991 | /* Compaq boards. */ | 9991 | /* Compaq boards. */ |
9992 | { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */ | 9992 | { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */ |
9993 | { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */ | 9993 | { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */ |
9994 | { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */ | 9994 | { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */ |
9995 | { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */ | 9995 | { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */ |
9996 | { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */ | 9996 | { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */ |
9997 | 9997 | ||
9998 | /* IBM boards. */ | 9998 | /* IBM boards. */ |
9999 | { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */ | 9999 | { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */ |
10000 | }; | 10000 | }; |
10001 | 10001 | ||
10002 | static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) | 10002 | static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) |
10003 | { | 10003 | { |
10004 | int i; | 10004 | int i; |
10005 | 10005 | ||
10006 | for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { | 10006 | for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { |
10007 | if ((subsys_id_to_phy_id[i].subsys_vendor == | 10007 | if ((subsys_id_to_phy_id[i].subsys_vendor == |
10008 | tp->pdev->subsystem_vendor) && | 10008 | tp->pdev->subsystem_vendor) && |
10009 | (subsys_id_to_phy_id[i].subsys_devid == | 10009 | (subsys_id_to_phy_id[i].subsys_devid == |
10010 | tp->pdev->subsystem_device)) | 10010 | tp->pdev->subsystem_device)) |
10011 | return &subsys_id_to_phy_id[i]; | 10011 | return &subsys_id_to_phy_id[i]; |
10012 | } | 10012 | } |
10013 | return NULL; | 10013 | return NULL; |
10014 | } | 10014 | } |
10015 | 10015 | ||
10016 | static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | 10016 | static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) |
10017 | { | 10017 | { |
10018 | u32 val; | 10018 | u32 val; |
10019 | u16 pmcsr; | 10019 | u16 pmcsr; |
10020 | 10020 | ||
10021 | /* On some early chips the SRAM cannot be accessed in D3hot state, | 10021 | /* On some early chips the SRAM cannot be accessed in D3hot state, |
10022 | * so we need to make sure we're in D0. | 10022 | * so we need to make sure we're in D0. |
10023 | */ | 10023 | */ |
10024 | pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); | 10024 | pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); |
10025 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 10025 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
10026 | pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); | 10026 | pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); |
10027 | msleep(1); | 10027 | msleep(1); |
10028 | 10028 | ||
10029 | /* Make sure register accesses (indirect or otherwise) | 10029 | /* Make sure register accesses (indirect or otherwise) |
10030 | * will function correctly. | 10030 | * will function correctly. |
10031 | */ | 10031 | */ |
10032 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 10032 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
10033 | tp->misc_host_ctrl); | 10033 | tp->misc_host_ctrl); |
10034 | 10034 | ||
10035 | /* The memory arbiter has to be enabled in order for SRAM accesses | 10035 | /* The memory arbiter has to be enabled in order for SRAM accesses |
10036 | * to succeed. Normally on powerup the tg3 chip firmware will make | 10036 | * to succeed. Normally on powerup the tg3 chip firmware will make |
10037 | * sure it is enabled, but other entities such as system netboot | 10037 | * sure it is enabled, but other entities such as system netboot |
10038 | * code might disable it. | 10038 | * code might disable it. |
10039 | */ | 10039 | */ |
10040 | val = tr32(MEMARB_MODE); | 10040 | val = tr32(MEMARB_MODE); |
10041 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 10041 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); |
10042 | 10042 | ||
10043 | tp->phy_id = PHY_ID_INVALID; | 10043 | tp->phy_id = PHY_ID_INVALID; |
10044 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 10044 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
10045 | 10045 | ||
10046 | /* Assume an onboard device and WOL capable by default. */ | 10046 | /* Assume an onboard device and WOL capable by default. */ |
10047 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; | 10047 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; |
10048 | 10048 | ||
10049 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 10049 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
10050 | if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { | 10050 | if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { |
10051 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 10051 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; |
10052 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 10052 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; |
10053 | } | 10053 | } |
10054 | if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC) | 10054 | if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC) |
10055 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 10055 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; |
10056 | return; | 10056 | return; |
10057 | } | 10057 | } |
10058 | 10058 | ||
10059 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 10059 | tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); |
10060 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 10060 | if (val == NIC_SRAM_DATA_SIG_MAGIC) { |
10061 | u32 nic_cfg, led_cfg; | 10061 | u32 nic_cfg, led_cfg; |
10062 | u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id; | 10062 | u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id; |
10063 | int eeprom_phy_serdes = 0; | 10063 | int eeprom_phy_serdes = 0; |
10064 | 10064 | ||
10065 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 10065 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); |
10066 | tp->nic_sram_data_cfg = nic_cfg; | 10066 | tp->nic_sram_data_cfg = nic_cfg; |
10067 | 10067 | ||
10068 | tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); | 10068 | tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); |
10069 | ver >>= NIC_SRAM_DATA_VER_SHIFT; | 10069 | ver >>= NIC_SRAM_DATA_VER_SHIFT; |
10070 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && | 10070 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && |
10071 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && | 10071 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && |
10072 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && | 10072 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && |
10073 | (ver > 0) && (ver < 0x100)) | 10073 | (ver > 0) && (ver < 0x100)) |
10074 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); | 10074 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); |
10075 | 10075 | ||
10076 | if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == | 10076 | if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == |
10077 | NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) | 10077 | NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) |
10078 | eeprom_phy_serdes = 1; | 10078 | eeprom_phy_serdes = 1; |
10079 | 10079 | ||
10080 | tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); | 10080 | tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); |
10081 | if (nic_phy_id != 0) { | 10081 | if (nic_phy_id != 0) { |
10082 | u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; | 10082 | u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; |
10083 | u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; | 10083 | u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; |
10084 | 10084 | ||
10085 | eeprom_phy_id = (id1 >> 16) << 10; | 10085 | eeprom_phy_id = (id1 >> 16) << 10; |
10086 | eeprom_phy_id |= (id2 & 0xfc00) << 16; | 10086 | eeprom_phy_id |= (id2 & 0xfc00) << 16; |
10087 | eeprom_phy_id |= (id2 & 0x03ff) << 0; | 10087 | eeprom_phy_id |= (id2 & 0x03ff) << 0; |
10088 | } else | 10088 | } else |
10089 | eeprom_phy_id = 0; | 10089 | eeprom_phy_id = 0; |
10090 | 10090 | ||
10091 | tp->phy_id = eeprom_phy_id; | 10091 | tp->phy_id = eeprom_phy_id; |
10092 | if (eeprom_phy_serdes) { | 10092 | if (eeprom_phy_serdes) { |
10093 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 10093 | if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) |
10094 | tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; | 10094 | tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; |
10095 | else | 10095 | else |
10096 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 10096 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
10097 | } | 10097 | } |
10098 | 10098 | ||
10099 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 10099 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) |
10100 | led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | | 10100 | led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | |
10101 | SHASTA_EXT_LED_MODE_MASK); | 10101 | SHASTA_EXT_LED_MODE_MASK); |
10102 | else | 10102 | else |
10103 | led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; | 10103 | led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; |
10104 | 10104 | ||
10105 | switch (led_cfg) { | 10105 | switch (led_cfg) { |
10106 | default: | 10106 | default: |
10107 | case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: | 10107 | case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: |
10108 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 10108 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
10109 | break; | 10109 | break; |
10110 | 10110 | ||
10111 | case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: | 10111 | case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: |
10112 | tp->led_ctrl = LED_CTRL_MODE_PHY_2; | 10112 | tp->led_ctrl = LED_CTRL_MODE_PHY_2; |
10113 | break; | 10113 | break; |
10114 | 10114 | ||
10115 | case NIC_SRAM_DATA_CFG_LED_MODE_MAC: | 10115 | case NIC_SRAM_DATA_CFG_LED_MODE_MAC: |
10116 | tp->led_ctrl = LED_CTRL_MODE_MAC; | 10116 | tp->led_ctrl = LED_CTRL_MODE_MAC; |
10117 | 10117 | ||
10118 | /* Default to PHY_1_MODE if 0 (MAC_MODE) is | 10118 | /* Default to PHY_1_MODE if 0 (MAC_MODE) is |
10119 | * read on some older 5700/5701 bootcode. | 10119 | * read on some older 5700/5701 bootcode. |
10120 | */ | 10120 | */ |
10121 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 10121 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
10122 | ASIC_REV_5700 || | 10122 | ASIC_REV_5700 || |
10123 | GET_ASIC_REV(tp->pci_chip_rev_id) == | 10123 | GET_ASIC_REV(tp->pci_chip_rev_id) == |
10124 | ASIC_REV_5701) | 10124 | ASIC_REV_5701) |
10125 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 10125 | tp->led_ctrl = LED_CTRL_MODE_PHY_1; |
10126 | 10126 | ||
10127 | break; | 10127 | break; |
10128 | 10128 | ||
10129 | case SHASTA_EXT_LED_SHARED: | 10129 | case SHASTA_EXT_LED_SHARED: |
10130 | tp->led_ctrl = LED_CTRL_MODE_SHARED; | 10130 | tp->led_ctrl = LED_CTRL_MODE_SHARED; |
10131 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 10131 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && |
10132 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) | 10132 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) |
10133 | tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | | 10133 | tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | |
10134 | LED_CTRL_MODE_PHY_2); | 10134 | LED_CTRL_MODE_PHY_2); |
10135 | break; | 10135 | break; |
10136 | 10136 | ||
10137 | case SHASTA_EXT_LED_MAC: | 10137 | case SHASTA_EXT_LED_MAC: |
10138 | tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; | 10138 | tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; |
10139 | break; | 10139 | break; |
10140 | 10140 | ||
10141 | case SHASTA_EXT_LED_COMBO: | 10141 | case SHASTA_EXT_LED_COMBO: |
10142 | tp->led_ctrl = LED_CTRL_MODE_COMBO; | 10142 | tp->led_ctrl = LED_CTRL_MODE_COMBO; |
10143 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) | 10143 | if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) |
10144 | tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | | 10144 | tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | |
10145 | LED_CTRL_MODE_PHY_2); | 10145 | LED_CTRL_MODE_PHY_2); |
10146 | break; | 10146 | break; |
10147 | 10147 | ||
10148 | } | 10148 | } |
10149 | 10149 | ||
10150 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 10150 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
10151 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && | 10151 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && |
10152 | tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) | 10152 | tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) |
10153 | tp->led_ctrl = LED_CTRL_MODE_PHY_2; | 10153 | tp->led_ctrl = LED_CTRL_MODE_PHY_2; |
10154 | 10154 | ||
10155 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { | 10155 | if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { |
10156 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; | 10156 | tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; |
10157 | if ((tp->pdev->subsystem_vendor == | 10157 | if ((tp->pdev->subsystem_vendor == |
10158 | PCI_VENDOR_ID_ARIMA) && | 10158 | PCI_VENDOR_ID_ARIMA) && |
10159 | (tp->pdev->subsystem_device == 0x205a || | 10159 | (tp->pdev->subsystem_device == 0x205a || |
10160 | tp->pdev->subsystem_device == 0x2063)) | 10160 | tp->pdev->subsystem_device == 0x2063)) |
10161 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 10161 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; |
10162 | } else { | 10162 | } else { |
10163 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 10163 | tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; |
10164 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 10164 | tp->tg3_flags2 |= TG3_FLG2_IS_NIC; |
10165 | } | 10165 | } |
10166 | 10166 | ||
10167 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 10167 | if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { |
10168 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 10168 | tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; |
10169 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 10169 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) |
10170 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 10170 | tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; |
10171 | } | 10171 | } |
10172 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && | 10172 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && |
10173 | !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) | 10173 | !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) |
10174 | tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; | 10174 | tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; |
10175 | 10175 | ||
10176 | if (cfg2 & (1 << 17)) | 10176 | if (cfg2 & (1 << 17)) |
10177 | tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; | 10177 | tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; |
10178 | 10178 | ||
10179 | /* serdes signal pre-emphasis in register 0x590 set by */ | 10179 | /* serdes signal pre-emphasis in register 0x590 set by */ |
10180 | /* bootcode if bit 18 is set */ | 10180 | /* bootcode if bit 18 is set */ |
10181 | if (cfg2 & (1 << 18)) | 10181 | if (cfg2 & (1 << 18)) |
10182 | tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; | 10182 | tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; |
10183 | 10183 | ||
10184 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 10184 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
10185 | u32 cfg3; | 10185 | u32 cfg3; |
10186 | 10186 | ||
10187 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); | 10187 | tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); |
10188 | if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) | 10188 | if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) |
10189 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 10189 | tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; |
10190 | } | 10190 | } |
10191 | } | 10191 | } |
10192 | } | 10192 | } |
10193 | 10193 | ||
10194 | static int __devinit tg3_phy_probe(struct tg3 *tp) | 10194 | static int __devinit tg3_phy_probe(struct tg3 *tp) |
10195 | { | 10195 | { |
10196 | u32 hw_phy_id_1, hw_phy_id_2; | 10196 | u32 hw_phy_id_1, hw_phy_id_2; |
10197 | u32 hw_phy_id, hw_phy_id_masked; | 10197 | u32 hw_phy_id, hw_phy_id_masked; |
10198 | int err; | 10198 | int err; |
10199 | 10199 | ||
10200 | /* Reading the PHY ID register can conflict with ASF | 10200 | /* Reading the PHY ID register can conflict with ASF |
10201 | * firmware access to the PHY hardware. | 10201 | * firmware access to the PHY hardware. |
10202 | */ | 10202 | */ |
10203 | err = 0; | 10203 | err = 0; |
10204 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 10204 | if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { |
10205 | hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; | 10205 | hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; |
10206 | } else { | 10206 | } else { |
10207 | /* Now read the physical PHY_ID from the chip and verify | 10207 | /* Now read the physical PHY_ID from the chip and verify |
10208 | * that it is sane. If it doesn't look good, we fall back | 10208 | * that it is sane. If it doesn't look good, we fall back |
10209 | * to either the hard-coded table based PHY_ID or, failing | 10209 | * to either the hard-coded table based PHY_ID or, failing |
10210 | * that, the value found in the eeprom area. | 10210 | * that, the value found in the eeprom area. |
10211 | */ | 10211 | */ |
10212 | err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); | 10212 | err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); |
10213 | err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); | 10213 | err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); |
10214 | 10214 | ||
10215 | hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; | 10215 | hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; |
10216 | hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; | 10216 | hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; |
10217 | hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; | 10217 | hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; |
10218 | 10218 | ||
10219 | hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; | 10219 | hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; |
10220 | } | 10220 | } |
10221 | 10221 | ||
10222 | if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { | 10222 | if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { |
10223 | tp->phy_id = hw_phy_id; | 10223 | tp->phy_id = hw_phy_id; |
10224 | if (hw_phy_id_masked == PHY_ID_BCM8002) | 10224 | if (hw_phy_id_masked == PHY_ID_BCM8002) |
10225 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 10225 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
10226 | else | 10226 | else |
10227 | tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; | 10227 | tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; |
10228 | } else { | 10228 | } else { |
10229 | if (tp->phy_id != PHY_ID_INVALID) { | 10229 | if (tp->phy_id != PHY_ID_INVALID) { |
10230 | /* Do nothing, phy ID already set up in | 10230 | /* Do nothing, phy ID already set up in |
10231 | * tg3_get_eeprom_hw_cfg(). | 10231 | * tg3_get_eeprom_hw_cfg(). |
10232 | */ | 10232 | */ |
10233 | } else { | 10233 | } else { |
10234 | struct subsys_tbl_ent *p; | 10234 | struct subsys_tbl_ent *p; |
10235 | 10235 | ||
10236 | /* No eeprom signature? Try the hardcoded | 10236 | /* No eeprom signature? Try the hardcoded |
10237 | * subsys device table. | 10237 | * subsys device table. |
10238 | */ | 10238 | */ |
10239 | p = lookup_by_subsys(tp); | 10239 | p = lookup_by_subsys(tp); |
10240 | if (!p) | 10240 | if (!p) |
10241 | return -ENODEV; | 10241 | return -ENODEV; |
10242 | 10242 | ||
10243 | tp->phy_id = p->phy_id; | 10243 | tp->phy_id = p->phy_id; |
10244 | if (!tp->phy_id || | 10244 | if (!tp->phy_id || |
10245 | tp->phy_id == PHY_ID_BCM8002) | 10245 | tp->phy_id == PHY_ID_BCM8002) |
10246 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 10246 | tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; |
10247 | } | 10247 | } |
10248 | } | 10248 | } |
10249 | 10249 | ||
10250 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && | 10250 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && |
10251 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 10251 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { |
10252 | u32 bmsr, adv_reg, tg3_ctrl, mask; | 10252 | u32 bmsr, adv_reg, tg3_ctrl, mask; |
10253 | 10253 | ||
10254 | tg3_readphy(tp, MII_BMSR, &bmsr); | 10254 | tg3_readphy(tp, MII_BMSR, &bmsr); |
10255 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 10255 | if (!tg3_readphy(tp, MII_BMSR, &bmsr) && |
10256 | (bmsr & BMSR_LSTATUS)) | 10256 | (bmsr & BMSR_LSTATUS)) |
10257 | goto skip_phy_reset; | 10257 | goto skip_phy_reset; |
10258 | 10258 | ||
10259 | err = tg3_phy_reset(tp); | 10259 | err = tg3_phy_reset(tp); |
10260 | if (err) | 10260 | if (err) |
10261 | return err; | 10261 | return err; |
10262 | 10262 | ||
10263 | adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 10263 | adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | |
10264 | ADVERTISE_100HALF | ADVERTISE_100FULL | | 10264 | ADVERTISE_100HALF | ADVERTISE_100FULL | |
10265 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 10265 | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); |
10266 | tg3_ctrl = 0; | 10266 | tg3_ctrl = 0; |
10267 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { | 10267 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { |
10268 | tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | | 10268 | tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | |
10269 | MII_TG3_CTRL_ADV_1000_FULL); | 10269 | MII_TG3_CTRL_ADV_1000_FULL); |
10270 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 10270 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
10271 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | 10271 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) |
10272 | tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | | 10272 | tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | |
10273 | MII_TG3_CTRL_ENABLE_AS_MASTER); | 10273 | MII_TG3_CTRL_ENABLE_AS_MASTER); |
10274 | } | 10274 | } |
10275 | 10275 | ||
10276 | mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | 10276 | mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | |
10277 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | 10277 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | |
10278 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); | 10278 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); |
10279 | if (!tg3_copper_is_advertising_all(tp, mask)) { | 10279 | if (!tg3_copper_is_advertising_all(tp, mask)) { |
10280 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); | 10280 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); |
10281 | 10281 | ||
10282 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 10282 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
10283 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | 10283 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); |
10284 | 10284 | ||
10285 | tg3_writephy(tp, MII_BMCR, | 10285 | tg3_writephy(tp, MII_BMCR, |
10286 | BMCR_ANENABLE | BMCR_ANRESTART); | 10286 | BMCR_ANENABLE | BMCR_ANRESTART); |
10287 | } | 10287 | } |
10288 | tg3_phy_set_wirespeed(tp); | 10288 | tg3_phy_set_wirespeed(tp); |
10289 | 10289 | ||
10290 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); | 10290 | tg3_writephy(tp, MII_ADVERTISE, adv_reg); |
10291 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 10291 | if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
10292 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | 10292 | tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); |
10293 | } | 10293 | } |
10294 | 10294 | ||
10295 | skip_phy_reset: | 10295 | skip_phy_reset: |
10296 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 10296 | if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { |
10297 | err = tg3_init_5401phy_dsp(tp); | 10297 | err = tg3_init_5401phy_dsp(tp); |
10298 | if (err) | 10298 | if (err) |
10299 | return err; | 10299 | return err; |
10300 | } | 10300 | } |
10301 | 10301 | ||
10302 | if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) { | 10302 | if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) { |
10303 | err = tg3_init_5401phy_dsp(tp); | 10303 | err = tg3_init_5401phy_dsp(tp); |
10304 | } | 10304 | } |
10305 | 10305 | ||
10306 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) | 10306 | if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) |
10307 | tp->link_config.advertising = | 10307 | tp->link_config.advertising = |
10308 | (ADVERTISED_1000baseT_Half | | 10308 | (ADVERTISED_1000baseT_Half | |
10309 | ADVERTISED_1000baseT_Full | | 10309 | ADVERTISED_1000baseT_Full | |
10310 | ADVERTISED_Autoneg | | 10310 | ADVERTISED_Autoneg | |
10311 | ADVERTISED_FIBRE); | 10311 | ADVERTISED_FIBRE); |
10312 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 10312 | if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) |
10313 | tp->link_config.advertising &= | 10313 | tp->link_config.advertising &= |
10314 | ~(ADVERTISED_1000baseT_Half | | 10314 | ~(ADVERTISED_1000baseT_Half | |
10315 | ADVERTISED_1000baseT_Full); | 10315 | ADVERTISED_1000baseT_Full); |
10316 | 10316 | ||
10317 | return err; | 10317 | return err; |
10318 | } | 10318 | } |
10319 | 10319 | ||
10320 | static void __devinit tg3_read_partno(struct tg3 *tp) | 10320 | static void __devinit tg3_read_partno(struct tg3 *tp) |
10321 | { | 10321 | { |
10322 | unsigned char vpd_data[256]; | 10322 | unsigned char vpd_data[256]; |
10323 | unsigned int i; | 10323 | unsigned int i; |
10324 | u32 magic; | 10324 | u32 magic; |
10325 | 10325 | ||
10326 | if (tg3_nvram_read_swab(tp, 0x0, &magic)) | 10326 | if (tg3_nvram_read_swab(tp, 0x0, &magic)) |
10327 | goto out_not_found; | 10327 | goto out_not_found; |
10328 | 10328 | ||
10329 | if (magic == TG3_EEPROM_MAGIC) { | 10329 | if (magic == TG3_EEPROM_MAGIC) { |
10330 | for (i = 0; i < 256; i += 4) { | 10330 | for (i = 0; i < 256; i += 4) { |
10331 | u32 tmp; | 10331 | u32 tmp; |
10332 | 10332 | ||
10333 | if (tg3_nvram_read(tp, 0x100 + i, &tmp)) | 10333 | if (tg3_nvram_read(tp, 0x100 + i, &tmp)) |
10334 | goto out_not_found; | 10334 | goto out_not_found; |
10335 | 10335 | ||
10336 | vpd_data[i + 0] = ((tmp >> 0) & 0xff); | 10336 | vpd_data[i + 0] = ((tmp >> 0) & 0xff); |
10337 | vpd_data[i + 1] = ((tmp >> 8) & 0xff); | 10337 | vpd_data[i + 1] = ((tmp >> 8) & 0xff); |
10338 | vpd_data[i + 2] = ((tmp >> 16) & 0xff); | 10338 | vpd_data[i + 2] = ((tmp >> 16) & 0xff); |
10339 | vpd_data[i + 3] = ((tmp >> 24) & 0xff); | 10339 | vpd_data[i + 3] = ((tmp >> 24) & 0xff); |
10340 | } | 10340 | } |
10341 | } else { | 10341 | } else { |
10342 | int vpd_cap; | 10342 | int vpd_cap; |
10343 | 10343 | ||
10344 | vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); | 10344 | vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); |
10345 | for (i = 0; i < 256; i += 4) { | 10345 | for (i = 0; i < 256; i += 4) { |
10346 | u32 tmp, j = 0; | 10346 | u32 tmp, j = 0; |
10347 | u16 tmp16; | 10347 | u16 tmp16; |
10348 | 10348 | ||
10349 | pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, | 10349 | pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, |
10350 | i); | 10350 | i); |
10351 | while (j++ < 100) { | 10351 | while (j++ < 100) { |
10352 | pci_read_config_word(tp->pdev, vpd_cap + | 10352 | pci_read_config_word(tp->pdev, vpd_cap + |
10353 | PCI_VPD_ADDR, &tmp16); | 10353 | PCI_VPD_ADDR, &tmp16); |
10354 | if (tmp16 & 0x8000) | 10354 | if (tmp16 & 0x8000) |
10355 | break; | 10355 | break; |
10356 | msleep(1); | 10356 | msleep(1); |
10357 | } | 10357 | } |
10358 | if (!(tmp16 & 0x8000)) | 10358 | if (!(tmp16 & 0x8000)) |
10359 | goto out_not_found; | 10359 | goto out_not_found; |
10360 | 10360 | ||
10361 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, | 10361 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, |
10362 | &tmp); | 10362 | &tmp); |
10363 | tmp = cpu_to_le32(tmp); | 10363 | tmp = cpu_to_le32(tmp); |
10364 | memcpy(&vpd_data[i], &tmp, 4); | 10364 | memcpy(&vpd_data[i], &tmp, 4); |
10365 | } | 10365 | } |
10366 | } | 10366 | } |
10367 | 10367 | ||
10368 | /* Now parse and find the part number. */ | 10368 | /* Now parse and find the part number. */ |
10369 | for (i = 0; i < 254; ) { | 10369 | for (i = 0; i < 254; ) { |
10370 | unsigned char val = vpd_data[i]; | 10370 | unsigned char val = vpd_data[i]; |
10371 | unsigned int block_end; | 10371 | unsigned int block_end; |
10372 | 10372 | ||
10373 | if (val == 0x82 || val == 0x91) { | 10373 | if (val == 0x82 || val == 0x91) { |
10374 | i = (i + 3 + | 10374 | i = (i + 3 + |
10375 | (vpd_data[i + 1] + | 10375 | (vpd_data[i + 1] + |
10376 | (vpd_data[i + 2] << 8))); | 10376 | (vpd_data[i + 2] << 8))); |
10377 | continue; | 10377 | continue; |
10378 | } | 10378 | } |
10379 | 10379 | ||
10380 | if (val != 0x90) | 10380 | if (val != 0x90) |
10381 | goto out_not_found; | 10381 | goto out_not_found; |
10382 | 10382 | ||
10383 | block_end = (i + 3 + | 10383 | block_end = (i + 3 + |
10384 | (vpd_data[i + 1] + | 10384 | (vpd_data[i + 1] + |
10385 | (vpd_data[i + 2] << 8))); | 10385 | (vpd_data[i + 2] << 8))); |
10386 | i += 3; | 10386 | i += 3; |
10387 | 10387 | ||
10388 | if (block_end > 256) | 10388 | if (block_end > 256) |
10389 | goto out_not_found; | 10389 | goto out_not_found; |
10390 | 10390 | ||
10391 | while (i < (block_end - 2)) { | 10391 | while (i < (block_end - 2)) { |
10392 | if (vpd_data[i + 0] == 'P' && | 10392 | if (vpd_data[i + 0] == 'P' && |
10393 | vpd_data[i + 1] == 'N') { | 10393 | vpd_data[i + 1] == 'N') { |
10394 | int partno_len = vpd_data[i + 2]; | 10394 | int partno_len = vpd_data[i + 2]; |
10395 | 10395 | ||
10396 | i += 3; | 10396 | i += 3; |
10397 | if (partno_len > 24 || (partno_len + i) > 256) | 10397 | if (partno_len > 24 || (partno_len + i) > 256) |
10398 | goto out_not_found; | 10398 | goto out_not_found; |
10399 | 10399 | ||
10400 | memcpy(tp->board_part_number, | 10400 | memcpy(tp->board_part_number, |
10401 | &vpd_data[i], partno_len); | 10401 | &vpd_data[i], partno_len); |
10402 | 10402 | ||
10403 | /* Success. */ | 10403 | /* Success. */ |
10404 | return; | 10404 | return; |
10405 | } | 10405 | } |
10406 | i += 3 + vpd_data[i + 2]; | 10406 | i += 3 + vpd_data[i + 2]; |
10407 | } | 10407 | } |
10408 | 10408 | ||
10409 | /* Part number not found. */ | 10409 | /* Part number not found. */ |
10410 | goto out_not_found; | 10410 | goto out_not_found; |
10411 | } | 10411 | } |
10412 | 10412 | ||
10413 | out_not_found: | 10413 | out_not_found: |
10414 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 10414 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
10415 | strcpy(tp->board_part_number, "BCM95906"); | 10415 | strcpy(tp->board_part_number, "BCM95906"); |
10416 | else | 10416 | else |
10417 | strcpy(tp->board_part_number, "none"); | 10417 | strcpy(tp->board_part_number, "none"); |
10418 | } | 10418 | } |
10419 | 10419 | ||
10420 | static void __devinit tg3_read_fw_ver(struct tg3 *tp) | 10420 | static void __devinit tg3_read_fw_ver(struct tg3 *tp) |
10421 | { | 10421 | { |
10422 | u32 val, offset, start; | 10422 | u32 val, offset, start; |
10423 | 10423 | ||
10424 | if (tg3_nvram_read_swab(tp, 0, &val)) | 10424 | if (tg3_nvram_read_swab(tp, 0, &val)) |
10425 | return; | 10425 | return; |
10426 | 10426 | ||
10427 | if (val != TG3_EEPROM_MAGIC) | 10427 | if (val != TG3_EEPROM_MAGIC) |
10428 | return; | 10428 | return; |
10429 | 10429 | ||
10430 | if (tg3_nvram_read_swab(tp, 0xc, &offset) || | 10430 | if (tg3_nvram_read_swab(tp, 0xc, &offset) || |
10431 | tg3_nvram_read_swab(tp, 0x4, &start)) | 10431 | tg3_nvram_read_swab(tp, 0x4, &start)) |
10432 | return; | 10432 | return; |
10433 | 10433 | ||
10434 | offset = tg3_nvram_logical_addr(tp, offset); | 10434 | offset = tg3_nvram_logical_addr(tp, offset); |
10435 | if (tg3_nvram_read_swab(tp, offset, &val)) | 10435 | if (tg3_nvram_read_swab(tp, offset, &val)) |
10436 | return; | 10436 | return; |
10437 | 10437 | ||
10438 | if ((val & 0xfc000000) == 0x0c000000) { | 10438 | if ((val & 0xfc000000) == 0x0c000000) { |
10439 | u32 ver_offset, addr; | 10439 | u32 ver_offset, addr; |
10440 | int i; | 10440 | int i; |
10441 | 10441 | ||
10442 | if (tg3_nvram_read_swab(tp, offset + 4, &val) || | 10442 | if (tg3_nvram_read_swab(tp, offset + 4, &val) || |
10443 | tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) | 10443 | tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) |
10444 | return; | 10444 | return; |
10445 | 10445 | ||
10446 | if (val != 0) | 10446 | if (val != 0) |
10447 | return; | 10447 | return; |
10448 | 10448 | ||
10449 | addr = offset + ver_offset - start; | 10449 | addr = offset + ver_offset - start; |
10450 | for (i = 0; i < 16; i += 4) { | 10450 | for (i = 0; i < 16; i += 4) { |
10451 | if (tg3_nvram_read(tp, addr + i, &val)) | 10451 | if (tg3_nvram_read(tp, addr + i, &val)) |
10452 | return; | 10452 | return; |
10453 | 10453 | ||
10454 | val = cpu_to_le32(val); | 10454 | val = cpu_to_le32(val); |
10455 | memcpy(tp->fw_ver + i, &val, 4); | 10455 | memcpy(tp->fw_ver + i, &val, 4); |
10456 | } | 10456 | } |
10457 | } | 10457 | } |
10458 | } | 10458 | } |
10459 | 10459 | ||
10460 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 10460 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); |
10461 | 10461 | ||
10462 | static int __devinit tg3_get_invariants(struct tg3 *tp) | 10462 | static int __devinit tg3_get_invariants(struct tg3 *tp) |
10463 | { | 10463 | { |
10464 | static struct pci_device_id write_reorder_chipsets[] = { | 10464 | static struct pci_device_id write_reorder_chipsets[] = { |
10465 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | 10465 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, |
10466 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 10466 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, |
10467 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | 10467 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, |
10468 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | 10468 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, |
10469 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, | 10469 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, |
10470 | PCI_DEVICE_ID_VIA_8385_0) }, | 10470 | PCI_DEVICE_ID_VIA_8385_0) }, |
10471 | { }, | 10471 | { }, |
10472 | }; | 10472 | }; |
10473 | u32 misc_ctrl_reg; | 10473 | u32 misc_ctrl_reg; |
10474 | u32 cacheline_sz_reg; | 10474 | u32 cacheline_sz_reg; |
10475 | u32 pci_state_reg, grc_misc_cfg; | 10475 | u32 pci_state_reg, grc_misc_cfg; |
10476 | u32 val; | 10476 | u32 val; |
10477 | u16 pci_cmd; | 10477 | u16 pci_cmd; |
10478 | int err, pcie_cap; | 10478 | int err, pcie_cap; |
10479 | 10479 | ||
10480 | /* Force memory write invalidate off. If we leave it on, | 10480 | /* Force memory write invalidate off. If we leave it on, |
10481 | * then on 5700_BX chips we have to enable a workaround. | 10481 | * then on 5700_BX chips we have to enable a workaround. |
10482 | * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary | 10482 | * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary |
10483 | * to match the cacheline size. The Broadcom driver has this | 10483 | * to match the cacheline size. The Broadcom driver has this |
10484 | * workaround but turns MWI off all the time and so never uses | 10484 | * workaround but turns MWI off all the time and so never uses |
10485 | * it. This seems to suggest that the workaround is insufficient. | 10485 | * it. This seems to suggest that the workaround is insufficient. |
10486 | */ | 10486 | */ |
10487 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 10487 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); |
10488 | pci_cmd &= ~PCI_COMMAND_INVALIDATE; | 10488 | pci_cmd &= ~PCI_COMMAND_INVALIDATE; |
10489 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 10489 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); |
10490 | 10490 | ||
10491 | /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL | 10491 | /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL |
10492 | * has the register indirect write enable bit set before | 10492 | * has the register indirect write enable bit set before |
10493 | * we try to access any of the MMIO registers. It is also | 10493 | * we try to access any of the MMIO registers. It is also |
10494 | * critical that the PCI-X hw workaround situation is decided | 10494 | * critical that the PCI-X hw workaround situation is decided |
10495 | * before that as well. | 10495 | * before that as well. |
10496 | */ | 10496 | */ |
10497 | pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 10497 | pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
10498 | &misc_ctrl_reg); | 10498 | &misc_ctrl_reg); |
10499 | 10499 | ||
10500 | tp->pci_chip_rev_id = (misc_ctrl_reg >> | 10500 | tp->pci_chip_rev_id = (misc_ctrl_reg >> |
10501 | MISC_HOST_CTRL_CHIPREV_SHIFT); | 10501 | MISC_HOST_CTRL_CHIPREV_SHIFT); |
10502 | 10502 | ||
10503 | /* Wrong chip ID in 5752 A0. This code can be removed later | 10503 | /* Wrong chip ID in 5752 A0. This code can be removed later |
10504 | * as A0 is not in production. | 10504 | * as A0 is not in production. |
10505 | */ | 10505 | */ |
10506 | if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) | 10506 | if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) |
10507 | tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; | 10507 | tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; |
10508 | 10508 | ||
10509 | /* If we have 5702/03 A1 or A2 on certain ICH chipsets, | 10509 | /* If we have 5702/03 A1 or A2 on certain ICH chipsets, |
10510 | * we need to disable memory and use config. cycles | 10510 | * we need to disable memory and use config. cycles |
10511 | * only to access all registers. The 5702/03 chips | 10511 | * only to access all registers. The 5702/03 chips |
10512 | * can mistakenly decode the special cycles from the | 10512 | * can mistakenly decode the special cycles from the |
10513 | * ICH chipsets as memory write cycles, causing corruption | 10513 | * ICH chipsets as memory write cycles, causing corruption |
10514 | * of register and memory space. Only certain ICH bridges | 10514 | * of register and memory space. Only certain ICH bridges |
10515 | * will drive special cycles with non-zero data during the | 10515 | * will drive special cycles with non-zero data during the |
10516 | * address phase which can fall within the 5703's address | 10516 | * address phase which can fall within the 5703's address |
10517 | * range. This is not an ICH bug as the PCI spec allows | 10517 | * range. This is not an ICH bug as the PCI spec allows |
10518 | * non-zero address during special cycles. However, only | 10518 | * non-zero address during special cycles. However, only |
10519 | * these ICH bridges are known to drive non-zero addresses | 10519 | * these ICH bridges are known to drive non-zero addresses |
10520 | * during special cycles. | 10520 | * during special cycles. |
10521 | * | 10521 | * |
10522 | * Since special cycles do not cross PCI bridges, we only | 10522 | * Since special cycles do not cross PCI bridges, we only |
10523 | * enable this workaround if the 5703 is on the secondary | 10523 | * enable this workaround if the 5703 is on the secondary |
10524 | * bus of these ICH bridges. | 10524 | * bus of these ICH bridges. |
10525 | */ | 10525 | */ |
10526 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || | 10526 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || |
10527 | (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { | 10527 | (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { |
10528 | static struct tg3_dev_id { | 10528 | static struct tg3_dev_id { |
10529 | u32 vendor; | 10529 | u32 vendor; |
10530 | u32 device; | 10530 | u32 device; |
10531 | u32 rev; | 10531 | u32 rev; |
10532 | } ich_chipsets[] = { | 10532 | } ich_chipsets[] = { |
10533 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, | 10533 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, |
10534 | PCI_ANY_ID }, | 10534 | PCI_ANY_ID }, |
10535 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, | 10535 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, |
10536 | PCI_ANY_ID }, | 10536 | PCI_ANY_ID }, |
10537 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, | 10537 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, |
10538 | 0xa }, | 10538 | 0xa }, |
10539 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, | 10539 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, |
10540 | PCI_ANY_ID }, | 10540 | PCI_ANY_ID }, |
10541 | { }, | 10541 | { }, |
10542 | }; | 10542 | }; |
10543 | struct tg3_dev_id *pci_id = &ich_chipsets[0]; | 10543 | struct tg3_dev_id *pci_id = &ich_chipsets[0]; |
10544 | struct pci_dev *bridge = NULL; | 10544 | struct pci_dev *bridge = NULL; |
10545 | 10545 | ||
10546 | while (pci_id->vendor != 0) { | 10546 | while (pci_id->vendor != 0) { |
10547 | bridge = pci_get_device(pci_id->vendor, pci_id->device, | 10547 | bridge = pci_get_device(pci_id->vendor, pci_id->device, |
10548 | bridge); | 10548 | bridge); |
10549 | if (!bridge) { | 10549 | if (!bridge) { |
10550 | pci_id++; | 10550 | pci_id++; |
10551 | continue; | 10551 | continue; |
10552 | } | 10552 | } |
10553 | if (pci_id->rev != PCI_ANY_ID) { | 10553 | if (pci_id->rev != PCI_ANY_ID) { |
10554 | if (bridge->revision > pci_id->rev) | 10554 | if (bridge->revision > pci_id->rev) |
10555 | continue; | 10555 | continue; |
10556 | } | 10556 | } |
10557 | if (bridge->subordinate && | 10557 | if (bridge->subordinate && |
10558 | (bridge->subordinate->number == | 10558 | (bridge->subordinate->number == |
10559 | tp->pdev->bus->number)) { | 10559 | tp->pdev->bus->number)) { |
10560 | 10560 | ||
10561 | tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; | 10561 | tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; |
10562 | pci_dev_put(bridge); | 10562 | pci_dev_put(bridge); |
10563 | break; | 10563 | break; |
10564 | } | 10564 | } |
10565 | } | 10565 | } |
10566 | } | 10566 | } |
10567 | 10567 | ||
10568 | /* The EPB bridge inside 5714, 5715, and 5780 cannot support | 10568 | /* The EPB bridge inside 5714, 5715, and 5780 cannot support |
10569 | * DMA addresses > 40-bit. This bridge may have other additional | 10569 | * DMA addresses > 40-bit. This bridge may have other additional |
10570 | * 57xx devices behind it in some 4-port NIC designs for example. | 10570 | * 57xx devices behind it in some 4-port NIC designs for example. |
10571 | * Any tg3 device found behind the bridge will also need the 40-bit | 10571 | * Any tg3 device found behind the bridge will also need the 40-bit |
10572 | * DMA workaround. | 10572 | * DMA workaround. |
10573 | */ | 10573 | */ |
10574 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || | 10574 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || |
10575 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 10575 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { |
10576 | tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; | 10576 | tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; |
10577 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 10577 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; |
10578 | tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); | 10578 | tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); |
10579 | } | 10579 | } |
10580 | else { | 10580 | else { |
10581 | struct pci_dev *bridge = NULL; | 10581 | struct pci_dev *bridge = NULL; |
10582 | 10582 | ||
10583 | do { | 10583 | do { |
10584 | bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, | 10584 | bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, |
10585 | PCI_DEVICE_ID_SERVERWORKS_EPB, | 10585 | PCI_DEVICE_ID_SERVERWORKS_EPB, |
10586 | bridge); | 10586 | bridge); |
10587 | if (bridge && bridge->subordinate && | 10587 | if (bridge && bridge->subordinate && |
10588 | (bridge->subordinate->number <= | 10588 | (bridge->subordinate->number <= |
10589 | tp->pdev->bus->number) && | 10589 | tp->pdev->bus->number) && |
10590 | (bridge->subordinate->subordinate >= | 10590 | (bridge->subordinate->subordinate >= |
10591 | tp->pdev->bus->number)) { | 10591 | tp->pdev->bus->number)) { |
10592 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 10592 | tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; |
10593 | pci_dev_put(bridge); | 10593 | pci_dev_put(bridge); |
10594 | break; | 10594 | break; |
10595 | } | 10595 | } |
10596 | } while (bridge); | 10596 | } while (bridge); |
10597 | } | 10597 | } |
10598 | 10598 | ||
10599 | /* Initialize misc host control in PCI block. */ | 10599 | /* Initialize misc host control in PCI block. */ |
10600 | tp->misc_host_ctrl |= (misc_ctrl_reg & | 10600 | tp->misc_host_ctrl |= (misc_ctrl_reg & |
10601 | MISC_HOST_CTRL_CHIPREV); | 10601 | MISC_HOST_CTRL_CHIPREV); |
10602 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 10602 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
10603 | tp->misc_host_ctrl); | 10603 | tp->misc_host_ctrl); |
10604 | 10604 | ||
10605 | pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ, | 10605 | pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ, |
10606 | &cacheline_sz_reg); | 10606 | &cacheline_sz_reg); |
10607 | 10607 | ||
10608 | tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff; | 10608 | tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff; |
10609 | tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff; | 10609 | tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff; |
10610 | tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff; | 10610 | tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff; |
10611 | tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff; | 10611 | tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff; |
10612 | 10612 | ||
10613 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || | 10613 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || |
10614 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) | 10614 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) |
10615 | tp->pdev_peer = tg3_find_peer(tp); | 10615 | tp->pdev_peer = tg3_find_peer(tp); |
10616 | 10616 | ||
10617 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 10617 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
10618 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 10618 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
10619 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 10619 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
10620 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 10620 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
10621 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | 10621 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || |
10622 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 10622 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) |
10623 | tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; | 10623 | tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; |
10624 | 10624 | ||
10625 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || | 10625 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || |
10626 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 10626 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) |
10627 | tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; | 10627 | tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; |
10628 | 10628 | ||
10629 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 10629 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
10630 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; | 10630 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; |
10631 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || | 10631 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || |
10632 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || | 10632 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || |
10633 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && | 10633 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && |
10634 | tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && | 10634 | tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && |
10635 | tp->pdev_peer == tp->pdev)) | 10635 | tp->pdev_peer == tp->pdev)) |
10636 | tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; | 10636 | tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; |
10637 | 10637 | ||
10638 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 10638 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
10639 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 10639 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
10640 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 10640 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
10641 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | 10641 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; |
10642 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | 10642 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; |
10643 | } else { | 10643 | } else { |
10644 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | 10644 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; |
10645 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 10645 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == |
10646 | ASIC_REV_5750 && | 10646 | ASIC_REV_5750 && |
10647 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | 10647 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) |
10648 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | 10648 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; |
10649 | } | 10649 | } |
10650 | } | 10650 | } |
10651 | 10651 | ||
10652 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && | 10652 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && |
10653 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && | 10653 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && |
10654 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && | 10654 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && |
10655 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && | 10655 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && |
10656 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 && | 10656 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 && |
10657 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) | 10657 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) |
10658 | tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; | 10658 | tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; |
10659 | 10659 | ||
10660 | pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); | 10660 | pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); |
10661 | if (pcie_cap != 0) { | 10661 | if (pcie_cap != 0) { |
10662 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 10662 | tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; |
10663 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 10663 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
10664 | u16 lnkctl; | 10664 | u16 lnkctl; |
10665 | 10665 | ||
10666 | pci_read_config_word(tp->pdev, | 10666 | pci_read_config_word(tp->pdev, |
10667 | pcie_cap + PCI_EXP_LNKCTL, | 10667 | pcie_cap + PCI_EXP_LNKCTL, |
10668 | &lnkctl); | 10668 | &lnkctl); |
10669 | if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) | 10669 | if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) |
10670 | tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; | 10670 | tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; |
10671 | } | 10671 | } |
10672 | } | 10672 | } |
10673 | 10673 | ||
10674 | /* If we have an AMD 762 or VIA K8T800 chipset, write | 10674 | /* If we have an AMD 762 or VIA K8T800 chipset, write |
10675 | * reordering to the mailbox registers done by the host | 10675 | * reordering to the mailbox registers done by the host |
10676 | * controller can cause major trouble. We read back after | 10676 | * controller can cause major trouble. We read back after |
10677 | * every mailbox register write to force the writes to be | 10677 | * every mailbox register write to force the writes to be |
10678 | * posted to the chip in order. | 10678 | * posted to the chip in order. |
10679 | */ | 10679 | */ |
10680 | if (pci_dev_present(write_reorder_chipsets) && | 10680 | if (pci_dev_present(write_reorder_chipsets) && |
10681 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 10681 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) |
10682 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 10682 | tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; |
10683 | 10683 | ||
10684 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && | 10684 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && |
10685 | tp->pci_lat_timer < 64) { | 10685 | tp->pci_lat_timer < 64) { |
10686 | tp->pci_lat_timer = 64; | 10686 | tp->pci_lat_timer = 64; |
10687 | 10687 | ||
10688 | cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0); | 10688 | cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0); |
10689 | cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8); | 10689 | cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8); |
10690 | cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16); | 10690 | cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16); |
10691 | cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24); | 10691 | cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24); |
10692 | 10692 | ||
10693 | pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ, | 10693 | pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ, |
10694 | cacheline_sz_reg); | 10694 | cacheline_sz_reg); |
10695 | } | 10695 | } |
10696 | 10696 | ||
10697 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 10697 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
10698 | &pci_state_reg); | 10698 | &pci_state_reg); |
10699 | 10699 | ||
10700 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { | 10700 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { |
10701 | tp->tg3_flags |= TG3_FLAG_PCIX_MODE; | 10701 | tp->tg3_flags |= TG3_FLAG_PCIX_MODE; |
10702 | 10702 | ||
10703 | /* If this is a 5700 BX chipset, and we are in PCI-X | 10703 | /* If this is a 5700 BX chipset, and we are in PCI-X |
10704 | * mode, enable register write workaround. | 10704 | * mode, enable register write workaround. |
10705 | * | 10705 | * |
10706 | * The workaround is to use indirect register accesses | 10706 | * The workaround is to use indirect register accesses |
10707 | * for all chip writes not to mailbox registers. | 10707 | * for all chip writes not to mailbox registers. |
10708 | */ | 10708 | */ |
10709 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { | 10709 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { |
10710 | u32 pm_reg; | 10710 | u32 pm_reg; |
10711 | u16 pci_cmd; | 10711 | u16 pci_cmd; |
10712 | 10712 | ||
10713 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 10713 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; |
10714 | 10714 | ||
10715 | /* The chip can have its power management PCI config | 10715 | /* The chip can have its power management PCI config |
10716 | * space registers clobbered due to this bug. | 10716 | * space registers clobbered due to this bug. |
10717 | * So explicitly force the chip into D0 here. | 10717 | * So explicitly force the chip into D0 here. |
10718 | */ | 10718 | */ |
10719 | pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, | 10719 | pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, |
10720 | &pm_reg); | 10720 | &pm_reg); |
10721 | pm_reg &= ~PCI_PM_CTRL_STATE_MASK; | 10721 | pm_reg &= ~PCI_PM_CTRL_STATE_MASK; |
10722 | pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; | 10722 | pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; |
10723 | pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, | 10723 | pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, |
10724 | pm_reg); | 10724 | pm_reg); |
10725 | 10725 | ||
10726 | /* Also, force SERR#/PERR# in PCI command. */ | 10726 | /* Also, force SERR#/PERR# in PCI command. */ |
10727 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 10727 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); |
10728 | pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; | 10728 | pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; |
10729 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 10729 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); |
10730 | } | 10730 | } |
10731 | } | 10731 | } |
10732 | 10732 | ||
10733 | /* 5700 BX chips need to have their TX producer index mailboxes | 10733 | /* 5700 BX chips need to have their TX producer index mailboxes |
10734 | * written twice to work around a bug. | 10734 | * written twice to work around a bug. |
10735 | */ | 10735 | */ |
10736 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) | 10736 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) |
10737 | tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; | 10737 | tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; |
10738 | 10738 | ||
10739 | if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) | 10739 | if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) |
10740 | tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; | 10740 | tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; |
10741 | if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) | 10741 | if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) |
10742 | tp->tg3_flags |= TG3_FLAG_PCI_32BIT; | 10742 | tp->tg3_flags |= TG3_FLAG_PCI_32BIT; |
10743 | 10743 | ||
10744 | /* Chip-specific fixup from Broadcom driver */ | 10744 | /* Chip-specific fixup from Broadcom driver */ |
10745 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && | 10745 | if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && |
10746 | (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { | 10746 | (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { |
10747 | pci_state_reg |= PCISTATE_RETRY_SAME_DMA; | 10747 | pci_state_reg |= PCISTATE_RETRY_SAME_DMA; |
10748 | pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); | 10748 | pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); |
10749 | } | 10749 | } |
10750 | 10750 | ||
10751 | /* Default fast path register access methods */ | 10751 | /* Default fast path register access methods */ |
10752 | tp->read32 = tg3_read32; | 10752 | tp->read32 = tg3_read32; |
10753 | tp->write32 = tg3_write32; | 10753 | tp->write32 = tg3_write32; |
10754 | tp->read32_mbox = tg3_read32; | 10754 | tp->read32_mbox = tg3_read32; |
10755 | tp->write32_mbox = tg3_write32; | 10755 | tp->write32_mbox = tg3_write32; |
10756 | tp->write32_tx_mbox = tg3_write32; | 10756 | tp->write32_tx_mbox = tg3_write32; |
10757 | tp->write32_rx_mbox = tg3_write32; | 10757 | tp->write32_rx_mbox = tg3_write32; |
10758 | 10758 | ||
10759 | /* Various workaround register access methods */ | 10759 | /* Various workaround register access methods */ |
10760 | if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) | 10760 | if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) |
10761 | tp->write32 = tg3_write_indirect_reg32; | 10761 | tp->write32 = tg3_write_indirect_reg32; |
10762 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 10762 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || |
10763 | ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 10763 | ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && |
10764 | tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { | 10764 | tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { |
10765 | /* | 10765 | /* |
10766 | * Back-to-back register writes can cause problems on these | 10766 | * Back-to-back register writes can cause problems on these |
10767 | * chips; the workaround is to read back all reg writes | 10767 | * chips; the workaround is to read back all reg writes |
10768 | * except those to mailbox regs. | 10768 | * except those to mailbox regs. |
10769 | * | 10769 | * |
10770 | * See tg3_write_indirect_reg32(). | 10770 | * See tg3_write_indirect_reg32(). |
10771 | */ | 10771 | */ |
10772 | tp->write32 = tg3_write_flush_reg32; | 10772 | tp->write32 = tg3_write_flush_reg32; |
10773 | } | 10773 | } |
10774 | 10774 | ||
10775 | 10775 | ||
10776 | if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || | 10776 | if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || |
10777 | (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { | 10777 | (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { |
10778 | tp->write32_tx_mbox = tg3_write32_tx_mbox; | 10778 | tp->write32_tx_mbox = tg3_write32_tx_mbox; |
10779 | if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) | 10779 | if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) |
10780 | tp->write32_rx_mbox = tg3_write_flush_reg32; | 10780 | tp->write32_rx_mbox = tg3_write_flush_reg32; |
10781 | } | 10781 | } |
10782 | 10782 | ||
10783 | if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { | 10783 | if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { |
10784 | tp->read32 = tg3_read_indirect_reg32; | 10784 | tp->read32 = tg3_read_indirect_reg32; |
10785 | tp->write32 = tg3_write_indirect_reg32; | 10785 | tp->write32 = tg3_write_indirect_reg32; |
10786 | tp->read32_mbox = tg3_read_indirect_mbox; | 10786 | tp->read32_mbox = tg3_read_indirect_mbox; |
10787 | tp->write32_mbox = tg3_write_indirect_mbox; | 10787 | tp->write32_mbox = tg3_write_indirect_mbox; |
10788 | tp->write32_tx_mbox = tg3_write_indirect_mbox; | 10788 | tp->write32_tx_mbox = tg3_write_indirect_mbox; |
10789 | tp->write32_rx_mbox = tg3_write_indirect_mbox; | 10789 | tp->write32_rx_mbox = tg3_write_indirect_mbox; |
10790 | 10790 | ||
10791 | iounmap(tp->regs); | 10791 | iounmap(tp->regs); |
10792 | tp->regs = NULL; | 10792 | tp->regs = NULL; |
10793 | 10793 | ||
10794 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 10794 | pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); |
10795 | pci_cmd &= ~PCI_COMMAND_MEMORY; | 10795 | pci_cmd &= ~PCI_COMMAND_MEMORY; |
10796 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 10796 | pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); |
10797 | } | 10797 | } |
10798 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 10798 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
10799 | tp->read32_mbox = tg3_read32_mbox_5906; | 10799 | tp->read32_mbox = tg3_read32_mbox_5906; |
10800 | tp->write32_mbox = tg3_write32_mbox_5906; | 10800 | tp->write32_mbox = tg3_write32_mbox_5906; |
10801 | tp->write32_tx_mbox = tg3_write32_mbox_5906; | 10801 | tp->write32_tx_mbox = tg3_write32_mbox_5906; |
10802 | tp->write32_rx_mbox = tg3_write32_mbox_5906; | 10802 | tp->write32_rx_mbox = tg3_write32_mbox_5906; |
10803 | } | 10803 | } |
10804 | 10804 | ||
10805 | if (tp->write32 == tg3_write_indirect_reg32 || | 10805 | if (tp->write32 == tg3_write_indirect_reg32 || |
10806 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 10806 | ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && |
10807 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 10807 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
10808 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) | 10808 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) |
10809 | tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; | 10809 | tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; |
10810 | 10810 | ||
10811 | /* Get eeprom hw config before calling tg3_set_power_state(). | 10811 | /* Get eeprom hw config before calling tg3_set_power_state(). |
10812 | * In particular, the TG3_FLG2_IS_NIC flag must be | 10812 | * In particular, the TG3_FLG2_IS_NIC flag must be |
10813 | * determined before calling tg3_set_power_state() so that | 10813 | * determined before calling tg3_set_power_state() so that |
10814 | * we know whether or not to switch out of Vaux power. | 10814 | * we know whether or not to switch out of Vaux power. |
10815 | * When the flag is set, it means that GPIO1 is used for eeprom | 10815 | * When the flag is set, it means that GPIO1 is used for eeprom |
10816 | * write protect and also implies that it is a LOM where GPIOs | 10816 | * write protect and also implies that it is a LOM where GPIOs |
10817 | * are not used to switch power. | 10817 | * are not used to switch power. |
10818 | */ | 10818 | */ |
10819 | tg3_get_eeprom_hw_cfg(tp); | 10819 | tg3_get_eeprom_hw_cfg(tp); |
10820 | 10820 | ||
10821 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). | 10821 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). |
10822 | * GPIO1 driven high will bring 5700's external PHY out of reset. | 10822 | * GPIO1 driven high will bring 5700's external PHY out of reset. |
10823 | * It is also used as eeprom write protect on LOMs. | 10823 | * It is also used as eeprom write protect on LOMs. |
10824 | */ | 10824 | */ |
10825 | tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; | 10825 | tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; |
10826 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 10826 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || |
10827 | (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) | 10827 | (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) |
10828 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 10828 | tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | |
10829 | GRC_LCLCTRL_GPIO_OUTPUT1); | 10829 | GRC_LCLCTRL_GPIO_OUTPUT1); |
10830 | /* Unused GPIO3 must be driven as output on 5752 because there | 10830 | /* Unused GPIO3 must be driven as output on 5752 because there |
10831 | * are no pull-up resistors on unused GPIO pins. | 10831 | * are no pull-up resistors on unused GPIO pins. |
10832 | */ | 10832 | */ |
10833 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 10833 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) |
10834 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 10834 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; |
10835 | 10835 | ||
10836 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 10836 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
10837 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 10837 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; |
10838 | 10838 | ||
10839 | /* Force the chip into D0. */ | 10839 | /* Force the chip into D0. */ |
10840 | err = tg3_set_power_state(tp, PCI_D0); | 10840 | err = tg3_set_power_state(tp, PCI_D0); |
10841 | if (err) { | 10841 | if (err) { |
10842 | printk(KERN_ERR PFX "(%s) transition to D0 failed\n", | 10842 | printk(KERN_ERR PFX "(%s) transition to D0 failed\n", |
10843 | pci_name(tp->pdev)); | 10843 | pci_name(tp->pdev)); |
10844 | return err; | 10844 | return err; |
10845 | } | 10845 | } |
10846 | 10846 | ||
10847 | /* 5700 B0 chips do not support checksumming correctly due | 10847 | /* 5700 B0 chips do not support checksumming correctly due |
10848 | * to hardware bugs. | 10848 | * to hardware bugs. |
10849 | */ | 10849 | */ |
10850 | if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) | 10850 | if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) |
10851 | tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; | 10851 | tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; |
10852 | 10852 | ||
10853 | /* Derive initial jumbo mode from MTU assigned in | 10853 | /* Derive initial jumbo mode from MTU assigned in |
10854 | * ether_setup() via the alloc_etherdev() call | 10854 | * ether_setup() via the alloc_etherdev() call |
10855 | */ | 10855 | */ |
10856 | if (tp->dev->mtu > ETH_DATA_LEN && | 10856 | if (tp->dev->mtu > ETH_DATA_LEN && |
10857 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 10857 | !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) |
10858 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | 10858 | tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; |
10859 | 10859 | ||
10860 | /* Determine WakeOnLan speed to use. */ | 10860 | /* Determine WakeOnLan speed to use. */ |
10861 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 10861 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
10862 | tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 10862 | tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || |
10863 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || | 10863 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || |
10864 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { | 10864 | tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { |
10865 | tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); | 10865 | tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); |
10866 | } else { | 10866 | } else { |
10867 | tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; | 10867 | tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; |
10868 | } | 10868 | } |
10869 | 10869 | ||
10870 | /* A few boards don't want the Ethernet@WireSpeed phy feature */ | 10870 | /* A few boards don't want the Ethernet@WireSpeed phy feature */ |
10871 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 10871 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || |
10872 | ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && | 10872 | ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && |
10873 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && | 10873 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && |
10874 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || | 10874 | (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || |
10875 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) || | 10875 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) || |
10876 | (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 10876 | (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) |
10877 | tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; | 10877 | tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; |
10878 | 10878 | ||
10879 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || | 10879 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || |
10880 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) | 10880 | GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) |
10881 | tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; | 10881 | tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; |
10882 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) | 10882 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) |
10883 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; | 10883 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; |
10884 | 10884 | ||
10885 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 10885 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
10886 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 10886 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
10887 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { | 10887 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { |
10888 | if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && | 10888 | if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && |
10889 | tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) | 10889 | tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) |
10890 | tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; | 10890 | tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; |
10891 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) | 10891 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) |
10892 | tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; | 10892 | tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; |
10893 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) | 10893 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) |
10894 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | 10894 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; |
10895 | } | 10895 | } |
10896 | 10896 | ||
10897 | tp->coalesce_mode = 0; | 10897 | tp->coalesce_mode = 0; |
10898 | if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && | 10898 | if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && |
10899 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) | 10899 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) |
10900 | tp->coalesce_mode |= HOSTCC_MODE_32BYTE; | 10900 | tp->coalesce_mode |= HOSTCC_MODE_32BYTE; |
10901 | 10901 | ||
10902 | /* Initialize MAC MI mode, polling disabled. */ | 10902 | /* Initialize MAC MI mode, polling disabled. */ |
10903 | tw32_f(MAC_MI_MODE, tp->mi_mode); | 10903 | tw32_f(MAC_MI_MODE, tp->mi_mode); |
10904 | udelay(80); | 10904 | udelay(80); |
10905 | 10905 | ||
10906 | /* Initialize data/descriptor byte/word swapping. */ | 10906 | /* Initialize data/descriptor byte/word swapping. */ |
10907 | val = tr32(GRC_MODE); | 10907 | val = tr32(GRC_MODE); |
10908 | val &= GRC_MODE_HOST_STACKUP; | 10908 | val &= GRC_MODE_HOST_STACKUP; |
10909 | tw32(GRC_MODE, val | tp->grc_mode); | 10909 | tw32(GRC_MODE, val | tp->grc_mode); |
10910 | 10910 | ||
10911 | tg3_switch_clocks(tp); | 10911 | tg3_switch_clocks(tp); |
10912 | 10912 | ||
10913 | /* Clear this out for sanity. */ | 10913 | /* Clear this out for sanity. */ |
10914 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 10914 | tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); |
10915 | 10915 | ||
10916 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 10916 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
10917 | &pci_state_reg); | 10917 | &pci_state_reg); |
10918 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && | 10918 | if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && |
10919 | (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { | 10919 | (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { |
10920 | u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); | 10920 | u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); |
10921 | 10921 | ||
10922 | if (chiprevid == CHIPREV_ID_5701_A0 || | 10922 | if (chiprevid == CHIPREV_ID_5701_A0 || |
10923 | chiprevid == CHIPREV_ID_5701_B0 || | 10923 | chiprevid == CHIPREV_ID_5701_B0 || |
10924 | chiprevid == CHIPREV_ID_5701_B2 || | 10924 | chiprevid == CHIPREV_ID_5701_B2 || |
10925 | chiprevid == CHIPREV_ID_5701_B5) { | 10925 | chiprevid == CHIPREV_ID_5701_B5) { |
10926 | void __iomem *sram_base; | 10926 | void __iomem *sram_base; |
10927 | 10927 | ||
10928 | /* Write some dummy words into the SRAM status block | 10928 | /* Write some dummy words into the SRAM status block |
10929 | * area and see if they read back correctly. If the return | 10929 | * area and see if they read back correctly. If the return |
10930 | * value is bad, force enable the PCIX workaround. | 10930 | * value is bad, force enable the PCIX workaround. |
10931 | */ | 10931 | */ |
10932 | sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; | 10932 | sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; |
10933 | 10933 | ||
10934 | writel(0x00000000, sram_base); | 10934 | writel(0x00000000, sram_base); |
10935 | writel(0x00000000, sram_base + 4); | 10935 | writel(0x00000000, sram_base + 4); |
10936 | writel(0xffffffff, sram_base + 4); | 10936 | writel(0xffffffff, sram_base + 4); |
10937 | if (readl(sram_base) != 0x00000000) | 10937 | if (readl(sram_base) != 0x00000000) |
10938 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 10938 | tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; |
10939 | } | 10939 | } |
10940 | } | 10940 | } |
10941 | 10941 | ||
10942 | udelay(50); | 10942 | udelay(50); |
10943 | tg3_nvram_init(tp); | 10943 | tg3_nvram_init(tp); |
10944 | 10944 | ||
10945 | grc_misc_cfg = tr32(GRC_MISC_CFG); | 10945 | grc_misc_cfg = tr32(GRC_MISC_CFG); |
10946 | grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; | 10946 | grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; |
10947 | 10947 | ||
10948 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 10948 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
10949 | (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || | 10949 | (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || |
10950 | grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) | 10950 | grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) |
10951 | tp->tg3_flags2 |= TG3_FLG2_IS_5788; | 10951 | tp->tg3_flags2 |= TG3_FLG2_IS_5788; |
10952 | 10952 | ||
10953 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 10953 | if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && |
10954 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) | 10954 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) |
10955 | tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; | 10955 | tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; |
10956 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 10956 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { |
10957 | tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | | 10957 | tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | |
10958 | HOSTCC_MODE_CLRTICK_TXBD); | 10958 | HOSTCC_MODE_CLRTICK_TXBD); |
10959 | 10959 | ||
10960 | tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; | 10960 | tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; |
10961 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 10961 | pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, |
10962 | tp->misc_host_ctrl); | 10962 | tp->misc_host_ctrl); |
10963 | } | 10963 | } |
10964 | 10964 | ||
10965 | /* these are limited to 10/100 only */ | 10965 | /* these are limited to 10/100 only */ |
10966 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && | 10966 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && |
10967 | (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || | 10967 | (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || |
10968 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 10968 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && |
10969 | tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && | 10969 | tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && |
10970 | (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || | 10970 | (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || |
10971 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || | 10971 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || |
10972 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || | 10972 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || |
10973 | (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && | 10973 | (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && |
10974 | (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || | 10974 | (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || |
10975 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || | 10975 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || |
10976 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || | 10976 | tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || |
10977 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 10977 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
10978 | tp->tg3_flags |= TG3_FLAG_10_100_ONLY; | 10978 | tp->tg3_flags |= TG3_FLAG_10_100_ONLY; |
10979 | 10979 | ||
10980 | err = tg3_phy_probe(tp); | 10980 | err = tg3_phy_probe(tp); |
10981 | if (err) { | 10981 | if (err) { |
10982 | printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", | 10982 | printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", |
10983 | pci_name(tp->pdev), err); | 10983 | pci_name(tp->pdev), err); |
10984 | /* ... but do not return immediately ... */ | 10984 | /* ... but do not return immediately ... */ |
10985 | } | 10985 | } |
10986 | 10986 | ||
10987 | tg3_read_partno(tp); | 10987 | tg3_read_partno(tp); |
10988 | tg3_read_fw_ver(tp); | 10988 | tg3_read_fw_ver(tp); |
10989 | 10989 | ||
10990 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 10990 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
10991 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; | 10991 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; |
10992 | } else { | 10992 | } else { |
10993 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) | 10993 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) |
10994 | tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; | 10994 | tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; |
10995 | else | 10995 | else |
10996 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; | 10996 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; |
10997 | } | 10997 | } |
10998 | 10998 | ||
10999 | /* 5700 {AX,BX} chips have a broken status block link | 10999 | /* 5700 {AX,BX} chips have a broken status block link |
11000 | * change bit implementation, so we must use the | 11000 | * change bit implementation, so we must use the |
11001 | * status register in those cases. | 11001 | * status register in those cases. |
11002 | */ | 11002 | */ |
11003 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) | 11003 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) |
11004 | tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; | 11004 | tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; |
11005 | else | 11005 | else |
11006 | tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; | 11006 | tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; |
11007 | 11007 | ||
11008 | /* The led_ctrl is set during tg3_phy_probe; here we might | 11008 | /* The led_ctrl is set during tg3_phy_probe; here we might |
11009 | * have to force the link status polling mechanism based | 11009 | * have to force the link status polling mechanism based |
11010 | * upon subsystem IDs. | 11010 | * upon subsystem IDs. |
11011 | */ | 11011 | */ |
11012 | if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && | 11012 | if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && |
11013 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 11013 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
11014 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 11014 | !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { |
11015 | tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | | 11015 | tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | |
11016 | TG3_FLAG_USE_LINKCHG_REG); | 11016 | TG3_FLAG_USE_LINKCHG_REG); |
11017 | } | 11017 | } |
11018 | 11018 | ||
11019 | /* For all SERDES we poll the MAC status register. */ | 11019 | /* For all SERDES we poll the MAC status register. */ |
11020 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 11020 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) |
11021 | tp->tg3_flags |= TG3_FLAG_POLL_SERDES; | 11021 | tp->tg3_flags |= TG3_FLAG_POLL_SERDES; |
11022 | else | 11022 | else |
11023 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; | 11023 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; |
11024 | 11024 | ||
11025 | /* All chips before 5787 can get confused if TX buffers | 11025 | /* All chips before 5787 can get confused if TX buffers |
11026 | * straddle the 4GB address boundary in some cases. | 11026 | * straddle the 4GB address boundary in some cases. |
11027 | */ | 11027 | */ |
11028 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 11028 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
11029 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 11029 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
11030 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 11030 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
11031 | tp->dev->hard_start_xmit = tg3_start_xmit; | 11031 | tp->dev->hard_start_xmit = tg3_start_xmit; |
11032 | else | 11032 | else |
11033 | tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; | 11033 | tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; |
11034 | 11034 | ||
11035 | tp->rx_offset = 2; | 11035 | tp->rx_offset = 2; |
11036 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 11036 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
11037 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) | 11037 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) |
11038 | tp->rx_offset = 0; | 11038 | tp->rx_offset = 0; |
11039 | 11039 | ||
11040 | tp->rx_std_max_post = TG3_RX_RING_SIZE; | 11040 | tp->rx_std_max_post = TG3_RX_RING_SIZE; |
11041 | 11041 | ||
11042 | /* Increment the rx prod index on the rx std ring by at most | 11042 | /* Increment the rx prod index on the rx std ring by at most |
11043 | * 8 for these chips to work around hw errata. | 11043 | * 8 for these chips to work around hw errata. |
11044 | */ | 11044 | */ |
11045 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 11045 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
11046 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 11046 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
11047 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 11047 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
11048 | tp->rx_std_max_post = 8; | 11048 | tp->rx_std_max_post = 8; |
11049 | 11049 | ||
11050 | /* By default, disable wake-on-lan. User can change this | 11050 | /* By default, disable wake-on-lan. User can change this |
11051 | * using ETHTOOL_SWOL. | 11051 | * using ETHTOOL_SWOL. |
11052 | */ | 11052 | */ |
11053 | tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; | 11053 | tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; |
11054 | 11054 | ||
11055 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) | 11055 | if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) |
11056 | tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & | 11056 | tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & |
11057 | PCIE_PWR_MGMT_L1_THRESH_MSK; | 11057 | PCIE_PWR_MGMT_L1_THRESH_MSK; |
11058 | 11058 | ||
11059 | return err; | 11059 | return err; |
11060 | } | 11060 | } |
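Among the quirks handled above, tg3_get_invariants() unpacks the TG3PCI_CACHELINESZ dword into four byte-wide fields (cache line size, latency timer, header type, BIST, mirroring the standard PCI config header at offset 0x0c) and repacks them when it raises the 5703 latency timer to 64. A small standalone sketch of that pack/unpack, using hypothetical names (pci_cfg_0c, unpack_0c, pack_0c) that are not part of the driver:

/* Sketch only -- not part of the driver. Pack/unpack of four byte-wide
 * fields in one 32-bit register, as done for TG3PCI_CACHELINESZ above.
 */
#include <stdint.h>
#include <stdio.h>

struct pci_cfg_0c {
        uint8_t cacheline_sz;     /* bits  7:0  */
        uint8_t lat_timer;        /* bits 15:8  */
        uint8_t hdr_type;         /* bits 23:16 */
        uint8_t bist;             /* bits 31:24 */
};

static struct pci_cfg_0c unpack_0c(uint32_t reg)
{
        struct pci_cfg_0c f = {
                .cacheline_sz = (reg >> 0) & 0xff,
                .lat_timer    = (reg >> 8) & 0xff,
                .hdr_type     = (reg >> 16) & 0xff,
                .bist         = (reg >> 24) & 0xff,
        };
        return f;
}

static uint32_t pack_0c(struct pci_cfg_0c f)
{
        return ((uint32_t)f.cacheline_sz << 0) |
               ((uint32_t)f.lat_timer << 8) |
               ((uint32_t)f.hdr_type << 16) |
               ((uint32_t)f.bist << 24);
}

int main(void)
{
        struct pci_cfg_0c f = unpack_0c(0x00001010);

        if (f.lat_timer < 64)     /* same adjustment as the 5703 quirk */
                f.lat_timer = 64;
        printf("repacked: 0x%08x\n", pack_0c(f));   /* 0x00004010 */
        return 0;
}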
11061 | 11061 | ||
11062 | #ifdef CONFIG_SPARC | 11062 | #ifdef CONFIG_SPARC |
11063 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) | 11063 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) |
11064 | { | 11064 | { |
11065 | struct net_device *dev = tp->dev; | 11065 | struct net_device *dev = tp->dev; |
11066 | struct pci_dev *pdev = tp->pdev; | 11066 | struct pci_dev *pdev = tp->pdev; |
11067 | struct device_node *dp = pci_device_to_OF_node(pdev); | 11067 | struct device_node *dp = pci_device_to_OF_node(pdev); |
11068 | const unsigned char *addr; | 11068 | const unsigned char *addr; |
11069 | int len; | 11069 | int len; |
11070 | 11070 | ||
11071 | addr = of_get_property(dp, "local-mac-address", &len); | 11071 | addr = of_get_property(dp, "local-mac-address", &len); |
11072 | if (addr && len == 6) { | 11072 | if (addr && len == 6) { |
11073 | memcpy(dev->dev_addr, addr, 6); | 11073 | memcpy(dev->dev_addr, addr, 6); |
11074 | memcpy(dev->perm_addr, dev->dev_addr, 6); | 11074 | memcpy(dev->perm_addr, dev->dev_addr, 6); |
11075 | return 0; | 11075 | return 0; |
11076 | } | 11076 | } |
11077 | return -ENODEV; | 11077 | return -ENODEV; |
11078 | } | 11078 | } |
11079 | 11079 | ||
11080 | static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) | 11080 | static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) |
11081 | { | 11081 | { |
11082 | struct net_device *dev = tp->dev; | 11082 | struct net_device *dev = tp->dev; |
11083 | 11083 | ||
11084 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | 11084 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); |
11085 | memcpy(dev->perm_addr, idprom->id_ethaddr, 6); | 11085 | memcpy(dev->perm_addr, idprom->id_ethaddr, 6); |
11086 | return 0; | 11086 | return 0; |
11087 | } | 11087 | } |
11088 | #endif | 11088 | #endif |
11089 | 11089 | ||
11090 | static int __devinit tg3_get_device_address(struct tg3 *tp) | 11090 | static int __devinit tg3_get_device_address(struct tg3 *tp) |
11091 | { | 11091 | { |
11092 | struct net_device *dev = tp->dev; | 11092 | struct net_device *dev = tp->dev; |
11093 | u32 hi, lo, mac_offset; | 11093 | u32 hi, lo, mac_offset; |
11094 | int addr_ok = 0; | 11094 | int addr_ok = 0; |
11095 | 11095 | ||
11096 | #ifdef CONFIG_SPARC | 11096 | #ifdef CONFIG_SPARC |
11097 | if (!tg3_get_macaddr_sparc(tp)) | 11097 | if (!tg3_get_macaddr_sparc(tp)) |
11098 | return 0; | 11098 | return 0; |
11099 | #endif | 11099 | #endif |
11100 | 11100 | ||
11101 | mac_offset = 0x7c; | 11101 | mac_offset = 0x7c; |
11102 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || | 11102 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || |
11103 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 11103 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { |
11104 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 11104 | if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) |
11105 | mac_offset = 0xcc; | 11105 | mac_offset = 0xcc; |
11106 | if (tg3_nvram_lock(tp)) | 11106 | if (tg3_nvram_lock(tp)) |
11107 | tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); | 11107 | tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); |
11108 | else | 11108 | else |
11109 | tg3_nvram_unlock(tp); | 11109 | tg3_nvram_unlock(tp); |
11110 | } | 11110 | } |
11111 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 11111 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
11112 | mac_offset = 0x10; | 11112 | mac_offset = 0x10; |
11113 | 11113 | ||
11114 | /* First try to get it from MAC address mailbox. */ | 11114 | /* First try to get it from MAC address mailbox. */ |
11115 | tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); | 11115 | tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); |
11116 | if ((hi >> 16) == 0x484b) { | 11116 | if ((hi >> 16) == 0x484b) { |
11117 | dev->dev_addr[0] = (hi >> 8) & 0xff; | 11117 | dev->dev_addr[0] = (hi >> 8) & 0xff; |
11118 | dev->dev_addr[1] = (hi >> 0) & 0xff; | 11118 | dev->dev_addr[1] = (hi >> 0) & 0xff; |
11119 | 11119 | ||
11120 | tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); | 11120 | tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); |
11121 | dev->dev_addr[2] = (lo >> 24) & 0xff; | 11121 | dev->dev_addr[2] = (lo >> 24) & 0xff; |
11122 | dev->dev_addr[3] = (lo >> 16) & 0xff; | 11122 | dev->dev_addr[3] = (lo >> 16) & 0xff; |
11123 | dev->dev_addr[4] = (lo >> 8) & 0xff; | 11123 | dev->dev_addr[4] = (lo >> 8) & 0xff; |
11124 | dev->dev_addr[5] = (lo >> 0) & 0xff; | 11124 | dev->dev_addr[5] = (lo >> 0) & 0xff; |
11125 | 11125 | ||
11126 | /* Some old bootcode may report a 0 MAC address in SRAM */ | 11126 | /* Some old bootcode may report a 0 MAC address in SRAM */ |
11127 | addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); | 11127 | addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); |
11128 | } | 11128 | } |
11129 | if (!addr_ok) { | 11129 | if (!addr_ok) { |
11130 | /* Next, try NVRAM. */ | 11130 | /* Next, try NVRAM. */ |
11131 | if (!tg3_nvram_read(tp, mac_offset + 0, &hi) && | 11131 | if (!tg3_nvram_read(tp, mac_offset + 0, &hi) && |
11132 | !tg3_nvram_read(tp, mac_offset + 4, &lo)) { | 11132 | !tg3_nvram_read(tp, mac_offset + 4, &lo)) { |
11133 | dev->dev_addr[0] = ((hi >> 16) & 0xff); | 11133 | dev->dev_addr[0] = ((hi >> 16) & 0xff); |
11134 | dev->dev_addr[1] = ((hi >> 24) & 0xff); | 11134 | dev->dev_addr[1] = ((hi >> 24) & 0xff); |
11135 | dev->dev_addr[2] = ((lo >> 0) & 0xff); | 11135 | dev->dev_addr[2] = ((lo >> 0) & 0xff); |
11136 | dev->dev_addr[3] = ((lo >> 8) & 0xff); | 11136 | dev->dev_addr[3] = ((lo >> 8) & 0xff); |
11137 | dev->dev_addr[4] = ((lo >> 16) & 0xff); | 11137 | dev->dev_addr[4] = ((lo >> 16) & 0xff); |
11138 | dev->dev_addr[5] = ((lo >> 24) & 0xff); | 11138 | dev->dev_addr[5] = ((lo >> 24) & 0xff); |
11139 | } | 11139 | } |
11140 | /* Finally just fetch it out of the MAC control regs. */ | 11140 | /* Finally just fetch it out of the MAC control regs. */ |
11141 | else { | 11141 | else { |
11142 | hi = tr32(MAC_ADDR_0_HIGH); | 11142 | hi = tr32(MAC_ADDR_0_HIGH); |
11143 | lo = tr32(MAC_ADDR_0_LOW); | 11143 | lo = tr32(MAC_ADDR_0_LOW); |
11144 | 11144 | ||
11145 | dev->dev_addr[5] = lo & 0xff; | 11145 | dev->dev_addr[5] = lo & 0xff; |
11146 | dev->dev_addr[4] = (lo >> 8) & 0xff; | 11146 | dev->dev_addr[4] = (lo >> 8) & 0xff; |
11147 | dev->dev_addr[3] = (lo >> 16) & 0xff; | 11147 | dev->dev_addr[3] = (lo >> 16) & 0xff; |
11148 | dev->dev_addr[2] = (lo >> 24) & 0xff; | 11148 | dev->dev_addr[2] = (lo >> 24) & 0xff; |
11149 | dev->dev_addr[1] = hi & 0xff; | 11149 | dev->dev_addr[1] = hi & 0xff; |
11150 | dev->dev_addr[0] = (hi >> 8) & 0xff; | 11150 | dev->dev_addr[0] = (hi >> 8) & 0xff; |
11151 | } | 11151 | } |
11152 | } | 11152 | } |
11153 | 11153 | ||
11154 | if (!is_valid_ether_addr(&dev->dev_addr[0])) { | 11154 | if (!is_valid_ether_addr(&dev->dev_addr[0])) { |
11155 | #ifdef CONFIG_SPARC64 | 11155 | #ifdef CONFIG_SPARC64 |
11156 | if (!tg3_get_default_macaddr_sparc(tp)) | 11156 | if (!tg3_get_default_macaddr_sparc(tp)) |
11157 | return 0; | 11157 | return 0; |
11158 | #endif | 11158 | #endif |
11159 | return -EINVAL; | 11159 | return -EINVAL; |
11160 | } | 11160 | } |
11161 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 11161 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
11162 | return 0; | 11162 | return 0; |
11163 | } | 11163 | } |
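
Annotation (not part of the diff): the mailbox path above unpacks the station address from two 32-bit SRAM words; the check for 0x484b in the top half of the high word is ASCII "HK", which evidently serves as a validity tag, with address bytes 0-1 in the low 16 bits of the high word and bytes 2-5 in the low word, most significant byte first. The NVRAM fallback a few lines later uses a different byte ordering, which is why the two branches shift differently. A minimal user-space sketch of the mailbox unpacking, with made-up hi/lo values standing in for the SRAM reads:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch only: hi/lo are stand-ins for the NIC_SRAM_MAC_ADDR_{HIGH,LOW}_MBOX reads. */
    int main(void)
    {
        uint32_t hi = 0x484b0010;       /* 0x484b tag + address bytes 0..1 */
        uint32_t lo = 0x18c0ffee;       /* address bytes 2..5, MSB first   */
        uint8_t addr[6];

        if ((hi >> 16) != 0x484b)       /* same validity check as the driver */
            return 1;

        addr[0] = (hi >> 8) & 0xff;
        addr[1] = hi & 0xff;
        addr[2] = (lo >> 24) & 0xff;
        addr[3] = (lo >> 16) & 0xff;
        addr[4] = (lo >> 8) & 0xff;
        addr[5] = lo & 0xff;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;                       /* prints 00:10:18:c0:ff:ee for the sample words */
    }
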
11164 | 11164 | ||
11165 | #define BOUNDARY_SINGLE_CACHELINE 1 | 11165 | #define BOUNDARY_SINGLE_CACHELINE 1 |
11166 | #define BOUNDARY_MULTI_CACHELINE 2 | 11166 | #define BOUNDARY_MULTI_CACHELINE 2 |
11167 | 11167 | ||
11168 | static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | 11168 | static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) |
11169 | { | 11169 | { |
11170 | int cacheline_size; | 11170 | int cacheline_size; |
11171 | u8 byte; | 11171 | u8 byte; |
11172 | int goal; | 11172 | int goal; |
11173 | 11173 | ||
11174 | pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); | 11174 | pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); |
11175 | if (byte == 0) | 11175 | if (byte == 0) |
11176 | cacheline_size = 1024; | 11176 | cacheline_size = 1024; |
11177 | else | 11177 | else |
11178 | cacheline_size = (int) byte * 4; | 11178 | cacheline_size = (int) byte * 4; |
11179 | 11179 | ||
11180 | /* On 5703 and later chips, the boundary bits have no | 11180 | /* On 5703 and later chips, the boundary bits have no |
11181 | * effect. | 11181 | * effect. |
11182 | */ | 11182 | */ |
11183 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 11183 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
11184 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | 11184 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && |
11185 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 11185 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) |
11186 | goto out; | 11186 | goto out; |
11187 | 11187 | ||
11188 | #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) | 11188 | #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) |
11189 | goal = BOUNDARY_MULTI_CACHELINE; | 11189 | goal = BOUNDARY_MULTI_CACHELINE; |
11190 | #else | 11190 | #else |
11191 | #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) | 11191 | #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) |
11192 | goal = BOUNDARY_SINGLE_CACHELINE; | 11192 | goal = BOUNDARY_SINGLE_CACHELINE; |
11193 | #else | 11193 | #else |
11194 | goal = 0; | 11194 | goal = 0; |
11195 | #endif | 11195 | #endif |
11196 | #endif | 11196 | #endif |
11197 | 11197 | ||
11198 | if (!goal) | 11198 | if (!goal) |
11199 | goto out; | 11199 | goto out; |
11200 | 11200 | ||
11201 | /* PCI controllers on most RISC systems tend to disconnect | 11201 | /* PCI controllers on most RISC systems tend to disconnect |
11202 | * when a device tries to burst across a cache-line boundary. | 11202 | * when a device tries to burst across a cache-line boundary. |
11203 | * Therefore, letting tg3 do so just wastes PCI bandwidth. | 11203 | * Therefore, letting tg3 do so just wastes PCI bandwidth. |
11204 | * | 11204 | * |
11205 | * Unfortunately, for PCI-E there are only limited | 11205 | * Unfortunately, for PCI-E there are only limited |
11206 | * write-side controls for this, and thus for reads | 11206 | * write-side controls for this, and thus for reads |
11207 | * we will still get the disconnects. We'll also waste | 11207 | * we will still get the disconnects. We'll also waste |
11208 | * these PCI cycles for both read and write for chips | 11208 | * these PCI cycles for both read and write for chips |
11209 | * other than 5700 and 5701 which do not implement the | 11209 | * other than 5700 and 5701 which do not implement the |
11210 | * boundary bits. | 11210 | * boundary bits. |
11211 | */ | 11211 | */ |
11212 | if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 11212 | if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && |
11213 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | 11213 | !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { |
11214 | switch (cacheline_size) { | 11214 | switch (cacheline_size) { |
11215 | case 16: | 11215 | case 16: |
11216 | case 32: | 11216 | case 32: |
11217 | case 64: | 11217 | case 64: |
11218 | case 128: | 11218 | case 128: |
11219 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11219 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11220 | val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | | 11220 | val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | |
11221 | DMA_RWCTRL_WRITE_BNDRY_128_PCIX); | 11221 | DMA_RWCTRL_WRITE_BNDRY_128_PCIX); |
11222 | } else { | 11222 | } else { |
11223 | val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | | 11223 | val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | |
11224 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); | 11224 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); |
11225 | } | 11225 | } |
11226 | break; | 11226 | break; |
11227 | 11227 | ||
11228 | case 256: | 11228 | case 256: |
11229 | val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | | 11229 | val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | |
11230 | DMA_RWCTRL_WRITE_BNDRY_256_PCIX); | 11230 | DMA_RWCTRL_WRITE_BNDRY_256_PCIX); |
11231 | break; | 11231 | break; |
11232 | 11232 | ||
11233 | default: | 11233 | default: |
11234 | val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | | 11234 | val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | |
11235 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); | 11235 | DMA_RWCTRL_WRITE_BNDRY_384_PCIX); |
11236 | break; | 11236 | break; |
11237 | }; | 11237 | }; |
11238 | } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 11238 | } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
11239 | switch (cacheline_size) { | 11239 | switch (cacheline_size) { |
11240 | case 16: | 11240 | case 16: |
11241 | case 32: | 11241 | case 32: |
11242 | case 64: | 11242 | case 64: |
11243 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11243 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11244 | val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; | 11244 | val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; |
11245 | val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; | 11245 | val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; |
11246 | break; | 11246 | break; |
11247 | } | 11247 | } |
11248 | /* fallthrough */ | 11248 | /* fallthrough */ |
11249 | case 128: | 11249 | case 128: |
11250 | default: | 11250 | default: |
11251 | val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; | 11251 | val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; |
11252 | val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; | 11252 | val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; |
11253 | break; | 11253 | break; |
11254 | }; | 11254 | }; |
11255 | } else { | 11255 | } else { |
11256 | switch (cacheline_size) { | 11256 | switch (cacheline_size) { |
11257 | case 16: | 11257 | case 16: |
11258 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11258 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11259 | val |= (DMA_RWCTRL_READ_BNDRY_16 | | 11259 | val |= (DMA_RWCTRL_READ_BNDRY_16 | |
11260 | DMA_RWCTRL_WRITE_BNDRY_16); | 11260 | DMA_RWCTRL_WRITE_BNDRY_16); |
11261 | break; | 11261 | break; |
11262 | } | 11262 | } |
11263 | /* fallthrough */ | 11263 | /* fallthrough */ |
11264 | case 32: | 11264 | case 32: |
11265 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11265 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11266 | val |= (DMA_RWCTRL_READ_BNDRY_32 | | 11266 | val |= (DMA_RWCTRL_READ_BNDRY_32 | |
11267 | DMA_RWCTRL_WRITE_BNDRY_32); | 11267 | DMA_RWCTRL_WRITE_BNDRY_32); |
11268 | break; | 11268 | break; |
11269 | } | 11269 | } |
11270 | /* fallthrough */ | 11270 | /* fallthrough */ |
11271 | case 64: | 11271 | case 64: |
11272 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11272 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11273 | val |= (DMA_RWCTRL_READ_BNDRY_64 | | 11273 | val |= (DMA_RWCTRL_READ_BNDRY_64 | |
11274 | DMA_RWCTRL_WRITE_BNDRY_64); | 11274 | DMA_RWCTRL_WRITE_BNDRY_64); |
11275 | break; | 11275 | break; |
11276 | } | 11276 | } |
11277 | /* fallthrough */ | 11277 | /* fallthrough */ |
11278 | case 128: | 11278 | case 128: |
11279 | if (goal == BOUNDARY_SINGLE_CACHELINE) { | 11279 | if (goal == BOUNDARY_SINGLE_CACHELINE) { |
11280 | val |= (DMA_RWCTRL_READ_BNDRY_128 | | 11280 | val |= (DMA_RWCTRL_READ_BNDRY_128 | |
11281 | DMA_RWCTRL_WRITE_BNDRY_128); | 11281 | DMA_RWCTRL_WRITE_BNDRY_128); |
11282 | break; | 11282 | break; |
11283 | } | 11283 | } |
11284 | /* fallthrough */ | 11284 | /* fallthrough */ |
11285 | case 256: | 11285 | case 256: |
11286 | val |= (DMA_RWCTRL_READ_BNDRY_256 | | 11286 | val |= (DMA_RWCTRL_READ_BNDRY_256 | |
11287 | DMA_RWCTRL_WRITE_BNDRY_256); | 11287 | DMA_RWCTRL_WRITE_BNDRY_256); |
11288 | break; | 11288 | break; |
11289 | case 512: | 11289 | case 512: |
11290 | val |= (DMA_RWCTRL_READ_BNDRY_512 | | 11290 | val |= (DMA_RWCTRL_READ_BNDRY_512 | |
11291 | DMA_RWCTRL_WRITE_BNDRY_512); | 11291 | DMA_RWCTRL_WRITE_BNDRY_512); |
11292 | break; | 11292 | break; |
11293 | case 1024: | 11293 | case 1024: |
11294 | default: | 11294 | default: |
11295 | val |= (DMA_RWCTRL_READ_BNDRY_1024 | | 11295 | val |= (DMA_RWCTRL_READ_BNDRY_1024 | |
11296 | DMA_RWCTRL_WRITE_BNDRY_1024); | 11296 | DMA_RWCTRL_WRITE_BNDRY_1024); |
11297 | break; | 11297 | break; |
11298 | }; | 11298 | }; |
11299 | } | 11299 | } |
11300 | 11300 | ||
11301 | out: | 11301 | out: |
11302 | return val; | 11302 | return val; |
11303 | } | 11303 | } |
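
Annotation (not part of the diff): the PCI Cacheline Size register is specified in units of 32-bit dwords, which is why the routine above multiplies the config byte by 4 to get bytes and treats a zero register as "unknown", falling back to the worst-case 1024-byte assumption. A tiny sketch of just that conversion, with made-up register values:

    #include <stdio.h>

    /* Sketch: convert a PCI_CACHE_LINE_SIZE register value (in 32-bit
     * dwords) into bytes the way tg3_calc_dma_bndry() does. */
    static int cacheline_bytes(unsigned char reg)
    {
        return reg ? (int)reg * 4 : 1024;   /* 0 => unknown, assume 1024 */
    }

    int main(void)
    {
        printf("%d\n", cacheline_bytes(0x10));  /* 0x10 dwords -> 64 bytes */
        printf("%d\n", cacheline_bytes(0));     /* unknown     -> 1024     */
        return 0;
    }
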
11304 | 11304 | ||
11305 | static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) | 11305 | static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) |
11306 | { | 11306 | { |
11307 | struct tg3_internal_buffer_desc test_desc; | 11307 | struct tg3_internal_buffer_desc test_desc; |
11308 | u32 sram_dma_descs; | 11308 | u32 sram_dma_descs; |
11309 | int i, ret; | 11309 | int i, ret; |
11310 | 11310 | ||
11311 | sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; | 11311 | sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; |
11312 | 11312 | ||
11313 | tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); | 11313 | tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); |
11314 | tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); | 11314 | tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); |
11315 | tw32(RDMAC_STATUS, 0); | 11315 | tw32(RDMAC_STATUS, 0); |
11316 | tw32(WDMAC_STATUS, 0); | 11316 | tw32(WDMAC_STATUS, 0); |
11317 | 11317 | ||
11318 | tw32(BUFMGR_MODE, 0); | 11318 | tw32(BUFMGR_MODE, 0); |
11319 | tw32(FTQ_RESET, 0); | 11319 | tw32(FTQ_RESET, 0); |
11320 | 11320 | ||
11321 | test_desc.addr_hi = ((u64) buf_dma) >> 32; | 11321 | test_desc.addr_hi = ((u64) buf_dma) >> 32; |
11322 | test_desc.addr_lo = buf_dma & 0xffffffff; | 11322 | test_desc.addr_lo = buf_dma & 0xffffffff; |
11323 | test_desc.nic_mbuf = 0x00002100; | 11323 | test_desc.nic_mbuf = 0x00002100; |
11324 | test_desc.len = size; | 11324 | test_desc.len = size; |
11325 | 11325 | ||
11326 | /* | 11326 | /* |
11327 | * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz | 11327 | * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz |
11328 | * the *second* time the tg3 driver was getting loaded after an | 11328 | * the *second* time the tg3 driver was getting loaded after an |
11329 | * initial scan. | 11329 | * initial scan. |
11330 | * | 11330 | * |
11331 | * Broadcom tells me: | 11331 | * Broadcom tells me: |
11332 | * ...the DMA engine is connected to the GRC block and a DMA | 11332 | * ...the DMA engine is connected to the GRC block and a DMA |
11333 | * reset may affect the GRC block in some unpredictable way... | 11333 | * reset may affect the GRC block in some unpredictable way... |
11334 | * The behavior of resets to individual blocks has not been tested. | 11334 | * The behavior of resets to individual blocks has not been tested. |
11335 | * | 11335 | * |
11336 | * Broadcom noted the GRC reset will also reset all sub-components. | 11336 | * Broadcom noted the GRC reset will also reset all sub-components. |
11337 | */ | 11337 | */ |
11338 | if (to_device) { | 11338 | if (to_device) { |
11339 | test_desc.cqid_sqid = (13 << 8) | 2; | 11339 | test_desc.cqid_sqid = (13 << 8) | 2; |
11340 | 11340 | ||
11341 | tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); | 11341 | tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); |
11342 | udelay(40); | 11342 | udelay(40); |
11343 | } else { | 11343 | } else { |
11344 | test_desc.cqid_sqid = (16 << 8) | 7; | 11344 | test_desc.cqid_sqid = (16 << 8) | 7; |
11345 | 11345 | ||
11346 | tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); | 11346 | tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); |
11347 | udelay(40); | 11347 | udelay(40); |
11348 | } | 11348 | } |
11349 | test_desc.flags = 0x00000005; | 11349 | test_desc.flags = 0x00000005; |
11350 | 11350 | ||
11351 | for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { | 11351 | for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { |
11352 | u32 val; | 11352 | u32 val; |
11353 | 11353 | ||
11354 | val = *(((u32 *)&test_desc) + i); | 11354 | val = *(((u32 *)&test_desc) + i); |
11355 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, | 11355 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, |
11356 | sram_dma_descs + (i * sizeof(u32))); | 11356 | sram_dma_descs + (i * sizeof(u32))); |
11357 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); | 11357 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); |
11358 | } | 11358 | } |
11359 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); | 11359 | pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); |
11360 | 11360 | ||
11361 | if (to_device) { | 11361 | if (to_device) { |
11362 | tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); | 11362 | tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); |
11363 | } else { | 11363 | } else { |
11364 | tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); | 11364 | tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); |
11365 | } | 11365 | } |
11366 | 11366 | ||
11367 | ret = -ENODEV; | 11367 | ret = -ENODEV; |
11368 | for (i = 0; i < 40; i++) { | 11368 | for (i = 0; i < 40; i++) { |
11369 | u32 val; | 11369 | u32 val; |
11370 | 11370 | ||
11371 | if (to_device) | 11371 | if (to_device) |
11372 | val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); | 11372 | val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); |
11373 | else | 11373 | else |
11374 | val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); | 11374 | val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); |
11375 | if ((val & 0xffff) == sram_dma_descs) { | 11375 | if ((val & 0xffff) == sram_dma_descs) { |
11376 | ret = 0; | 11376 | ret = 0; |
11377 | break; | 11377 | break; |
11378 | } | 11378 | } |
11379 | 11379 | ||
11380 | udelay(100); | 11380 | udelay(100); |
11381 | } | 11381 | } |
11382 | 11382 | ||
11383 | return ret; | 11383 | return ret; |
11384 | } | 11384 | } |
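
Annotation (not part of the diff): the completion check at the end of tg3_do_test_dma() is the usual bounded-poll pattern: enqueue the descriptor, then repeatedly read the completion FIFO enqueue/dequeue register until its low 16 bits echo the descriptor address, giving up after 40 reads spaced 100 microseconds apart. A stand-alone sketch of that pattern, with a simulated register in place of the tr32() reads:

    #include <stdio.h>
    #include <stdint.h>

    /* Simulated hardware: pretend the completion value shows up on the
     * third read. In the driver this is a tr32() of the FTQ completion
     * FIFO enqueue/dequeue register. */
    static uint32_t fake_reads;

    static uint32_t read_status(uint32_t expected)
    {
        return (++fake_reads >= 3) ? expected : 0;
    }

    static int poll_for_completion(uint32_t expected)
    {
        int i;

        for (i = 0; i < 40; i++) {          /* 40 tries, 100us apart in the driver */
            if ((read_status(expected) & 0xffff) == expected)
                return 0;                   /* hardware echoed the descriptor address */
            /* udelay(100) in the driver; omitted in this user-space sketch */
        }
        return -1;                          /* corresponds to the -ENODEV default */
    }

    int main(void)
    {
        printf("%d\n", poll_for_completion(0x2000));    /* prints 0 */
        return 0;
    }
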
11385 | 11385 | ||
11386 | #define TEST_BUFFER_SIZE 0x2000 | 11386 | #define TEST_BUFFER_SIZE 0x2000 |
11387 | 11387 | ||
11388 | static int __devinit tg3_test_dma(struct tg3 *tp) | 11388 | static int __devinit tg3_test_dma(struct tg3 *tp) |
11389 | { | 11389 | { |
11390 | dma_addr_t buf_dma; | 11390 | dma_addr_t buf_dma; |
11391 | u32 *buf, saved_dma_rwctrl; | 11391 | u32 *buf, saved_dma_rwctrl; |
11392 | int ret; | 11392 | int ret; |
11393 | 11393 | ||
11394 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); | 11394 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); |
11395 | if (!buf) { | 11395 | if (!buf) { |
11396 | ret = -ENOMEM; | 11396 | ret = -ENOMEM; |
11397 | goto out_nofree; | 11397 | goto out_nofree; |
11398 | } | 11398 | } |
11399 | 11399 | ||
11400 | tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | | 11400 | tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | |
11401 | (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); | 11401 | (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); |
11402 | 11402 | ||
11403 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); | 11403 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); |
11404 | 11404 | ||
11405 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 11405 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
11406 | /* DMA read watermark not used on PCIE */ | 11406 | /* DMA read watermark not used on PCIE */ |
11407 | tp->dma_rwctrl |= 0x00180000; | 11407 | tp->dma_rwctrl |= 0x00180000; |
11408 | } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 11408 | } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { |
11409 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || | 11409 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || |
11410 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) | 11410 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) |
11411 | tp->dma_rwctrl |= 0x003f0000; | 11411 | tp->dma_rwctrl |= 0x003f0000; |
11412 | else | 11412 | else |
11413 | tp->dma_rwctrl |= 0x003f000f; | 11413 | tp->dma_rwctrl |= 0x003f000f; |
11414 | } else { | 11414 | } else { |
11415 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 11415 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
11416 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 11416 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { |
11417 | u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); | 11417 | u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); |
11418 | u32 read_water = 0x7; | 11418 | u32 read_water = 0x7; |
11419 | 11419 | ||
11420 | /* If the 5704 is behind the EPB bridge, we can | 11420 | /* If the 5704 is behind the EPB bridge, we can |
11421 | * do the less restrictive ONE_DMA workaround for | 11421 | * do the less restrictive ONE_DMA workaround for |
11422 | * better performance. | 11422 | * better performance. |
11423 | */ | 11423 | */ |
11424 | if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && | 11424 | if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && |
11425 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 11425 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) |
11426 | tp->dma_rwctrl |= 0x8000; | 11426 | tp->dma_rwctrl |= 0x8000; |
11427 | else if (ccval == 0x6 || ccval == 0x7) | 11427 | else if (ccval == 0x6 || ccval == 0x7) |
11428 | tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; | 11428 | tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; |
11429 | 11429 | ||
11430 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) | 11430 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) |
11431 | read_water = 4; | 11431 | read_water = 4; |
11432 | /* Set bit 23 to enable PCIX hw bug fix */ | 11432 | /* Set bit 23 to enable PCIX hw bug fix */ |
11433 | tp->dma_rwctrl |= | 11433 | tp->dma_rwctrl |= |
11434 | (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | | 11434 | (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | |
11435 | (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | | 11435 | (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | |
11436 | (1 << 23); | 11436 | (1 << 23); |
11437 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { | 11437 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { |
11438 | /* 5780 always in PCIX mode */ | 11438 | /* 5780 always in PCIX mode */ |
11439 | tp->dma_rwctrl |= 0x00144000; | 11439 | tp->dma_rwctrl |= 0x00144000; |
11440 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 11440 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { |
11441 | /* 5714 always in PCIX mode */ | 11441 | /* 5714 always in PCIX mode */ |
11442 | tp->dma_rwctrl |= 0x00148000; | 11442 | tp->dma_rwctrl |= 0x00148000; |
11443 | } else { | 11443 | } else { |
11444 | tp->dma_rwctrl |= 0x001b000f; | 11444 | tp->dma_rwctrl |= 0x001b000f; |
11445 | } | 11445 | } |
11446 | } | 11446 | } |
11447 | 11447 | ||
11448 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 11448 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || |
11449 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 11449 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) |
11450 | tp->dma_rwctrl &= 0xfffffff0; | 11450 | tp->dma_rwctrl &= 0xfffffff0; |
11451 | 11451 | ||
11452 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 11452 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
11453 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 11453 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { |
11454 | /* Remove this if it causes problems for some boards. */ | 11454 | /* Remove this if it causes problems for some boards. */ |
11455 | tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; | 11455 | tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; |
11456 | 11456 | ||
11457 | /* On 5700/5701 chips, we need to set this bit. | 11457 | /* On 5700/5701 chips, we need to set this bit. |
11458 | * Otherwise the chip will issue cacheline transactions | 11458 | * Otherwise the chip will issue cacheline transactions |
11459 | * to streamable DMA memory with not all the byte | 11459 | * to streamable DMA memory with not all the byte |
11460 | * enables turned on. This is an error on several | 11460 | * enables turned on. This is an error on several |
11461 | * RISC PCI controllers, in particular sparc64. | 11461 | * RISC PCI controllers, in particular sparc64. |
11462 | * | 11462 | * |
11463 | * On 5703/5704 chips, this bit has been reassigned | 11463 | * On 5703/5704 chips, this bit has been reassigned |
11464 | * a different meaning. In particular, it is used | 11464 | * a different meaning. In particular, it is used |
11465 | * on those chips to enable a PCI-X workaround. | 11465 | * on those chips to enable a PCI-X workaround. |
11466 | */ | 11466 | */ |
11467 | tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; | 11467 | tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; |
11468 | } | 11468 | } |
11469 | 11469 | ||
11470 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 11470 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
11471 | 11471 | ||
11472 | #if 0 | 11472 | #if 0 |
11473 | /* Unneeded, already done by tg3_get_invariants. */ | 11473 | /* Unneeded, already done by tg3_get_invariants. */ |
11474 | tg3_switch_clocks(tp); | 11474 | tg3_switch_clocks(tp); |
11475 | #endif | 11475 | #endif |
11476 | 11476 | ||
11477 | ret = 0; | 11477 | ret = 0; |
11478 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 11478 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
11479 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 11479 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) |
11480 | goto out; | 11480 | goto out; |
11481 | 11481 | ||
11482 | /* It is best to perform DMA test with maximum write burst size | 11482 | /* It is best to perform DMA test with maximum write burst size |
11483 | * to expose the 5700/5701 write DMA bug. | 11483 | * to expose the 5700/5701 write DMA bug. |
11484 | */ | 11484 | */ |
11485 | saved_dma_rwctrl = tp->dma_rwctrl; | 11485 | saved_dma_rwctrl = tp->dma_rwctrl; |
11486 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 11486 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; |
11487 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 11487 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
11488 | 11488 | ||
11489 | while (1) { | 11489 | while (1) { |
11490 | u32 *p = buf, i; | 11490 | u32 *p = buf, i; |
11491 | 11491 | ||
11492 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) | 11492 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) |
11493 | p[i] = i; | 11493 | p[i] = i; |
11494 | 11494 | ||
11495 | /* Send the buffer to the chip. */ | 11495 | /* Send the buffer to the chip. */ |
11496 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); | 11496 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); |
11497 | if (ret) { | 11497 | if (ret) { |
11498 | printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret); | 11498 | printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret); |
11499 | break; | 11499 | break; |
11500 | } | 11500 | } |
11501 | 11501 | ||
11502 | #if 0 | 11502 | #if 0 |
11503 | /* validate data reached card RAM correctly. */ | 11503 | /* validate data reached card RAM correctly. */ |
11504 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { | 11504 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { |
11505 | u32 val; | 11505 | u32 val; |
11506 | tg3_read_mem(tp, 0x2100 + (i*4), &val); | 11506 | tg3_read_mem(tp, 0x2100 + (i*4), &val); |
11507 | if (le32_to_cpu(val) != p[i]) { | 11507 | if (le32_to_cpu(val) != p[i]) { |
11508 | printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i); | 11508 | printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i); |
11509 | /* ret = -ENODEV here? */ | 11509 | /* ret = -ENODEV here? */ |
11510 | } | 11510 | } |
11511 | p[i] = 0; | 11511 | p[i] = 0; |
11512 | } | 11512 | } |
11513 | #endif | 11513 | #endif |
11514 | /* Now read it back. */ | 11514 | /* Now read it back. */ |
11515 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); | 11515 | ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); |
11516 | if (ret) { | 11516 | if (ret) { |
11517 | printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret); | 11517 | printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret); |
11518 | 11518 | ||
11519 | break; | 11519 | break; |
11520 | } | 11520 | } |
11521 | 11521 | ||
11522 | /* Verify it. */ | 11522 | /* Verify it. */ |
11523 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { | 11523 | for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { |
11524 | if (p[i] == i) | 11524 | if (p[i] == i) |
11525 | continue; | 11525 | continue; |
11526 | 11526 | ||
11527 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != | 11527 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != |
11528 | DMA_RWCTRL_WRITE_BNDRY_16) { | 11528 | DMA_RWCTRL_WRITE_BNDRY_16) { |
11529 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 11529 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; |
11530 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; | 11530 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; |
11531 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 11531 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
11532 | break; | 11532 | break; |
11533 | } else { | 11533 | } else { |
11534 | printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i); | 11534 | printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i); |
11535 | ret = -ENODEV; | 11535 | ret = -ENODEV; |
11536 | goto out; | 11536 | goto out; |
11537 | } | 11537 | } |
11538 | } | 11538 | } |
11539 | 11539 | ||
11540 | if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { | 11540 | if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { |
11541 | /* Success. */ | 11541 | /* Success. */ |
11542 | ret = 0; | 11542 | ret = 0; |
11543 | break; | 11543 | break; |
11544 | } | 11544 | } |
11545 | } | 11545 | } |
11546 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != | 11546 | if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != |
11547 | DMA_RWCTRL_WRITE_BNDRY_16) { | 11547 | DMA_RWCTRL_WRITE_BNDRY_16) { |
11548 | static struct pci_device_id dma_wait_state_chipsets[] = { | 11548 | static struct pci_device_id dma_wait_state_chipsets[] = { |
11549 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, | 11549 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, |
11550 | PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, | 11550 | PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, |
11551 | { }, | 11551 | { }, |
11552 | }; | 11552 | }; |
11553 | 11553 | ||
11554 | /* DMA test passed without adjusting DMA boundary, | 11554 | /* DMA test passed without adjusting DMA boundary, |
11555 | * now look for chipsets that are known to expose the | 11555 | * now look for chipsets that are known to expose the |
11556 | * DMA bug without failing the test. | 11556 | * DMA bug without failing the test. |
11557 | */ | 11557 | */ |
11558 | if (pci_dev_present(dma_wait_state_chipsets)) { | 11558 | if (pci_dev_present(dma_wait_state_chipsets)) { |
11559 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 11559 | tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; |
11560 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; | 11560 | tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; |
11561 | } | 11561 | } |
11562 | else | 11562 | else |
11563 | /* Safe to use the calculated DMA boundary. */ | 11563 | /* Safe to use the calculated DMA boundary. */ |
11564 | tp->dma_rwctrl = saved_dma_rwctrl; | 11564 | tp->dma_rwctrl = saved_dma_rwctrl; |
11565 | 11565 | ||
11566 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 11566 | tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); |
11567 | } | 11567 | } |
11568 | 11568 | ||
11569 | out: | 11569 | out: |
11570 | pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); | 11570 | pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); |
11571 | out_nofree: | 11571 | out_nofree: |
11572 | return ret; | 11572 | return ret; |
11573 | } | 11573 | } |
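
Annotation (not part of the diff): tg3_test_dma() is a classic fill / DMA-out / DMA-in / verify cycle: the 8 KiB buffer is seeded with its own word indices, pushed to card SRAM, pulled back, and compared word by word; on the first mismatch the write boundary is tightened to 16 bytes and the loop retried, and only a mismatch at the already-tightened setting fails with -ENODEV. A user-space sketch of the seed-and-verify part, with a memcpy standing in for the two DMA passes:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define TEST_WORDS (0x2000 / sizeof(uint32_t))  /* same 8 KiB as TEST_BUFFER_SIZE */

    int main(void)
    {
        static uint32_t src[TEST_WORDS], dst[TEST_WORDS];
        size_t i;

        for (i = 0; i < TEST_WORDS; i++)    /* seed with an index pattern */
            src[i] = (uint32_t)i;

        /* In the driver this is tg3_do_test_dma() out to the chip and back
         * again; here one memcpy stands in for both passes. */
        memcpy(dst, src, sizeof(src));

        for (i = 0; i < TEST_WORDS; i++) {
            if (dst[i] != (uint32_t)i) {
                printf("corrupted at word %zu\n", i);
                return 1;
            }
        }
        printf("DMA pattern verified\n");
        return 0;
    }
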
11574 | 11574 | ||
11575 | static void __devinit tg3_init_link_config(struct tg3 *tp) | 11575 | static void __devinit tg3_init_link_config(struct tg3 *tp) |
11576 | { | 11576 | { |
11577 | tp->link_config.advertising = | 11577 | tp->link_config.advertising = |
11578 | (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | 11578 | (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | |
11579 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | 11579 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | |
11580 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | | 11580 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | |
11581 | ADVERTISED_Autoneg | ADVERTISED_MII); | 11581 | ADVERTISED_Autoneg | ADVERTISED_MII); |
11582 | tp->link_config.speed = SPEED_INVALID; | 11582 | tp->link_config.speed = SPEED_INVALID; |
11583 | tp->link_config.duplex = DUPLEX_INVALID; | 11583 | tp->link_config.duplex = DUPLEX_INVALID; |
11584 | tp->link_config.autoneg = AUTONEG_ENABLE; | 11584 | tp->link_config.autoneg = AUTONEG_ENABLE; |
11585 | tp->link_config.active_speed = SPEED_INVALID; | 11585 | tp->link_config.active_speed = SPEED_INVALID; |
11586 | tp->link_config.active_duplex = DUPLEX_INVALID; | 11586 | tp->link_config.active_duplex = DUPLEX_INVALID; |
11587 | tp->link_config.phy_is_low_power = 0; | 11587 | tp->link_config.phy_is_low_power = 0; |
11588 | tp->link_config.orig_speed = SPEED_INVALID; | 11588 | tp->link_config.orig_speed = SPEED_INVALID; |
11589 | tp->link_config.orig_duplex = DUPLEX_INVALID; | 11589 | tp->link_config.orig_duplex = DUPLEX_INVALID; |
11590 | tp->link_config.orig_autoneg = AUTONEG_INVALID; | 11590 | tp->link_config.orig_autoneg = AUTONEG_INVALID; |
11591 | } | 11591 | } |
11592 | 11592 | ||
11593 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | 11593 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) |
11594 | { | 11594 | { |
11595 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 11595 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
11596 | tp->bufmgr_config.mbuf_read_dma_low_water = | 11596 | tp->bufmgr_config.mbuf_read_dma_low_water = |
11597 | DEFAULT_MB_RDMA_LOW_WATER_5705; | 11597 | DEFAULT_MB_RDMA_LOW_WATER_5705; |
11598 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 11598 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
11599 | DEFAULT_MB_MACRX_LOW_WATER_5705; | 11599 | DEFAULT_MB_MACRX_LOW_WATER_5705; |
11600 | tp->bufmgr_config.mbuf_high_water = | 11600 | tp->bufmgr_config.mbuf_high_water = |
11601 | DEFAULT_MB_HIGH_WATER_5705; | 11601 | DEFAULT_MB_HIGH_WATER_5705; |
11602 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 11602 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
11603 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 11603 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
11604 | DEFAULT_MB_MACRX_LOW_WATER_5906; | 11604 | DEFAULT_MB_MACRX_LOW_WATER_5906; |
11605 | tp->bufmgr_config.mbuf_high_water = | 11605 | tp->bufmgr_config.mbuf_high_water = |
11606 | DEFAULT_MB_HIGH_WATER_5906; | 11606 | DEFAULT_MB_HIGH_WATER_5906; |
11607 | } | 11607 | } |
11608 | 11608 | ||
11609 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = | 11609 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = |
11610 | DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; | 11610 | DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; |
11611 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = | 11611 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = |
11612 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; | 11612 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; |
11613 | tp->bufmgr_config.mbuf_high_water_jumbo = | 11613 | tp->bufmgr_config.mbuf_high_water_jumbo = |
11614 | DEFAULT_MB_HIGH_WATER_JUMBO_5780; | 11614 | DEFAULT_MB_HIGH_WATER_JUMBO_5780; |
11615 | } else { | 11615 | } else { |
11616 | tp->bufmgr_config.mbuf_read_dma_low_water = | 11616 | tp->bufmgr_config.mbuf_read_dma_low_water = |
11617 | DEFAULT_MB_RDMA_LOW_WATER; | 11617 | DEFAULT_MB_RDMA_LOW_WATER; |
11618 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 11618 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
11619 | DEFAULT_MB_MACRX_LOW_WATER; | 11619 | DEFAULT_MB_MACRX_LOW_WATER; |
11620 | tp->bufmgr_config.mbuf_high_water = | 11620 | tp->bufmgr_config.mbuf_high_water = |
11621 | DEFAULT_MB_HIGH_WATER; | 11621 | DEFAULT_MB_HIGH_WATER; |
11622 | 11622 | ||
11623 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = | 11623 | tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = |
11624 | DEFAULT_MB_RDMA_LOW_WATER_JUMBO; | 11624 | DEFAULT_MB_RDMA_LOW_WATER_JUMBO; |
11625 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = | 11625 | tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = |
11626 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO; | 11626 | DEFAULT_MB_MACRX_LOW_WATER_JUMBO; |
11627 | tp->bufmgr_config.mbuf_high_water_jumbo = | 11627 | tp->bufmgr_config.mbuf_high_water_jumbo = |
11628 | DEFAULT_MB_HIGH_WATER_JUMBO; | 11628 | DEFAULT_MB_HIGH_WATER_JUMBO; |
11629 | } | 11629 | } |
11630 | 11630 | ||
11631 | tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; | 11631 | tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; |
11632 | tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; | 11632 | tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; |
11633 | } | 11633 | } |
11634 | 11634 | ||
11635 | static char * __devinit tg3_phy_string(struct tg3 *tp) | 11635 | static char * __devinit tg3_phy_string(struct tg3 *tp) |
11636 | { | 11636 | { |
11637 | switch (tp->phy_id & PHY_ID_MASK) { | 11637 | switch (tp->phy_id & PHY_ID_MASK) { |
11638 | case PHY_ID_BCM5400: return "5400"; | 11638 | case PHY_ID_BCM5400: return "5400"; |
11639 | case PHY_ID_BCM5401: return "5401"; | 11639 | case PHY_ID_BCM5401: return "5401"; |
11640 | case PHY_ID_BCM5411: return "5411"; | 11640 | case PHY_ID_BCM5411: return "5411"; |
11641 | case PHY_ID_BCM5701: return "5701"; | 11641 | case PHY_ID_BCM5701: return "5701"; |
11642 | case PHY_ID_BCM5703: return "5703"; | 11642 | case PHY_ID_BCM5703: return "5703"; |
11643 | case PHY_ID_BCM5704: return "5704"; | 11643 | case PHY_ID_BCM5704: return "5704"; |
11644 | case PHY_ID_BCM5705: return "5705"; | 11644 | case PHY_ID_BCM5705: return "5705"; |
11645 | case PHY_ID_BCM5750: return "5750"; | 11645 | case PHY_ID_BCM5750: return "5750"; |
11646 | case PHY_ID_BCM5752: return "5752"; | 11646 | case PHY_ID_BCM5752: return "5752"; |
11647 | case PHY_ID_BCM5714: return "5714"; | 11647 | case PHY_ID_BCM5714: return "5714"; |
11648 | case PHY_ID_BCM5780: return "5780"; | 11648 | case PHY_ID_BCM5780: return "5780"; |
11649 | case PHY_ID_BCM5755: return "5755"; | 11649 | case PHY_ID_BCM5755: return "5755"; |
11650 | case PHY_ID_BCM5787: return "5787"; | 11650 | case PHY_ID_BCM5787: return "5787"; |
11651 | case PHY_ID_BCM5756: return "5722/5756"; | 11651 | case PHY_ID_BCM5756: return "5722/5756"; |
11652 | case PHY_ID_BCM5906: return "5906"; | 11652 | case PHY_ID_BCM5906: return "5906"; |
11653 | case PHY_ID_BCM8002: return "8002/serdes"; | 11653 | case PHY_ID_BCM8002: return "8002/serdes"; |
11654 | case 0: return "serdes"; | 11654 | case 0: return "serdes"; |
11655 | default: return "unknown"; | 11655 | default: return "unknown"; |
11656 | }; | 11656 | }; |
11657 | } | 11657 | } |
11658 | 11658 | ||
11659 | static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | 11659 | static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) |
11660 | { | 11660 | { |
11661 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 11661 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
11662 | strcpy(str, "PCI Express"); | 11662 | strcpy(str, "PCI Express"); |
11663 | return str; | 11663 | return str; |
11664 | } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 11664 | } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { |
11665 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; | 11665 | u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; |
11666 | 11666 | ||
11667 | strcpy(str, "PCIX:"); | 11667 | strcpy(str, "PCIX:"); |
11668 | 11668 | ||
11669 | if ((clock_ctrl == 7) || | 11669 | if ((clock_ctrl == 7) || |
11670 | ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == | 11670 | ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == |
11671 | GRC_MISC_CFG_BOARD_ID_5704CIOBE)) | 11671 | GRC_MISC_CFG_BOARD_ID_5704CIOBE)) |
11672 | strcat(str, "133MHz"); | 11672 | strcat(str, "133MHz"); |
11673 | else if (clock_ctrl == 0) | 11673 | else if (clock_ctrl == 0) |
11674 | strcat(str, "33MHz"); | 11674 | strcat(str, "33MHz"); |
11675 | else if (clock_ctrl == 2) | 11675 | else if (clock_ctrl == 2) |
11676 | strcat(str, "50MHz"); | 11676 | strcat(str, "50MHz"); |
11677 | else if (clock_ctrl == 4) | 11677 | else if (clock_ctrl == 4) |
11678 | strcat(str, "66MHz"); | 11678 | strcat(str, "66MHz"); |
11679 | else if (clock_ctrl == 6) | 11679 | else if (clock_ctrl == 6) |
11680 | strcat(str, "100MHz"); | 11680 | strcat(str, "100MHz"); |
11681 | } else { | 11681 | } else { |
11682 | strcpy(str, "PCI:"); | 11682 | strcpy(str, "PCI:"); |
11683 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) | 11683 | if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) |
11684 | strcat(str, "66MHz"); | 11684 | strcat(str, "66MHz"); |
11685 | else | 11685 | else |
11686 | strcat(str, "33MHz"); | 11686 | strcat(str, "33MHz"); |
11687 | } | 11687 | } |
11688 | if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) | 11688 | if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) |
11689 | strcat(str, ":32-bit"); | 11689 | strcat(str, ":32-bit"); |
11690 | else | 11690 | else |
11691 | strcat(str, ":64-bit"); | 11691 | strcat(str, ":64-bit"); |
11692 | return str; | 11692 | return str; |
11693 | } | 11693 | } |
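
Annotation (not part of the diff): the strings this helper builds look like "PCI Express" (which returns early without a width suffix), "PCIX:133MHz:64-bit" or "PCI:33MHz:32-bit"; even the longest variant fits comfortably in the 40-byte buffer the probe routine passes in below.
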
11694 | 11694 | ||
11695 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) | 11695 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) |
11696 | { | 11696 | { |
11697 | struct pci_dev *peer; | 11697 | struct pci_dev *peer; |
11698 | unsigned int func, devnr = tp->pdev->devfn & ~7; | 11698 | unsigned int func, devnr = tp->pdev->devfn & ~7; |
11699 | 11699 | ||
11700 | for (func = 0; func < 8; func++) { | 11700 | for (func = 0; func < 8; func++) { |
11701 | peer = pci_get_slot(tp->pdev->bus, devnr | func); | 11701 | peer = pci_get_slot(tp->pdev->bus, devnr | func); |
11702 | if (peer && peer != tp->pdev) | 11702 | if (peer && peer != tp->pdev) |
11703 | break; | 11703 | break; |
11704 | pci_dev_put(peer); | 11704 | pci_dev_put(peer); |
11705 | } | 11705 | } |
11706 | /* 5704 can be configured in single-port mode, set peer to | 11706 | /* 5704 can be configured in single-port mode, set peer to |
11707 | * tp->pdev in that case. | 11707 | * tp->pdev in that case. |
11708 | */ | 11708 | */ |
11709 | if (!peer) { | 11709 | if (!peer) { |
11710 | peer = tp->pdev; | 11710 | peer = tp->pdev; |
11711 | return peer; | 11711 | return peer; |
11712 | } | 11712 | } |
11713 | 11713 | ||
11714 | /* | 11714 | /* |
11715 | * We don't need to keep the refcount elevated; there's no way | 11715 | * We don't need to keep the refcount elevated; there's no way |
11716 | * to remove one half of this device without removing the other | 11716 | * to remove one half of this device without removing the other |
11717 | */ | 11717 | */ |
11718 | pci_dev_put(peer); | 11718 | pci_dev_put(peer); |
11719 | 11719 | ||
11720 | return peer; | 11720 | return peer; |
11721 | } | 11721 | } |
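
Annotation (not part of the diff): the peer lookup leans on PCI devfn encoding, where the low three bits are the function number, so devfn & ~7 addresses function 0 of the same slot and the loop simply probes the eight possible functions for a device other than itself (falling back to tp->pdev for single-port 5704 boards). A sketch of that devfn arithmetic with a made-up devfn:

    #include <stdio.h>

    /* Sketch: PCI devfn packs device (bits 7..3) and function (bits 2..0).
     * The value below is a made-up devfn for illustration. */
    int main(void)
    {
        unsigned int devfn = (4 << 3) | 1;      /* device 4, function 1   */
        unsigned int base  = devfn & ~7u;       /* function 0 of the slot */
        unsigned int func;

        for (func = 0; func < 8; func++)
            printf("probe devfn %u (device %u, function %u)\n",
                   base | func, (base | func) >> 3, func);
        return 0;
    }
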
11722 | 11722 | ||
11723 | static void __devinit tg3_init_coal(struct tg3 *tp) | 11723 | static void __devinit tg3_init_coal(struct tg3 *tp) |
11724 | { | 11724 | { |
11725 | struct ethtool_coalesce *ec = &tp->coal; | 11725 | struct ethtool_coalesce *ec = &tp->coal; |
11726 | 11726 | ||
11727 | memset(ec, 0, sizeof(*ec)); | 11727 | memset(ec, 0, sizeof(*ec)); |
11728 | ec->cmd = ETHTOOL_GCOALESCE; | 11728 | ec->cmd = ETHTOOL_GCOALESCE; |
11729 | ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; | 11729 | ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; |
11730 | ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; | 11730 | ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; |
11731 | ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; | 11731 | ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; |
11732 | ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; | 11732 | ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; |
11733 | ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; | 11733 | ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; |
11734 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; | 11734 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; |
11735 | ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; | 11735 | ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; |
11736 | ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; | 11736 | ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; |
11737 | ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; | 11737 | ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; |
11738 | 11738 | ||
11739 | if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | | 11739 | if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | |
11740 | HOSTCC_MODE_CLRTICK_TXBD)) { | 11740 | HOSTCC_MODE_CLRTICK_TXBD)) { |
11741 | ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; | 11741 | ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; |
11742 | ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; | 11742 | ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; |
11743 | ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; | 11743 | ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; |
11744 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; | 11744 | ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; |
11745 | } | 11745 | } |
11746 | 11746 | ||
11747 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 11747 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
11748 | ec->rx_coalesce_usecs_irq = 0; | 11748 | ec->rx_coalesce_usecs_irq = 0; |
11749 | ec->tx_coalesce_usecs_irq = 0; | 11749 | ec->tx_coalesce_usecs_irq = 0; |
11750 | ec->stats_block_coalesce_usecs = 0; | 11750 | ec->stats_block_coalesce_usecs = 0; |
11751 | } | 11751 | } |
11752 | } | 11752 | } |
11753 | 11753 | ||
11754 | static int __devinit tg3_init_one(struct pci_dev *pdev, | 11754 | static int __devinit tg3_init_one(struct pci_dev *pdev, |
11755 | const struct pci_device_id *ent) | 11755 | const struct pci_device_id *ent) |
11756 | { | 11756 | { |
11757 | static int tg3_version_printed = 0; | 11757 | static int tg3_version_printed = 0; |
11758 | unsigned long tg3reg_base, tg3reg_len; | 11758 | unsigned long tg3reg_base, tg3reg_len; |
11759 | struct net_device *dev; | 11759 | struct net_device *dev; |
11760 | struct tg3 *tp; | 11760 | struct tg3 *tp; |
11761 | int i, err, pm_cap; | 11761 | int i, err, pm_cap; |
11762 | char str[40]; | 11762 | char str[40]; |
11763 | u64 dma_mask, persist_dma_mask; | 11763 | u64 dma_mask, persist_dma_mask; |
11764 | 11764 | ||
11765 | if (tg3_version_printed++ == 0) | 11765 | if (tg3_version_printed++ == 0) |
11766 | printk(KERN_INFO "%s", version); | 11766 | printk(KERN_INFO "%s", version); |
11767 | 11767 | ||
11768 | err = pci_enable_device(pdev); | 11768 | err = pci_enable_device(pdev); |
11769 | if (err) { | 11769 | if (err) { |
11770 | printk(KERN_ERR PFX "Cannot enable PCI device, " | 11770 | printk(KERN_ERR PFX "Cannot enable PCI device, " |
11771 | "aborting.\n"); | 11771 | "aborting.\n"); |
11772 | return err; | 11772 | return err; |
11773 | } | 11773 | } |
11774 | 11774 | ||
11775 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 11775 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
11776 | printk(KERN_ERR PFX "Cannot find proper PCI device " | 11776 | printk(KERN_ERR PFX "Cannot find proper PCI device " |
11777 | "base address, aborting.\n"); | 11777 | "base address, aborting.\n"); |
11778 | err = -ENODEV; | 11778 | err = -ENODEV; |
11779 | goto err_out_disable_pdev; | 11779 | goto err_out_disable_pdev; |
11780 | } | 11780 | } |
11781 | 11781 | ||
11782 | err = pci_request_regions(pdev, DRV_MODULE_NAME); | 11782 | err = pci_request_regions(pdev, DRV_MODULE_NAME); |
11783 | if (err) { | 11783 | if (err) { |
11784 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " | 11784 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " |
11785 | "aborting.\n"); | 11785 | "aborting.\n"); |
11786 | goto err_out_disable_pdev; | 11786 | goto err_out_disable_pdev; |
11787 | } | 11787 | } |
11788 | 11788 | ||
11789 | pci_set_master(pdev); | 11789 | pci_set_master(pdev); |
11790 | 11790 | ||
11791 | /* Find power-management capability. */ | 11791 | /* Find power-management capability. */ |
11792 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 11792 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
11793 | if (pm_cap == 0) { | 11793 | if (pm_cap == 0) { |
11794 | printk(KERN_ERR PFX "Cannot find PowerManagement capability, " | 11794 | printk(KERN_ERR PFX "Cannot find PowerManagement capability, " |
11795 | "aborting.\n"); | 11795 | "aborting.\n"); |
11796 | err = -EIO; | 11796 | err = -EIO; |
11797 | goto err_out_free_res; | 11797 | goto err_out_free_res; |
11798 | } | 11798 | } |
11799 | 11799 | ||
11800 | tg3reg_base = pci_resource_start(pdev, 0); | 11800 | tg3reg_base = pci_resource_start(pdev, 0); |
11801 | tg3reg_len = pci_resource_len(pdev, 0); | 11801 | tg3reg_len = pci_resource_len(pdev, 0); |
11802 | 11802 | ||
11803 | dev = alloc_etherdev(sizeof(*tp)); | 11803 | dev = alloc_etherdev(sizeof(*tp)); |
11804 | if (!dev) { | 11804 | if (!dev) { |
11805 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | 11805 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); |
11806 | err = -ENOMEM; | 11806 | err = -ENOMEM; |
11807 | goto err_out_free_res; | 11807 | goto err_out_free_res; |
11808 | } | 11808 | } |
11809 | 11809 | ||
11810 | SET_MODULE_OWNER(dev); | 11810 | SET_MODULE_OWNER(dev); |
11811 | SET_NETDEV_DEV(dev, &pdev->dev); | 11811 | SET_NETDEV_DEV(dev, &pdev->dev); |
11812 | 11812 | ||
11813 | #if TG3_VLAN_TAG_USED | 11813 | #if TG3_VLAN_TAG_USED |
11814 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 11814 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
11815 | dev->vlan_rx_register = tg3_vlan_rx_register; | 11815 | dev->vlan_rx_register = tg3_vlan_rx_register; |
11816 | #endif | 11816 | #endif |
11817 | 11817 | ||
11818 | tp = netdev_priv(dev); | 11818 | tp = netdev_priv(dev); |
11819 | tp->pdev = pdev; | 11819 | tp->pdev = pdev; |
11820 | tp->dev = dev; | 11820 | tp->dev = dev; |
11821 | tp->pm_cap = pm_cap; | 11821 | tp->pm_cap = pm_cap; |
11822 | tp->mac_mode = TG3_DEF_MAC_MODE; | 11822 | tp->mac_mode = TG3_DEF_MAC_MODE; |
11823 | tp->rx_mode = TG3_DEF_RX_MODE; | 11823 | tp->rx_mode = TG3_DEF_RX_MODE; |
11824 | tp->tx_mode = TG3_DEF_TX_MODE; | 11824 | tp->tx_mode = TG3_DEF_TX_MODE; |
11825 | tp->mi_mode = MAC_MI_MODE_BASE; | 11825 | tp->mi_mode = MAC_MI_MODE_BASE; |
11826 | if (tg3_debug > 0) | 11826 | if (tg3_debug > 0) |
11827 | tp->msg_enable = tg3_debug; | 11827 | tp->msg_enable = tg3_debug; |
11828 | else | 11828 | else |
11829 | tp->msg_enable = TG3_DEF_MSG_ENABLE; | 11829 | tp->msg_enable = TG3_DEF_MSG_ENABLE; |
11830 | 11830 | ||
11831 | /* The word/byte swap controls here control register access byte | 11831 | /* The word/byte swap controls here control register access byte |
11832 | * swapping. DMA data byte swapping is controlled in the GRC_MODE | 11832 | * swapping. DMA data byte swapping is controlled in the GRC_MODE |
11833 | * setting below. | 11833 | * setting below. |
11834 | */ | 11834 | */ |
11835 | tp->misc_host_ctrl = | 11835 | tp->misc_host_ctrl = |
11836 | MISC_HOST_CTRL_MASK_PCI_INT | | 11836 | MISC_HOST_CTRL_MASK_PCI_INT | |
11837 | MISC_HOST_CTRL_WORD_SWAP | | 11837 | MISC_HOST_CTRL_WORD_SWAP | |
11838 | MISC_HOST_CTRL_INDIR_ACCESS | | 11838 | MISC_HOST_CTRL_INDIR_ACCESS | |
11839 | MISC_HOST_CTRL_PCISTATE_RW; | 11839 | MISC_HOST_CTRL_PCISTATE_RW; |
11840 | 11840 | ||
11841 | /* The NONFRM (non-frame) byte/word swap controls take effect | 11841 | /* The NONFRM (non-frame) byte/word swap controls take effect |
11842 | * on descriptor entries, anything which isn't packet data. | 11842 | * on descriptor entries, anything which isn't packet data. |
11843 | * | 11843 | * |
11844 | * The StrongARM chips on the board (one for tx, one for rx) | 11844 | * The StrongARM chips on the board (one for tx, one for rx) |
11845 | * are running in big-endian mode. | 11845 | * are running in big-endian mode. |
11846 | */ | 11846 | */ |
11847 | tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | | 11847 | tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | |
11848 | GRC_MODE_WSWAP_NONFRM_DATA); | 11848 | GRC_MODE_WSWAP_NONFRM_DATA); |
11849 | #ifdef __BIG_ENDIAN | 11849 | #ifdef __BIG_ENDIAN |
11850 | tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; | 11850 | tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; |
11851 | #endif | 11851 | #endif |
11852 | spin_lock_init(&tp->lock); | 11852 | spin_lock_init(&tp->lock); |
11853 | spin_lock_init(&tp->indirect_lock); | 11853 | spin_lock_init(&tp->indirect_lock); |
11854 | INIT_WORK(&tp->reset_task, tg3_reset_task); | 11854 | INIT_WORK(&tp->reset_task, tg3_reset_task); |
11855 | 11855 | ||
11856 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); | 11856 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); |
11857 | if (tp->regs == 0UL) { | 11857 | if (tp->regs == 0UL) { |
11858 | printk(KERN_ERR PFX "Cannot map device registers, " | 11858 | printk(KERN_ERR PFX "Cannot map device registers, " |
11859 | "aborting.\n"); | 11859 | "aborting.\n"); |
11860 | err = -ENOMEM; | 11860 | err = -ENOMEM; |
11861 | goto err_out_free_dev; | 11861 | goto err_out_free_dev; |
11862 | } | 11862 | } |
11863 | 11863 | ||
11864 | tg3_init_link_config(tp); | 11864 | tg3_init_link_config(tp); |
11865 | 11865 | ||
11866 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; | 11866 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; |
11867 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; | 11867 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; |
11868 | tp->tx_pending = TG3_DEF_TX_RING_PENDING; | 11868 | tp->tx_pending = TG3_DEF_TX_RING_PENDING; |
11869 | 11869 | ||
11870 | dev->open = tg3_open; | 11870 | dev->open = tg3_open; |
11871 | dev->stop = tg3_close; | 11871 | dev->stop = tg3_close; |
11872 | dev->get_stats = tg3_get_stats; | 11872 | dev->get_stats = tg3_get_stats; |
11873 | dev->set_multicast_list = tg3_set_rx_mode; | 11873 | dev->set_multicast_list = tg3_set_rx_mode; |
11874 | dev->set_mac_address = tg3_set_mac_addr; | 11874 | dev->set_mac_address = tg3_set_mac_addr; |
11875 | dev->do_ioctl = tg3_ioctl; | 11875 | dev->do_ioctl = tg3_ioctl; |
11876 | dev->tx_timeout = tg3_tx_timeout; | 11876 | dev->tx_timeout = tg3_tx_timeout; |
11877 | dev->poll = tg3_poll; | 11877 | dev->poll = tg3_poll; |
11878 | dev->ethtool_ops = &tg3_ethtool_ops; | 11878 | dev->ethtool_ops = &tg3_ethtool_ops; |
11879 | dev->weight = 64; | 11879 | dev->weight = 64; |
11880 | dev->watchdog_timeo = TG3_TX_TIMEOUT; | 11880 | dev->watchdog_timeo = TG3_TX_TIMEOUT; |
11881 | dev->change_mtu = tg3_change_mtu; | 11881 | dev->change_mtu = tg3_change_mtu; |
11882 | dev->irq = pdev->irq; | 11882 | dev->irq = pdev->irq; |
11883 | #ifdef CONFIG_NET_POLL_CONTROLLER | 11883 | #ifdef CONFIG_NET_POLL_CONTROLLER |
11884 | dev->poll_controller = tg3_poll_controller; | 11884 | dev->poll_controller = tg3_poll_controller; |
11885 | #endif | 11885 | #endif |
11886 | 11886 | ||
11887 | err = tg3_get_invariants(tp); | 11887 | err = tg3_get_invariants(tp); |
11888 | if (err) { | 11888 | if (err) { |
11889 | printk(KERN_ERR PFX "Problem fetching invariants of chip, " | 11889 | printk(KERN_ERR PFX "Problem fetching invariants of chip, " |
11890 | "aborting.\n"); | 11890 | "aborting.\n"); |
11891 | goto err_out_iounmap; | 11891 | goto err_out_iounmap; |
11892 | } | 11892 | } |
11893 | 11893 | ||
11894 | /* The EPB bridge inside 5714, 5715, and 5780 and any | 11894 | /* The EPB bridge inside 5714, 5715, and 5780 and any |
11895 | * device behind the EPB cannot support DMA addresses > 40-bit. | 11895 | * device behind the EPB cannot support DMA addresses > 40-bit. |
11896 | * On 64-bit systems with IOMMU, use 40-bit dma_mask. | 11896 | * On 64-bit systems with IOMMU, use 40-bit dma_mask. |
11897 | * On 64-bit systems without IOMMU, use 64-bit dma_mask and | 11897 | * On 64-bit systems without IOMMU, use 64-bit dma_mask and |
11898 | * do DMA address check in tg3_start_xmit(). | 11898 | * do DMA address check in tg3_start_xmit(). |
11899 | */ | 11899 | */ |
11900 | if (tp->tg3_flags2 & TG3_FLG2_IS_5788) | 11900 | if (tp->tg3_flags2 & TG3_FLG2_IS_5788) |
11901 | persist_dma_mask = dma_mask = DMA_32BIT_MASK; | 11901 | persist_dma_mask = dma_mask = DMA_32BIT_MASK; |
11902 | else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { | 11902 | else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { |
11903 | persist_dma_mask = dma_mask = DMA_40BIT_MASK; | 11903 | persist_dma_mask = dma_mask = DMA_40BIT_MASK; |
11904 | #ifdef CONFIG_HIGHMEM | 11904 | #ifdef CONFIG_HIGHMEM |
11905 | dma_mask = DMA_64BIT_MASK; | 11905 | dma_mask = DMA_64BIT_MASK; |
11906 | #endif | 11906 | #endif |
11907 | } else | 11907 | } else |
11908 | persist_dma_mask = dma_mask = DMA_64BIT_MASK; | 11908 | persist_dma_mask = dma_mask = DMA_64BIT_MASK; |
11909 | 11909 | ||
11910 | /* Configure DMA attributes. */ | 11910 | /* Configure DMA attributes. */ |
11911 | if (dma_mask > DMA_32BIT_MASK) { | 11911 | if (dma_mask > DMA_32BIT_MASK) { |
11912 | err = pci_set_dma_mask(pdev, dma_mask); | 11912 | err = pci_set_dma_mask(pdev, dma_mask); |
11913 | if (!err) { | 11913 | if (!err) { |
11914 | dev->features |= NETIF_F_HIGHDMA; | 11914 | dev->features |= NETIF_F_HIGHDMA; |
11915 | err = pci_set_consistent_dma_mask(pdev, | 11915 | err = pci_set_consistent_dma_mask(pdev, |
11916 | persist_dma_mask); | 11916 | persist_dma_mask); |
11917 | if (err < 0) { | 11917 | if (err < 0) { |
11918 | printk(KERN_ERR PFX "Unable to obtain 64 bit " | 11918 | printk(KERN_ERR PFX "Unable to obtain 64 bit " |
11919 | "DMA for consistent allocations\n"); | 11919 | "DMA for consistent allocations\n"); |
11920 | goto err_out_iounmap; | 11920 | goto err_out_iounmap; |
11921 | } | 11921 | } |
11922 | } | 11922 | } |
11923 | } | 11923 | } |
11924 | if (err || dma_mask == DMA_32BIT_MASK) { | 11924 | if (err || dma_mask == DMA_32BIT_MASK) { |
11925 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 11925 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
11926 | if (err) { | 11926 | if (err) { |
11927 | printk(KERN_ERR PFX "No usable DMA configuration, " | 11927 | printk(KERN_ERR PFX "No usable DMA configuration, " |
11928 | "aborting.\n"); | 11928 | "aborting.\n"); |
11929 | goto err_out_iounmap; | 11929 | goto err_out_iounmap; |
11930 | } | 11930 | } |
11931 | } | 11931 | } |
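
Annotation (not part of the diff): this block is the standard try-the-widest-mask-then-fall-back sequence: attempt the 64-bit (or 40-bit) streaming mask, advertise NETIF_F_HIGHDMA and set the matching coherent mask on success, and otherwise drop to a plain 32-bit mask, aborting only if even that fails. A condensed sketch of the control flow; set_mask() is a hypothetical stand-in for pci_set_dma_mask() that pretends the platform only supports 32-bit DMA:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for pci_set_dma_mask(): reject wide masks. */
    static int set_mask(uint64_t mask)
    {
        return (mask > 0xffffffffULL) ? -5 /* -EIO */ : 0;
    }

    int main(void)
    {
        uint64_t dma_mask = ~0ULL;              /* try 64-bit first, as the probe does */
        int highdma = 0;
        int err = 0;

        if (dma_mask > 0xffffffffULL) {
            err = set_mask(dma_mask);
            if (!err)
                highdma = 1;                    /* driver sets NETIF_F_HIGHDMA and the
                                                 * coherent mask at this point */
        }
        if (err || dma_mask == 0xffffffffULL) {
            err = set_mask(0xffffffffULL);      /* fall back to 32-bit */
            if (err) {
                printf("no usable DMA configuration\n");
                return 1;
            }
        }
        printf("DMA ok, highdma=%d\n", highdma);
        return 0;
    }
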
11932 | 11932 | ||
11933 | tg3_init_bufmgr_config(tp); | 11933 | tg3_init_bufmgr_config(tp); |
11934 | 11934 | ||
11935 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 11935 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { |
11936 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 11936 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
11937 | } | 11937 | } |
11938 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 11938 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || |
11939 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 11939 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || |
11940 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || | 11940 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || |
11941 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | 11941 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || |
11942 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { | 11942 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { |
11943 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 11943 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; |
11944 | } else { | 11944 | } else { |
11945 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; | 11945 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; |
11946 | } | 11946 | } |
11947 | 11947 | ||
11948 | /* TSO is on by default on chips that support hardware TSO. | 11948 | /* TSO is on by default on chips that support hardware TSO. |
11949 | * Firmware TSO on older chips gives lower performance, so it | 11949 | * Firmware TSO on older chips gives lower performance, so it |
11950 | * is off by default, but can be enabled using ethtool. | 11950 | * is off by default, but can be enabled using ethtool. |
11951 | */ | 11951 | */ |
11952 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 11952 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { |
11953 | dev->features |= NETIF_F_TSO; | 11953 | dev->features |= NETIF_F_TSO; |
11954 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && | 11954 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && |
11955 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) | 11955 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) |
11956 | dev->features |= NETIF_F_TSO6; | 11956 | dev->features |= NETIF_F_TSO6; |
11957 | } | 11957 | } |
11958 | 11958 | ||
11959 | 11959 | ||
11960 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && | 11960 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && |
11961 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 11961 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && |
11962 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { | 11962 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { |
11963 | tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; | 11963 | tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; |
11964 | tp->rx_pending = 63; | 11964 | tp->rx_pending = 63; |
11965 | } | 11965 | } |
11966 | 11966 | ||
11967 | err = tg3_get_device_address(tp); | 11967 | err = tg3_get_device_address(tp); |
11968 | if (err) { | 11968 | if (err) { |
11969 | printk(KERN_ERR PFX "Could not obtain valid ethernet address, " | 11969 | printk(KERN_ERR PFX "Could not obtain valid ethernet address, " |
11970 | "aborting.\n"); | 11970 | "aborting.\n"); |
11971 | goto err_out_iounmap; | 11971 | goto err_out_iounmap; |
11972 | } | 11972 | } |
11973 | 11973 | ||
11974 | /* | 11974 | /* |
11975 | * Reset chip in case UNDI or EFI driver did not shutdown. | 11975 | * Reset chip in case UNDI or EFI driver did not shutdown. |
11976 | * DMA self test will enable WDMAC and we'll see (spurious) | 11976 | * DMA self test will enable WDMAC and we'll see (spurious) |
11977 | * pending DMA on the PCI bus at that point. | 11977 | * pending DMA on the PCI bus at that point. |
11978 | */ | 11978 | */ |
11979 | if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || | 11979 | if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || |
11980 | (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 11980 | (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { |
11981 | pci_save_state(tp->pdev); | 11981 | pci_save_state(tp->pdev); |
11982 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); | 11982 | tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); |
11983 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 11983 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
11984 | } | 11984 | } |
11985 | 11985 | ||
11986 | err = tg3_test_dma(tp); | 11986 | err = tg3_test_dma(tp); |
11987 | if (err) { | 11987 | if (err) { |
11988 | printk(KERN_ERR PFX "DMA engine test failed, aborting.\n"); | 11988 | printk(KERN_ERR PFX "DMA engine test failed, aborting.\n"); |
11989 | goto err_out_iounmap; | 11989 | goto err_out_iounmap; |
11990 | } | 11990 | } |
11991 | 11991 | ||
11992 | /* Tigon3 can do ipv4 only... and some chips have buggy | 11992 | /* Tigon3 can do ipv4 only... and some chips have buggy |
11993 | * checksumming. | 11993 | * checksumming. |
11994 | */ | 11994 | */ |
11995 | if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { | 11995 | if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { |
11996 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 11996 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
11997 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 11997 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
11998 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | 11998 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
11999 | dev->features |= NETIF_F_IPV6_CSUM; | 11999 | dev->features |= NETIF_F_IPV6_CSUM; |
12000 | 12000 | ||
12001 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | 12001 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; |
12002 | } else | 12002 | } else |
12003 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | 12003 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; |
12004 | 12004 | ||
12005 | /* flow control autonegotiation is default behavior */ | 12005 | /* flow control autonegotiation is default behavior */ |
12006 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 12006 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
12007 | 12007 | ||
12008 | tg3_init_coal(tp); | 12008 | tg3_init_coal(tp); |
12009 | 12009 | ||
12010 | /* Now that we have fully setup the chip, save away a snapshot | 12010 | /* Now that we have fully setup the chip, save away a snapshot |
12011 | * of the PCI config space. We need to restore this after | 12011 | * of the PCI config space. We need to restore this after |
12012 | * GRC_MISC_CFG core clock resets and some resume events. | 12012 | * GRC_MISC_CFG core clock resets and some resume events. |
12013 | */ | 12013 | */ |
12014 | pci_save_state(tp->pdev); | 12014 | pci_save_state(tp->pdev); |
12015 | 12015 | ||
12016 | pci_set_drvdata(pdev, dev); | 12016 | pci_set_drvdata(pdev, dev); |
12017 | 12017 | ||
12018 | err = register_netdev(dev); | 12018 | err = register_netdev(dev); |
12019 | if (err) { | 12019 | if (err) { |
12020 | printk(KERN_ERR PFX "Cannot register net device, " | 12020 | printk(KERN_ERR PFX "Cannot register net device, " |
12021 | "aborting.\n"); | 12021 | "aborting.\n"); |
12022 | goto err_out_iounmap; | 12022 | goto err_out_iounmap; |
12023 | } | 12023 | } |
12024 | 12024 | ||
12025 | printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ", | 12025 | printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ", |
12026 | dev->name, | 12026 | dev->name, |
12027 | tp->board_part_number, | 12027 | tp->board_part_number, |
12028 | tp->pci_chip_rev_id, | 12028 | tp->pci_chip_rev_id, |
12029 | tg3_phy_string(tp), | 12029 | tg3_phy_string(tp), |
12030 | tg3_bus_string(tp, str), | 12030 | tg3_bus_string(tp, str), |
12031 | ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : | 12031 | ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : |
12032 | ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : | 12032 | ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : |
12033 | "10/100/1000Base-T"))); | 12033 | "10/100/1000Base-T"))); |
12034 | 12034 | ||
12035 | for (i = 0; i < 6; i++) | 12035 | for (i = 0; i < 6; i++) |
12036 | printk("%2.2x%c", dev->dev_addr[i], | 12036 | printk("%2.2x%c", dev->dev_addr[i], |
12037 | i == 5 ? '\n' : ':'); | 12037 | i == 5 ? '\n' : ':'); |
12038 | 12038 | ||
12039 | printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] " | 12039 | printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] " |
12040 | "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n", | 12040 | "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n", |
12041 | dev->name, | 12041 | dev->name, |
12042 | (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, | 12042 | (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, |
12043 | (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, | 12043 | (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, |
12044 | (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, | 12044 | (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, |
12045 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, | 12045 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, |
12046 | (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, | 12046 | (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, |
12047 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); | 12047 | (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); |
12048 | printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", | 12048 | printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", |
12049 | dev->name, tp->dma_rwctrl, | 12049 | dev->name, tp->dma_rwctrl, |
12050 | (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : | 12050 | (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : |
12051 | (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64)); | 12051 | (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64)); |
12052 | 12052 | ||
12053 | return 0; | 12053 | return 0; |
12054 | 12054 | ||
12055 | err_out_iounmap: | 12055 | err_out_iounmap: |
12056 | if (tp->regs) { | 12056 | if (tp->regs) { |
12057 | iounmap(tp->regs); | 12057 | iounmap(tp->regs); |
12058 | tp->regs = NULL; | 12058 | tp->regs = NULL; |
12059 | } | 12059 | } |
12060 | 12060 | ||
12061 | err_out_free_dev: | 12061 | err_out_free_dev: |
12062 | free_netdev(dev); | 12062 | free_netdev(dev); |
12063 | 12063 | ||
12064 | err_out_free_res: | 12064 | err_out_free_res: |
12065 | pci_release_regions(pdev); | 12065 | pci_release_regions(pdev); |
12066 | 12066 | ||
12067 | err_out_disable_pdev: | 12067 | err_out_disable_pdev: |
12068 | pci_disable_device(pdev); | 12068 | pci_disable_device(pdev); |
12069 | pci_set_drvdata(pdev, NULL); | 12069 | pci_set_drvdata(pdev, NULL); |
12070 | return err; | 12070 | return err; |
12071 | } | 12071 | } |
12072 | 12072 | ||
12073 | static void __devexit tg3_remove_one(struct pci_dev *pdev) | 12073 | static void __devexit tg3_remove_one(struct pci_dev *pdev) |
12074 | { | 12074 | { |
12075 | struct net_device *dev = pci_get_drvdata(pdev); | 12075 | struct net_device *dev = pci_get_drvdata(pdev); |
12076 | 12076 | ||
12077 | if (dev) { | 12077 | if (dev) { |
12078 | struct tg3 *tp = netdev_priv(dev); | 12078 | struct tg3 *tp = netdev_priv(dev); |
12079 | 12079 | ||
12080 | flush_scheduled_work(); | 12080 | flush_scheduled_work(); |
12081 | unregister_netdev(dev); | 12081 | unregister_netdev(dev); |
12082 | if (tp->regs) { | 12082 | if (tp->regs) { |
12083 | iounmap(tp->regs); | 12083 | iounmap(tp->regs); |
12084 | tp->regs = NULL; | 12084 | tp->regs = NULL; |
12085 | } | 12085 | } |
12086 | free_netdev(dev); | 12086 | free_netdev(dev); |
12087 | pci_release_regions(pdev); | 12087 | pci_release_regions(pdev); |
12088 | pci_disable_device(pdev); | 12088 | pci_disable_device(pdev); |
12089 | pci_set_drvdata(pdev, NULL); | 12089 | pci_set_drvdata(pdev, NULL); |
12090 | } | 12090 | } |
12091 | } | 12091 | } |
12092 | 12092 | ||
12093 | static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | 12093 | static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) |
12094 | { | 12094 | { |
12095 | struct net_device *dev = pci_get_drvdata(pdev); | 12095 | struct net_device *dev = pci_get_drvdata(pdev); |
12096 | struct tg3 *tp = netdev_priv(dev); | 12096 | struct tg3 *tp = netdev_priv(dev); |
12097 | int err; | 12097 | int err; |
12098 | 12098 | ||
12099 | if (!netif_running(dev)) | 12099 | if (!netif_running(dev)) |
12100 | return 0; | 12100 | return 0; |
12101 | 12101 | ||
12102 | flush_scheduled_work(); | 12102 | flush_scheduled_work(); |
12103 | tg3_netif_stop(tp); | 12103 | tg3_netif_stop(tp); |
12104 | 12104 | ||
12105 | del_timer_sync(&tp->timer); | 12105 | del_timer_sync(&tp->timer); |
12106 | 12106 | ||
12107 | tg3_full_lock(tp, 1); | 12107 | tg3_full_lock(tp, 1); |
12108 | tg3_disable_ints(tp); | 12108 | tg3_disable_ints(tp); |
12109 | tg3_full_unlock(tp); | 12109 | tg3_full_unlock(tp); |
12110 | 12110 | ||
12111 | netif_device_detach(dev); | 12111 | netif_device_detach(dev); |
12112 | 12112 | ||
12113 | tg3_full_lock(tp, 0); | 12113 | tg3_full_lock(tp, 0); |
12114 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 12114 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
12115 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 12115 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; |
12116 | tg3_full_unlock(tp); | 12116 | tg3_full_unlock(tp); |
12117 | 12117 | ||
12118 | /* Save MSI address and data for resume. */ | 12118 | /* Save MSI address and data for resume. */ |
12119 | pci_save_state(pdev); | 12119 | pci_save_state(pdev); |
12120 | 12120 | ||
12121 | err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); | 12121 | err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); |
12122 | if (err) { | 12122 | if (err) { |
12123 | tg3_full_lock(tp, 0); | 12123 | tg3_full_lock(tp, 0); |
12124 | 12124 | ||
12125 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 12125 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
12126 | if (tg3_restart_hw(tp, 1)) | 12126 | if (tg3_restart_hw(tp, 1)) |
12127 | goto out; | 12127 | goto out; |
12128 | 12128 | ||
12129 | tp->timer.expires = jiffies + tp->timer_offset; | 12129 | tp->timer.expires = jiffies + tp->timer_offset; |
12130 | add_timer(&tp->timer); | 12130 | add_timer(&tp->timer); |
12131 | 12131 | ||
12132 | netif_device_attach(dev); | 12132 | netif_device_attach(dev); |
12133 | tg3_netif_start(tp); | 12133 | tg3_netif_start(tp); |
12134 | 12134 | ||
12135 | out: | 12135 | out: |
12136 | tg3_full_unlock(tp); | 12136 | tg3_full_unlock(tp); |
12137 | } | 12137 | } |
12138 | 12138 | ||
12139 | return err; | 12139 | return err; |
12140 | } | 12140 | } |
12141 | 12141 | ||
12142 | static int tg3_resume(struct pci_dev *pdev) | 12142 | static int tg3_resume(struct pci_dev *pdev) |
12143 | { | 12143 | { |
12144 | struct net_device *dev = pci_get_drvdata(pdev); | 12144 | struct net_device *dev = pci_get_drvdata(pdev); |
12145 | struct tg3 *tp = netdev_priv(dev); | 12145 | struct tg3 *tp = netdev_priv(dev); |
12146 | int err; | 12146 | int err; |
12147 | 12147 | ||
12148 | if (!netif_running(dev)) | 12148 | if (!netif_running(dev)) |
12149 | return 0; | 12149 | return 0; |
12150 | 12150 | ||
12151 | pci_restore_state(tp->pdev); | 12151 | pci_restore_state(tp->pdev); |
12152 | 12152 | ||
12153 | err = tg3_set_power_state(tp, PCI_D0); | 12153 | err = tg3_set_power_state(tp, PCI_D0); |
12154 | if (err) | 12154 | if (err) |
12155 | return err; | 12155 | return err; |
12156 | 12156 | ||
12157 | netif_device_attach(dev); | 12157 | netif_device_attach(dev); |
12158 | 12158 | ||
12159 | tg3_full_lock(tp, 0); | 12159 | tg3_full_lock(tp, 0); |
12160 | 12160 | ||
12161 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 12161 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
12162 | err = tg3_restart_hw(tp, 1); | 12162 | err = tg3_restart_hw(tp, 1); |
12163 | if (err) | 12163 | if (err) |
12164 | goto out; | 12164 | goto out; |
12165 | 12165 | ||
12166 | tp->timer.expires = jiffies + tp->timer_offset; | 12166 | tp->timer.expires = jiffies + tp->timer_offset; |
12167 | add_timer(&tp->timer); | 12167 | add_timer(&tp->timer); |
12168 | 12168 | ||
12169 | tg3_netif_start(tp); | 12169 | tg3_netif_start(tp); |
12170 | 12170 | ||
12171 | out: | 12171 | out: |
12172 | tg3_full_unlock(tp); | 12172 | tg3_full_unlock(tp); |
12173 | 12173 | ||
12174 | return err; | 12174 | return err; |
12175 | } | 12175 | } |
12176 | 12176 | ||
12177 | static struct pci_driver tg3_driver = { | 12177 | static struct pci_driver tg3_driver = { |
12178 | .name = DRV_MODULE_NAME, | 12178 | .name = DRV_MODULE_NAME, |
12179 | .id_table = tg3_pci_tbl, | 12179 | .id_table = tg3_pci_tbl, |
12180 | .probe = tg3_init_one, | 12180 | .probe = tg3_init_one, |
12181 | .remove = __devexit_p(tg3_remove_one), | 12181 | .remove = __devexit_p(tg3_remove_one), |
12182 | .suspend = tg3_suspend, | 12182 | .suspend = tg3_suspend, |
12183 | .resume = tg3_resume | 12183 | .resume = tg3_resume |
12184 | }; | 12184 | }; |
12185 | 12185 | ||
12186 | static int __init tg3_init(void) | 12186 | static int __init tg3_init(void) |
12187 | { | 12187 | { |
12188 | return pci_register_driver(&tg3_driver); | 12188 | return pci_register_driver(&tg3_driver); |
12189 | } | 12189 | } |
12190 | 12190 | ||
12191 | static void __exit tg3_cleanup(void) | 12191 | static void __exit tg3_cleanup(void) |
12192 | { | 12192 | { |
12193 | pci_unregister_driver(&tg3_driver); | 12193 | pci_unregister_driver(&tg3_driver); |
12194 | } | 12194 | } |
12195 | 12195 | ||
12196 | module_init(tg3_init); | 12196 | module_init(tg3_init); |
12197 | module_exit(tg3_cleanup); | 12197 | module_exit(tg3_cleanup); |
12198 | 12198 |
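
Per the commit message, tg3.c (and bnx2.c) now route "ethtool -K ethX tx on|off" through the new ethtool_op_set_tx_ipv6_csum() helper, but the actual tg3_set_tx_csum() hunk is not shown on this page. The sketch below is only an illustration of the likely shape, reusing flags and helpers that do appear above (TG3_FLAG_BROKEN_CHECKSUMS, GET_ASIC_REV, ASIC_REV_5755/5787); the function name is hypothetical.

static int tg3_set_tx_csum_sketch(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Chips with broken checksum hardware must keep offload disabled. */
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS)
		return data ? -EINVAL : 0;

	/* 5755/5787 also offload IPv6 checksums (see the probe code above),
	 * so toggle NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM together.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
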
include/linux/ethtool.h
1 | /* | 1 | /* |
2 | * ethtool.h: Defines for Linux ethtool. | 2 | * ethtool.h: Defines for Linux ethtool. |
3 | * | 3 | * |
4 | * Copyright (C) 1998 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 1998 David S. Miller (davem@redhat.com) |
5 | * Copyright 2001 Jeff Garzik <jgarzik@pobox.com> | 5 | * Copyright 2001 Jeff Garzik <jgarzik@pobox.com> |
6 | * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) | 6 | * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) |
7 | * Portions Copyright 2002 Intel (eli.kupermann@intel.com, | 7 | * Portions Copyright 2002 Intel (eli.kupermann@intel.com, |
8 | * christopher.leech@intel.com, | 8 | * christopher.leech@intel.com, |
9 | * scott.feldman@intel.com) | 9 | * scott.feldman@intel.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _LINUX_ETHTOOL_H | 12 | #ifndef _LINUX_ETHTOOL_H |
13 | #define _LINUX_ETHTOOL_H | 13 | #define _LINUX_ETHTOOL_H |
14 | 14 | ||
15 | 15 | ||
16 | /* This should work for both 32 and 64 bit userland. */ | 16 | /* This should work for both 32 and 64 bit userland. */ |
17 | struct ethtool_cmd { | 17 | struct ethtool_cmd { |
18 | __u32 cmd; | 18 | __u32 cmd; |
19 | __u32 supported; /* Features this interface supports */ | 19 | __u32 supported; /* Features this interface supports */ |
20 | __u32 advertising; /* Features this interface advertises */ | 20 | __u32 advertising; /* Features this interface advertises */ |
21 | __u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ | 21 | __u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ |
22 | __u8 duplex; /* Duplex, half or full */ | 22 | __u8 duplex; /* Duplex, half or full */ |
23 | __u8 port; /* Which connector port */ | 23 | __u8 port; /* Which connector port */ |
24 | __u8 phy_address; | 24 | __u8 phy_address; |
25 | __u8 transceiver; /* Which transceiver to use */ | 25 | __u8 transceiver; /* Which transceiver to use */ |
26 | __u8 autoneg; /* Enable or disable autonegotiation */ | 26 | __u8 autoneg; /* Enable or disable autonegotiation */ |
27 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ | 27 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ |
28 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ | 28 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ |
29 | __u32 reserved[4]; | 29 | __u32 reserved[4]; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | #define ETHTOOL_BUSINFO_LEN 32 | 32 | #define ETHTOOL_BUSINFO_LEN 32 |
33 | /* these strings are set to whatever the driver author decides... */ | 33 | /* these strings are set to whatever the driver author decides... */ |
34 | struct ethtool_drvinfo { | 34 | struct ethtool_drvinfo { |
35 | __u32 cmd; | 35 | __u32 cmd; |
36 | char driver[32]; /* driver short name, "tulip", "eepro100" */ | 36 | char driver[32]; /* driver short name, "tulip", "eepro100" */ |
37 | char version[32]; /* driver version string */ | 37 | char version[32]; /* driver version string */ |
38 | char fw_version[32]; /* firmware version string, if applicable */ | 38 | char fw_version[32]; /* firmware version string, if applicable */ |
39 | char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ | 39 | char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ |
40 | /* For PCI devices, use pci_name(pci_dev). */ | 40 | /* For PCI devices, use pci_name(pci_dev). */ |
41 | char reserved1[32]; | 41 | char reserved1[32]; |
42 | char reserved2[16]; | 42 | char reserved2[16]; |
43 | __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ | 43 | __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ |
44 | __u32 testinfo_len; | 44 | __u32 testinfo_len; |
45 | __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ | 45 | __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ |
46 | __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ | 46 | __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | #define SOPASS_MAX 6 | 49 | #define SOPASS_MAX 6 |
50 | /* wake-on-lan settings */ | 50 | /* wake-on-lan settings */ |
51 | struct ethtool_wolinfo { | 51 | struct ethtool_wolinfo { |
52 | __u32 cmd; | 52 | __u32 cmd; |
53 | __u32 supported; | 53 | __u32 supported; |
54 | __u32 wolopts; | 54 | __u32 wolopts; |
55 | __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ | 55 | __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ |
56 | }; | 56 | }; |
57 | 57 | ||
58 | /* for passing single values */ | 58 | /* for passing single values */ |
59 | struct ethtool_value { | 59 | struct ethtool_value { |
60 | __u32 cmd; | 60 | __u32 cmd; |
61 | __u32 data; | 61 | __u32 data; |
62 | }; | 62 | }; |
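
struct ethtool_value is the carrier for simple on/off commands such as ETHTOOL_STXCSUM, which is what "ethtool -K ethX tx on|off" ultimately issues before the kernel calls the driver's set_tx_csum handler. A minimal userspace sketch of that path, for illustration only (the helper name set_tx_csum_flag is made up):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_tx_csum_flag(const char *ifname, __u32 enable)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_STXCSUM, .data = enable };
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;		/* kernel copies the ethtool_value in and out */

	err = ioctl(fd, SIOCETHTOOL, &ifr);	/* dispatches to the driver's set_tx_csum */
	close(fd);
	return err;
}

For example, set_tx_csum_flag("eth0", 1) is the ioctl-level equivalent of "ethtool -K eth0 tx on".
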
63 | 63 | ||
64 | /* for passing big chunks of data */ | 64 | /* for passing big chunks of data */ |
65 | struct ethtool_regs { | 65 | struct ethtool_regs { |
66 | __u32 cmd; | 66 | __u32 cmd; |
67 | __u32 version; /* driver-specific, indicates different chips/revs */ | 67 | __u32 version; /* driver-specific, indicates different chips/revs */ |
68 | __u32 len; /* bytes */ | 68 | __u32 len; /* bytes */ |
69 | __u8 data[0]; | 69 | __u8 data[0]; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* for passing EEPROM chunks */ | 72 | /* for passing EEPROM chunks */ |
73 | struct ethtool_eeprom { | 73 | struct ethtool_eeprom { |
74 | __u32 cmd; | 74 | __u32 cmd; |
75 | __u32 magic; | 75 | __u32 magic; |
76 | __u32 offset; /* in bytes */ | 76 | __u32 offset; /* in bytes */ |
77 | __u32 len; /* in bytes */ | 77 | __u32 len; /* in bytes */ |
78 | __u8 data[0]; | 78 | __u8 data[0]; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* for configuring coalescing parameters of chip */ | 81 | /* for configuring coalescing parameters of chip */ |
82 | struct ethtool_coalesce { | 82 | struct ethtool_coalesce { |
83 | __u32 cmd; /* ETHTOOL_{G,S}COALESCE */ | 83 | __u32 cmd; /* ETHTOOL_{G,S}COALESCE */ |
84 | 84 | ||
85 | /* How many usecs to delay an RX interrupt after | 85 | /* How many usecs to delay an RX interrupt after |
86 | * a packet arrives. If 0, only rx_max_coalesced_frames | 86 | * a packet arrives. If 0, only rx_max_coalesced_frames |
87 | * is used. | 87 | * is used. |
88 | */ | 88 | */ |
89 | __u32 rx_coalesce_usecs; | 89 | __u32 rx_coalesce_usecs; |
90 | 90 | ||
91 | /* How many packets to delay an RX interrupt after | 91 | /* How many packets to delay an RX interrupt after |
92 | * a packet arrives. If 0, only rx_coalesce_usecs is | 92 | * a packet arrives. If 0, only rx_coalesce_usecs is |
93 | * used. It is illegal to set both usecs and max frames | 93 | * used. It is illegal to set both usecs and max frames |
94 | * to zero as this would cause RX interrupts to never be | 94 | * to zero as this would cause RX interrupts to never be |
95 | * generated. | 95 | * generated. |
96 | */ | 96 | */ |
97 | __u32 rx_max_coalesced_frames; | 97 | __u32 rx_max_coalesced_frames; |
98 | 98 | ||
99 | /* Same as above two parameters, except that these values | 99 | /* Same as above two parameters, except that these values |
100 | * apply while an IRQ is being serviced by the host. Not | 100 | * apply while an IRQ is being serviced by the host. Not |
101 | * all cards support this feature and the values are ignored | 101 | * all cards support this feature and the values are ignored |
102 | * in that case. | 102 | * in that case. |
103 | */ | 103 | */ |
104 | __u32 rx_coalesce_usecs_irq; | 104 | __u32 rx_coalesce_usecs_irq; |
105 | __u32 rx_max_coalesced_frames_irq; | 105 | __u32 rx_max_coalesced_frames_irq; |
106 | 106 | ||
107 | /* How many usecs to delay a TX interrupt after | 107 | /* How many usecs to delay a TX interrupt after |
108 | * a packet is sent. If 0, only tx_max_coalesced_frames | 108 | * a packet is sent. If 0, only tx_max_coalesced_frames |
109 | * is used. | 109 | * is used. |
110 | */ | 110 | */ |
111 | __u32 tx_coalesce_usecs; | 111 | __u32 tx_coalesce_usecs; |
112 | 112 | ||
113 | /* How many packets to delay a TX interrupt after | 113 | /* How many packets to delay a TX interrupt after |
114 | * a packet is sent. If 0, only tx_coalesce_usecs is | 114 | * a packet is sent. If 0, only tx_coalesce_usecs is |
115 | * used. It is illegal to set both usecs and max frames | 115 | * used. It is illegal to set both usecs and max frames |
116 | * to zero as this would cause TX interrupts to never be | 116 | * to zero as this would cause TX interrupts to never be |
117 | * generated. | 117 | * generated. |
118 | */ | 118 | */ |
119 | __u32 tx_max_coalesced_frames; | 119 | __u32 tx_max_coalesced_frames; |
120 | 120 | ||
121 | /* Same as above two parameters, except that these values | 121 | /* Same as above two parameters, except that these values |
122 | * apply while an IRQ is being serviced by the host. Not | 122 | * apply while an IRQ is being serviced by the host. Not |
123 | * all cards support this feature and the values are ignored | 123 | * all cards support this feature and the values are ignored |
124 | * in that case. | 124 | * in that case. |
125 | */ | 125 | */ |
126 | __u32 tx_coalesce_usecs_irq; | 126 | __u32 tx_coalesce_usecs_irq; |
127 | __u32 tx_max_coalesced_frames_irq; | 127 | __u32 tx_max_coalesced_frames_irq; |
128 | 128 | ||
129 | /* How many usecs to delay in-memory statistics | 129 | /* How many usecs to delay in-memory statistics |
130 | * block updates. Some drivers do not have an in-memory | 130 | * block updates. Some drivers do not have an in-memory |
131 | * statistic block, and in such cases this value is ignored. | 131 | * statistic block, and in such cases this value is ignored. |
132 | * This value must not be zero. | 132 | * This value must not be zero. |
133 | */ | 133 | */ |
134 | __u32 stats_block_coalesce_usecs; | 134 | __u32 stats_block_coalesce_usecs; |
135 | 135 | ||
136 | /* Adaptive RX/TX coalescing is an algorithm implemented by | 136 | /* Adaptive RX/TX coalescing is an algorithm implemented by |
137 | * some drivers to improve latency under low packet rates and | 137 | * some drivers to improve latency under low packet rates and |
138 | * improve throughput under high packet rates. Some drivers | 138 | * improve throughput under high packet rates. Some drivers |
139 | * only implement one of RX or TX adaptive coalescing. Anything | 139 | * only implement one of RX or TX adaptive coalescing. Anything |
140 | * not implemented by the driver causes these values to be | 140 | * not implemented by the driver causes these values to be |
141 | * silently ignored. | 141 | * silently ignored. |
142 | */ | 142 | */ |
143 | __u32 use_adaptive_rx_coalesce; | 143 | __u32 use_adaptive_rx_coalesce; |
144 | __u32 use_adaptive_tx_coalesce; | 144 | __u32 use_adaptive_tx_coalesce; |
145 | 145 | ||
146 | /* When the packet rate (measured in packets per second) | 146 | /* When the packet rate (measured in packets per second) |
147 | * is below pkt_rate_low, the {rx,tx}_*_low parameters are | 147 | * is below pkt_rate_low, the {rx,tx}_*_low parameters are |
148 | * used. | 148 | * used. |
149 | */ | 149 | */ |
150 | __u32 pkt_rate_low; | 150 | __u32 pkt_rate_low; |
151 | __u32 rx_coalesce_usecs_low; | 151 | __u32 rx_coalesce_usecs_low; |
152 | __u32 rx_max_coalesced_frames_low; | 152 | __u32 rx_max_coalesced_frames_low; |
153 | __u32 tx_coalesce_usecs_low; | 153 | __u32 tx_coalesce_usecs_low; |
154 | __u32 tx_max_coalesced_frames_low; | 154 | __u32 tx_max_coalesced_frames_low; |
155 | 155 | ||
156 | /* When the packet rate is below pkt_rate_high but above | 156 | /* When the packet rate is below pkt_rate_high but above |
157 | * pkt_rate_low (both measured in packets per second) the | 157 | * pkt_rate_low (both measured in packets per second) the |
158 | * normal {rx,tx}_* coalescing parameters are used. | 158 | * normal {rx,tx}_* coalescing parameters are used. |
159 | */ | 159 | */ |
160 | 160 | ||
161 | /* When the packet rate (measured in packets per second) | 161 | /* When the packet rate (measured in packets per second) |
162 | * is above pkt_rate_high, the {rx,tx}_*_high parameters are | 162 | * is above pkt_rate_high, the {rx,tx}_*_high parameters are |
163 | * used. | 163 | * used. |
164 | */ | 164 | */ |
165 | __u32 pkt_rate_high; | 165 | __u32 pkt_rate_high; |
166 | __u32 rx_coalesce_usecs_high; | 166 | __u32 rx_coalesce_usecs_high; |
167 | __u32 rx_max_coalesced_frames_high; | 167 | __u32 rx_max_coalesced_frames_high; |
168 | __u32 tx_coalesce_usecs_high; | 168 | __u32 tx_coalesce_usecs_high; |
169 | __u32 tx_max_coalesced_frames_high; | 169 | __u32 tx_max_coalesced_frames_high; |
170 | 170 | ||
171 | /* How often to do adaptive coalescing packet rate sampling, | 171 | /* How often to do adaptive coalescing packet rate sampling, |
172 | * measured in seconds. Must not be zero. | 172 | * measured in seconds. Must not be zero. |
173 | */ | 173 | */ |
174 | __u32 rate_sample_interval; | 174 | __u32 rate_sample_interval; |
175 | }; | 175 | }; |
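
As a reading aid for the adaptive-coalescing comments above (this is not part of the header), the three parameter bands can be expressed as a small selector; pkt_rate_low and pkt_rate_high are the two thresholds the comments describe:

enum coal_band { COAL_BAND_LOW, COAL_BAND_NORMAL, COAL_BAND_HIGH };

/* Which {rx,tx}_* parameter set applies at a sampled packet rate,
 * per the struct ethtool_coalesce comments.
 */
static enum coal_band coal_band_for_rate(const struct ethtool_coalesce *ec,
					 __u32 pkts_per_sec)
{
	if (pkts_per_sec < ec->pkt_rate_low)
		return COAL_BAND_LOW;		/* {rx,tx}_*_low values used */
	if (pkts_per_sec < ec->pkt_rate_high)
		return COAL_BAND_NORMAL;	/* normal {rx,tx}_* values used */
	return COAL_BAND_HIGH;			/* {rx,tx}_*_high values used */
}
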
176 | 176 | ||
177 | /* for configuring RX/TX ring parameters */ | 177 | /* for configuring RX/TX ring parameters */ |
178 | struct ethtool_ringparam { | 178 | struct ethtool_ringparam { |
179 | __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ | 179 | __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ |
180 | 180 | ||
181 | /* Read only attributes. These indicate the maximum number | 181 | /* Read only attributes. These indicate the maximum number |
182 | * of pending RX/TX ring entries the driver will allow the | 182 | * of pending RX/TX ring entries the driver will allow the |
183 | * user to set. | 183 | * user to set. |
184 | */ | 184 | */ |
185 | __u32 rx_max_pending; | 185 | __u32 rx_max_pending; |
186 | __u32 rx_mini_max_pending; | 186 | __u32 rx_mini_max_pending; |
187 | __u32 rx_jumbo_max_pending; | 187 | __u32 rx_jumbo_max_pending; |
188 | __u32 tx_max_pending; | 188 | __u32 tx_max_pending; |
189 | 189 | ||
190 | /* Values changeable by the user. The valid values are | 190 | /* Values changeable by the user. The valid values are |
191 | * in the range 1 to the "*_max_pending" counterpart above. | 191 | * in the range 1 to the "*_max_pending" counterpart above. |
192 | */ | 192 | */ |
193 | __u32 rx_pending; | 193 | __u32 rx_pending; |
194 | __u32 rx_mini_pending; | 194 | __u32 rx_mini_pending; |
195 | __u32 rx_jumbo_pending; | 195 | __u32 rx_jumbo_pending; |
196 | __u32 tx_pending; | 196 | __u32 tx_pending; |
197 | }; | 197 | }; |
198 | 198 | ||
199 | /* for configuring link flow control parameters */ | 199 | /* for configuring link flow control parameters */ |
200 | struct ethtool_pauseparam { | 200 | struct ethtool_pauseparam { |
201 | __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ | 201 | __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ |
202 | 202 | ||
203 | /* If the link is being auto-negotiated (via ethtool_cmd.autoneg | 203 | /* If the link is being auto-negotiated (via ethtool_cmd.autoneg |
204 | * being true) the user may set 'autoneg' here non-zero to have the | 204 | * being true) the user may set 'autoneg' here non-zero to have the |
205 | * pause parameters be auto-negotiated too. In such a case, the | 205 | * pause parameters be auto-negotiated too. In such a case, the |
206 | * {rx,tx}_pause values below determine what capabilities are | 206 | * {rx,tx}_pause values below determine what capabilities are |
207 | * advertised. | 207 | * advertised. |
208 | * | 208 | * |
209 | * If 'autoneg' is zero or the link is not being auto-negotiated, | 209 | * If 'autoneg' is zero or the link is not being auto-negotiated, |
210 | * then {rx,tx}_pause force the driver to use/not-use pause | 210 | * then {rx,tx}_pause force the driver to use/not-use pause |
211 | * flow control. | 211 | * flow control. |
212 | */ | 212 | */ |
213 | __u32 autoneg; | 213 | __u32 autoneg; |
214 | __u32 rx_pause; | 214 | __u32 rx_pause; |
215 | __u32 tx_pause; | 215 | __u32 tx_pause; |
216 | }; | 216 | }; |
217 | 217 | ||
218 | #define ETH_GSTRING_LEN 32 | 218 | #define ETH_GSTRING_LEN 32 |
219 | enum ethtool_stringset { | 219 | enum ethtool_stringset { |
220 | ETH_SS_TEST = 0, | 220 | ETH_SS_TEST = 0, |
221 | ETH_SS_STATS, | 221 | ETH_SS_STATS, |
222 | }; | 222 | }; |
223 | 223 | ||
224 | /* for passing string sets for data tagging */ | 224 | /* for passing string sets for data tagging */ |
225 | struct ethtool_gstrings { | 225 | struct ethtool_gstrings { |
226 | __u32 cmd; /* ETHTOOL_GSTRINGS */ | 226 | __u32 cmd; /* ETHTOOL_GSTRINGS */ |
227 | __u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */ | 227 | __u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */ |
228 | __u32 len; /* number of strings in the string set */ | 228 | __u32 len; /* number of strings in the string set */ |
229 | __u8 data[0]; | 229 | __u8 data[0]; |
230 | }; | 230 | }; |
231 | 231 | ||
232 | enum ethtool_test_flags { | 232 | enum ethtool_test_flags { |
233 | ETH_TEST_FL_OFFLINE = (1 << 0), /* online / offline */ | 233 | ETH_TEST_FL_OFFLINE = (1 << 0), /* online / offline */ |
234 | ETH_TEST_FL_FAILED = (1 << 1), /* test passed / failed */ | 234 | ETH_TEST_FL_FAILED = (1 << 1), /* test passed / failed */ |
235 | }; | 235 | }; |
236 | 236 | ||
237 | /* for requesting NIC test and getting results*/ | 237 | /* for requesting NIC test and getting results*/ |
238 | struct ethtool_test { | 238 | struct ethtool_test { |
239 | __u32 cmd; /* ETHTOOL_TEST */ | 239 | __u32 cmd; /* ETHTOOL_TEST */ |
240 | __u32 flags; /* ETH_TEST_FL_xxx */ | 240 | __u32 flags; /* ETH_TEST_FL_xxx */ |
241 | __u32 reserved; | 241 | __u32 reserved; |
242 | __u32 len; /* result length, in number of u64 elements */ | 242 | __u32 len; /* result length, in number of u64 elements */ |
243 | __u64 data[0]; | 243 | __u64 data[0]; |
244 | }; | 244 | }; |
245 | 245 | ||
246 | /* for dumping NIC-specific statistics */ | 246 | /* for dumping NIC-specific statistics */ |
247 | struct ethtool_stats { | 247 | struct ethtool_stats { |
248 | __u32 cmd; /* ETHTOOL_GSTATS */ | 248 | __u32 cmd; /* ETHTOOL_GSTATS */ |
249 | __u32 n_stats; /* number of u64's being returned */ | 249 | __u32 n_stats; /* number of u64's being returned */ |
250 | __u64 data[0]; | 250 | __u64 data[0]; |
251 | }; | 251 | }; |
252 | 252 | ||
253 | struct ethtool_perm_addr { | 253 | struct ethtool_perm_addr { |
254 | __u32 cmd; /* ETHTOOL_GPERMADDR */ | 254 | __u32 cmd; /* ETHTOOL_GPERMADDR */ |
255 | __u32 size; | 255 | __u32 size; |
256 | __u8 data[0]; | 256 | __u8 data[0]; |
257 | }; | 257 | }; |
258 | 258 | ||
259 | #ifdef __KERNEL__ | 259 | #ifdef __KERNEL__ |
260 | 260 | ||
261 | struct net_device; | 261 | struct net_device; |
262 | 262 | ||
263 | /* Some generic methods drivers may use in their ethtool_ops */ | 263 | /* Some generic methods drivers may use in their ethtool_ops */ |
264 | u32 ethtool_op_get_link(struct net_device *dev); | 264 | u32 ethtool_op_get_link(struct net_device *dev); |
265 | u32 ethtool_op_get_tx_csum(struct net_device *dev); | 265 | u32 ethtool_op_get_tx_csum(struct net_device *dev); |
266 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); | 266 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); |
267 | int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); | 267 | int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); |
268 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data); | ||
268 | u32 ethtool_op_get_sg(struct net_device *dev); | 269 | u32 ethtool_op_get_sg(struct net_device *dev); |
269 | int ethtool_op_set_sg(struct net_device *dev, u32 data); | 270 | int ethtool_op_set_sg(struct net_device *dev, u32 data); |
270 | u32 ethtool_op_get_tso(struct net_device *dev); | 271 | u32 ethtool_op_get_tso(struct net_device *dev); |
271 | int ethtool_op_set_tso(struct net_device *dev, u32 data); | 272 | int ethtool_op_set_tso(struct net_device *dev, u32 data); |
272 | int ethtool_op_get_perm_addr(struct net_device *dev, | 273 | int ethtool_op_get_perm_addr(struct net_device *dev, |
273 | struct ethtool_perm_addr *addr, u8 *data); | 274 | struct ethtool_perm_addr *addr, u8 *data); |
274 | u32 ethtool_op_get_ufo(struct net_device *dev); | 275 | u32 ethtool_op_get_ufo(struct net_device *dev); |
275 | int ethtool_op_set_ufo(struct net_device *dev, u32 data); | 276 | int ethtool_op_set_ufo(struct net_device *dev, u32 data); |
276 | 277 | ||
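
The declaration of ethtool_op_set_tx_ipv6_csum() above is the only part of the new helper visible in this excerpt; its body is added to net/core/ethtool.c, which is truncated at the bottom of this page. By analogy with ethtool_op_set_tx_hw_csum(), it presumably just sets or clears both checksum feature bits, roughly:

int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	return 0;
}
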
277 | /** | 278 | /** |
278 | * &ethtool_ops - Alter and report network device settings | 279 | * &ethtool_ops - Alter and report network device settings |
279 | * get_settings: Get device-specific settings | 280 | * get_settings: Get device-specific settings |
280 | * set_settings: Set device-specific settings | 281 | * set_settings: Set device-specific settings |
281 | * get_drvinfo: Report driver information | 282 | * get_drvinfo: Report driver information |
282 | * get_regs: Get device registers | 283 | * get_regs: Get device registers |
283 | * get_wol: Report whether Wake-on-Lan is enabled | 284 | * get_wol: Report whether Wake-on-Lan is enabled |
284 | * set_wol: Turn Wake-on-Lan on or off | 285 | * set_wol: Turn Wake-on-Lan on or off |
285 | * get_msglevel: Report driver message level | 286 | * get_msglevel: Report driver message level |
286 | * set_msglevel: Set driver message level | 287 | * set_msglevel: Set driver message level |
287 | * nway_reset: Restart autonegotiation | 288 | * nway_reset: Restart autonegotiation |
288 | * get_link: Get link status | 289 | * get_link: Get link status |
289 | * get_eeprom: Read data from the device EEPROM | 290 | * get_eeprom: Read data from the device EEPROM |
290 | * set_eeprom: Write data to the device EEPROM | 291 | * set_eeprom: Write data to the device EEPROM |
291 | * get_coalesce: Get interrupt coalescing parameters | 292 | * get_coalesce: Get interrupt coalescing parameters |
292 | * set_coalesce: Set interrupt coalescing parameters | 293 | * set_coalesce: Set interrupt coalescing parameters |
293 | * get_ringparam: Report ring sizes | 294 | * get_ringparam: Report ring sizes |
294 | * set_ringparam: Set ring sizes | 295 | * set_ringparam: Set ring sizes |
295 | * get_pauseparam: Report pause parameters | 296 | * get_pauseparam: Report pause parameters |
296 | * set_pauseparam: Set pause parameters | 297 | * set_pauseparam: Set pause parameters |
297 | * get_rx_csum: Report whether receive checksums are turned on or off | 298 | * get_rx_csum: Report whether receive checksums are turned on or off |
298 | * set_rx_csum: Turn receive checksum on or off | 299 | * set_rx_csum: Turn receive checksum on or off |
299 | * get_tx_csum: Report whether transmit checksums are turned on or off | 300 | * get_tx_csum: Report whether transmit checksums are turned on or off |
300 | * set_tx_csum: Turn transmit checksums on or off | 301 | * set_tx_csum: Turn transmit checksums on or off |
301 | * get_sg: Report whether scatter-gather is enabled | 302 | * get_sg: Report whether scatter-gather is enabled |
302 | * set_sg: Turn scatter-gather on or off | 303 | * set_sg: Turn scatter-gather on or off |
303 | * get_tso: Report whether TCP segmentation offload is enabled | 304 | * get_tso: Report whether TCP segmentation offload is enabled |
304 | * set_tso: Turn TCP segmentation offload on or off | 305 | * set_tso: Turn TCP segmentation offload on or off |
305 | * get_ufo: Report whether UDP fragmentation offload is enabled | 306 | * get_ufo: Report whether UDP fragmentation offload is enabled |
306 | * set_ufo: Turn UDP fragmentation offload on or off | 307 | * set_ufo: Turn UDP fragmentation offload on or off |
307 | * self_test: Run specified self-tests | 308 | * self_test: Run specified self-tests |
308 | * get_strings: Return a set of strings that describe the requested objects | 309 | * get_strings: Return a set of strings that describe the requested objects |
309 | * phys_id: Identify the device | 310 | * phys_id: Identify the device |
310 | * get_stats: Return statistics about the device | 311 | * get_stats: Return statistics about the device |
311 | * get_perm_addr: Gets the permanent hardware address | 312 | * get_perm_addr: Gets the permanent hardware address |
312 | * | 313 | * |
313 | * Description: | 314 | * Description: |
314 | * | 315 | * |
315 | * get_settings: | 316 | * get_settings: |
316 | * @get_settings is passed an &ethtool_cmd to fill in. It returns | 317 | * @get_settings is passed an &ethtool_cmd to fill in. It returns |
317 | * a negative errno or zero. | 318 | * a negative errno or zero. |
318 | * | 319 | * |
319 | * set_settings: | 320 | * set_settings: |
320 | * @set_settings is passed an &ethtool_cmd and should attempt to set | 321 | * @set_settings is passed an &ethtool_cmd and should attempt to set |
321 | * all the settings this device supports. It may return an error value | 322 | * all the settings this device supports. It may return an error value |
322 | * if something goes wrong (otherwise 0). | 323 | * if something goes wrong (otherwise 0). |
323 | * | 324 | * |
324 | * get_eeprom: | 325 | * get_eeprom: |
325 | * Should fill in the magic field. Don't need to check len for zero | 326 | * Should fill in the magic field. Don't need to check len for zero |
326 | * or wraparound. Fill in the data argument with the eeprom values | 327 | * or wraparound. Fill in the data argument with the eeprom values |
327 | * from offset to offset + len. Update len to the amount read. | 328 | * from offset to offset + len. Update len to the amount read. |
328 | * Returns an error or zero. | 329 | * Returns an error or zero. |
329 | * | 330 | * |
330 | * set_eeprom: | 331 | * set_eeprom: |
331 | * Should validate the magic field. Don't need to check len for zero | 332 | * Should validate the magic field. Don't need to check len for zero |
332 | * or wraparound. Update len to the amount written. Returns an error | 333 | * or wraparound. Update len to the amount written. Returns an error |
333 | * or zero. | 334 | * or zero. |
334 | */ | 335 | */ |
335 | struct ethtool_ops { | 336 | struct ethtool_ops { |
336 | int (*get_settings)(struct net_device *, struct ethtool_cmd *); | 337 | int (*get_settings)(struct net_device *, struct ethtool_cmd *); |
337 | int (*set_settings)(struct net_device *, struct ethtool_cmd *); | 338 | int (*set_settings)(struct net_device *, struct ethtool_cmd *); |
338 | void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); | 339 | void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); |
339 | int (*get_regs_len)(struct net_device *); | 340 | int (*get_regs_len)(struct net_device *); |
340 | void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); | 341 | void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); |
341 | void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); | 342 | void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); |
342 | int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); | 343 | int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); |
343 | u32 (*get_msglevel)(struct net_device *); | 344 | u32 (*get_msglevel)(struct net_device *); |
344 | void (*set_msglevel)(struct net_device *, u32); | 345 | void (*set_msglevel)(struct net_device *, u32); |
345 | int (*nway_reset)(struct net_device *); | 346 | int (*nway_reset)(struct net_device *); |
346 | u32 (*get_link)(struct net_device *); | 347 | u32 (*get_link)(struct net_device *); |
347 | int (*get_eeprom_len)(struct net_device *); | 348 | int (*get_eeprom_len)(struct net_device *); |
348 | int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); | 349 | int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); |
349 | int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); | 350 | int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); |
350 | int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); | 351 | int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); |
351 | int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); | 352 | int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); |
352 | void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); | 353 | void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); |
353 | int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); | 354 | int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); |
354 | void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*); | 355 | void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*); |
355 | int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); | 356 | int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); |
356 | u32 (*get_rx_csum)(struct net_device *); | 357 | u32 (*get_rx_csum)(struct net_device *); |
357 | int (*set_rx_csum)(struct net_device *, u32); | 358 | int (*set_rx_csum)(struct net_device *, u32); |
358 | u32 (*get_tx_csum)(struct net_device *); | 359 | u32 (*get_tx_csum)(struct net_device *); |
359 | int (*set_tx_csum)(struct net_device *, u32); | 360 | int (*set_tx_csum)(struct net_device *, u32); |
360 | u32 (*get_sg)(struct net_device *); | 361 | u32 (*get_sg)(struct net_device *); |
361 | int (*set_sg)(struct net_device *, u32); | 362 | int (*set_sg)(struct net_device *, u32); |
362 | u32 (*get_tso)(struct net_device *); | 363 | u32 (*get_tso)(struct net_device *); |
363 | int (*set_tso)(struct net_device *, u32); | 364 | int (*set_tso)(struct net_device *, u32); |
364 | int (*self_test_count)(struct net_device *); | 365 | int (*self_test_count)(struct net_device *); |
365 | void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); | 366 | void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); |
366 | void (*get_strings)(struct net_device *, u32 stringset, u8 *); | 367 | void (*get_strings)(struct net_device *, u32 stringset, u8 *); |
367 | int (*phys_id)(struct net_device *, u32); | 368 | int (*phys_id)(struct net_device *, u32); |
368 | int (*get_stats_count)(struct net_device *); | 369 | int (*get_stats_count)(struct net_device *); |
369 | void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); | 370 | void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); |
370 | int (*get_perm_addr)(struct net_device *, struct ethtool_perm_addr *, u8 *); | 371 | int (*get_perm_addr)(struct net_device *, struct ethtool_perm_addr *, u8 *); |
371 | int (*begin)(struct net_device *); | 372 | int (*begin)(struct net_device *); |
372 | void (*complete)(struct net_device *); | 373 | void (*complete)(struct net_device *); |
373 | u32 (*get_ufo)(struct net_device *); | 374 | u32 (*get_ufo)(struct net_device *); |
374 | int (*set_ufo)(struct net_device *, u32); | 375 | int (*set_ufo)(struct net_device *, u32); |
375 | }; | 376 | }; |
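
To show where these ops get used, a hypothetical driver could wire the generic helpers declared earlier into its ethtool_ops as below; whether set_tx_csum points directly at ethtool_op_set_tx_ipv6_csum or at a driver wrapper (as tg3 and bnx2 do, per the commit message) is a per-driver choice:

static struct ethtool_ops example_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= ethtool_op_set_tx_ipv6_csum,	/* chip offloads IPv4 and IPv6 csums */
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= ethtool_op_set_sg,
	.get_tso	= ethtool_op_get_tso,
	.set_tso	= ethtool_op_set_tso,
	.get_perm_addr	= ethtool_op_get_perm_addr,
};

/* attached at probe time, e.g.: dev->ethtool_ops = &example_ethtool_ops; */
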
376 | #endif /* __KERNEL__ */ | 377 | #endif /* __KERNEL__ */ |
377 | 378 | ||
378 | /* CMDs currently supported */ | 379 | /* CMDs currently supported */ |
379 | #define ETHTOOL_GSET 0x00000001 /* Get settings. */ | 380 | #define ETHTOOL_GSET 0x00000001 /* Get settings. */ |
380 | #define ETHTOOL_SSET 0x00000002 /* Set settings. */ | 381 | #define ETHTOOL_SSET 0x00000002 /* Set settings. */ |
381 | #define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ | 382 | #define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ |
382 | #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ | 383 | #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ |
383 | #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ | 384 | #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ |
384 | #define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ | 385 | #define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ |
385 | #define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ | 386 | #define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ |
386 | #define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ | 387 | #define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ |
387 | #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ | 388 | #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ |
388 | #define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ | 389 | #define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ |
389 | #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ | 390 | #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ |
390 | #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ | 391 | #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ |
391 | #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ | 392 | #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ |
392 | #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ | 393 | #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ |
393 | #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ | 394 | #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ |
394 | #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ | 395 | #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ |
395 | #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ | 396 | #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ |
396 | #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ | 397 | #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ |
397 | #define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ | 398 | #define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ |
398 | #define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ | 399 | #define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ |
399 | #define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ | 400 | #define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ |
400 | #define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ | 401 | #define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ |
401 | #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable | 402 | #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable |
402 | * (ethtool_value) */ | 403 | * (ethtool_value) */ |
403 | #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable | 404 | #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable |
404 | * (ethtool_value). */ | 405 | * (ethtool_value). */ |
405 | #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */ | 406 | #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */ |
406 | #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ | 407 | #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ |
407 | #define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ | 408 | #define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ |
408 | #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ | 409 | #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ |
409 | #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ | 410 | #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ |
410 | #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ | 411 | #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ |
411 | #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ | 412 | #define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ |
412 | #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ | 413 | #define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ |
413 | #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ | 414 | #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ |
414 | #define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ | 415 | #define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ |
415 | #define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ | 416 | #define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ |
416 | 417 | ||
417 | /* compatibility with older code */ | 418 | /* compatibility with older code */ |
418 | #define SPARC_ETH_GSET ETHTOOL_GSET | 419 | #define SPARC_ETH_GSET ETHTOOL_GSET |
419 | #define SPARC_ETH_SSET ETHTOOL_SSET | 420 | #define SPARC_ETH_SSET ETHTOOL_SSET |
420 | 421 | ||
421 | /* Indicates what features are supported by the interface. */ | 422 | /* Indicates what features are supported by the interface. */ |
422 | #define SUPPORTED_10baseT_Half (1 << 0) | 423 | #define SUPPORTED_10baseT_Half (1 << 0) |
423 | #define SUPPORTED_10baseT_Full (1 << 1) | 424 | #define SUPPORTED_10baseT_Full (1 << 1) |
424 | #define SUPPORTED_100baseT_Half (1 << 2) | 425 | #define SUPPORTED_100baseT_Half (1 << 2) |
425 | #define SUPPORTED_100baseT_Full (1 << 3) | 426 | #define SUPPORTED_100baseT_Full (1 << 3) |
426 | #define SUPPORTED_1000baseT_Half (1 << 4) | 427 | #define SUPPORTED_1000baseT_Half (1 << 4) |
427 | #define SUPPORTED_1000baseT_Full (1 << 5) | 428 | #define SUPPORTED_1000baseT_Full (1 << 5) |
428 | #define SUPPORTED_Autoneg (1 << 6) | 429 | #define SUPPORTED_Autoneg (1 << 6) |
429 | #define SUPPORTED_TP (1 << 7) | 430 | #define SUPPORTED_TP (1 << 7) |
430 | #define SUPPORTED_AUI (1 << 8) | 431 | #define SUPPORTED_AUI (1 << 8) |
431 | #define SUPPORTED_MII (1 << 9) | 432 | #define SUPPORTED_MII (1 << 9) |
432 | #define SUPPORTED_FIBRE (1 << 10) | 433 | #define SUPPORTED_FIBRE (1 << 10) |
433 | #define SUPPORTED_BNC (1 << 11) | 434 | #define SUPPORTED_BNC (1 << 11) |
434 | #define SUPPORTED_10000baseT_Full (1 << 12) | 435 | #define SUPPORTED_10000baseT_Full (1 << 12) |
435 | #define SUPPORTED_Pause (1 << 13) | 436 | #define SUPPORTED_Pause (1 << 13) |
436 | #define SUPPORTED_Asym_Pause (1 << 14) | 437 | #define SUPPORTED_Asym_Pause (1 << 14) |
437 | #define SUPPORTED_2500baseX_Full (1 << 15) | 438 | #define SUPPORTED_2500baseX_Full (1 << 15) |
438 | 439 | ||
439 | /* Indicates what features are advertised by the interface. */ | 440 | /* Indicates what features are advertised by the interface. */ |
440 | #define ADVERTISED_10baseT_Half (1 << 0) | 441 | #define ADVERTISED_10baseT_Half (1 << 0) |
441 | #define ADVERTISED_10baseT_Full (1 << 1) | 442 | #define ADVERTISED_10baseT_Full (1 << 1) |
442 | #define ADVERTISED_100baseT_Half (1 << 2) | 443 | #define ADVERTISED_100baseT_Half (1 << 2) |
443 | #define ADVERTISED_100baseT_Full (1 << 3) | 444 | #define ADVERTISED_100baseT_Full (1 << 3) |
444 | #define ADVERTISED_1000baseT_Half (1 << 4) | 445 | #define ADVERTISED_1000baseT_Half (1 << 4) |
445 | #define ADVERTISED_1000baseT_Full (1 << 5) | 446 | #define ADVERTISED_1000baseT_Full (1 << 5) |
446 | #define ADVERTISED_Autoneg (1 << 6) | 447 | #define ADVERTISED_Autoneg (1 << 6) |
447 | #define ADVERTISED_TP (1 << 7) | 448 | #define ADVERTISED_TP (1 << 7) |
448 | #define ADVERTISED_AUI (1 << 8) | 449 | #define ADVERTISED_AUI (1 << 8) |
449 | #define ADVERTISED_MII (1 << 9) | 450 | #define ADVERTISED_MII (1 << 9) |
450 | #define ADVERTISED_FIBRE (1 << 10) | 451 | #define ADVERTISED_FIBRE (1 << 10) |
451 | #define ADVERTISED_BNC (1 << 11) | 452 | #define ADVERTISED_BNC (1 << 11) |
452 | #define ADVERTISED_10000baseT_Full (1 << 12) | 453 | #define ADVERTISED_10000baseT_Full (1 << 12) |
453 | #define ADVERTISED_Pause (1 << 13) | 454 | #define ADVERTISED_Pause (1 << 13) |
454 | #define ADVERTISED_Asym_Pause (1 << 14) | 455 | #define ADVERTISED_Asym_Pause (1 << 14) |
455 | #define ADVERTISED_2500baseX_Full (1 << 15) | 456 | #define ADVERTISED_2500baseX_Full (1 << 15) |
456 | 457 | ||
457 | /* The following are all involved in forcing a particular link | 458 | /* The following are all involved in forcing a particular link |
458 | * mode for the device when setting things. When getting the | 459 | * mode for the device when setting things. When getting the |
459 | * device's settings, these indicate the current mode and whether | 460 | * device's settings, these indicate the current mode and whether |
460 | * it was forced into this mode or autonegotiated. | 461 | * it was forced into this mode or autonegotiated. |
461 | */ | 462 | */ |
462 | 463 | ||
463 | /* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ | 464 | /* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ |
464 | #define SPEED_10 10 | 465 | #define SPEED_10 10 |
465 | #define SPEED_100 100 | 466 | #define SPEED_100 100 |
466 | #define SPEED_1000 1000 | 467 | #define SPEED_1000 1000 |
467 | #define SPEED_2500 2500 | 468 | #define SPEED_2500 2500 |
468 | #define SPEED_10000 10000 | 469 | #define SPEED_10000 10000 |
469 | 470 | ||
470 | /* Duplex, half or full. */ | 471 | /* Duplex, half or full. */ |
471 | #define DUPLEX_HALF 0x00 | 472 | #define DUPLEX_HALF 0x00 |
472 | #define DUPLEX_FULL 0x01 | 473 | #define DUPLEX_FULL 0x01 |
473 | 474 | ||
474 | /* Which connector port. */ | 475 | /* Which connector port. */ |
475 | #define PORT_TP 0x00 | 476 | #define PORT_TP 0x00 |
476 | #define PORT_AUI 0x01 | 477 | #define PORT_AUI 0x01 |
477 | #define PORT_MII 0x02 | 478 | #define PORT_MII 0x02 |
478 | #define PORT_FIBRE 0x03 | 479 | #define PORT_FIBRE 0x03 |
479 | #define PORT_BNC 0x04 | 480 | #define PORT_BNC 0x04 |
480 | 481 | ||
481 | /* Which transceiver to use. */ | 482 | /* Which transceiver to use. */ |
482 | #define XCVR_INTERNAL 0x00 | 483 | #define XCVR_INTERNAL 0x00 |
483 | #define XCVR_EXTERNAL 0x01 | 484 | #define XCVR_EXTERNAL 0x01 |
484 | #define XCVR_DUMMY1 0x02 | 485 | #define XCVR_DUMMY1 0x02 |
485 | #define XCVR_DUMMY2 0x03 | 486 | #define XCVR_DUMMY2 0x03 |
486 | #define XCVR_DUMMY3 0x04 | 487 | #define XCVR_DUMMY3 0x04 |
487 | 488 | ||
488 | /* Enable or disable autonegotiation. If this is set to enable, | 489 | /* Enable or disable autonegotiation. If this is set to enable, |
489 | * the forced link modes above are completely ignored. | 490 | * the forced link modes above are completely ignored. |
490 | */ | 491 | */ |
491 | #define AUTONEG_DISABLE 0x00 | 492 | #define AUTONEG_DISABLE 0x00 |
492 | #define AUTONEG_ENABLE 0x01 | 493 | #define AUTONEG_ENABLE 0x01 |
493 | 494 | ||
494 | /* Wake-On-LAN options. */ | 495 | /* Wake-On-LAN options. */ |
495 | #define WAKE_PHY (1 << 0) | 496 | #define WAKE_PHY (1 << 0) |
496 | #define WAKE_UCAST (1 << 1) | 497 | #define WAKE_UCAST (1 << 1) |
497 | #define WAKE_MCAST (1 << 2) | 498 | #define WAKE_MCAST (1 << 2) |
498 | #define WAKE_BCAST (1 << 3) | 499 | #define WAKE_BCAST (1 << 3) |
499 | #define WAKE_ARP (1 << 4) | 500 | #define WAKE_ARP (1 << 4) |
500 | #define WAKE_MAGIC (1 << 5) | 501 | #define WAKE_MAGIC (1 << 5) |
501 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ | 502 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ |
502 | 503 | ||
503 | #endif /* _LINUX_ETHTOOL_H */ | 504 | #endif /* _LINUX_ETHTOOL_H */ |
504 | 505 |
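
The SUPPORTED_*/ADVERTISED_* bit masks and the SPEED_*/DUPLEX_*/PORT_*/XCVR_*/AUTONEG_* constants above are what a driver packs into struct ethtool_cmd from its get_settings callback. The sketch below shows one way that might look for a fixed gigabit copper link; it is not part of this commit, and the chosen link modes are purely illustrative.

/* Sketch only -- not from this commit.  Reports a gigabit copper link
 * using the constants defined above; link state comes from the carrier
 * flag rather than any real PHY query. */
static int example_get_settings(struct net_device *dev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			 SUPPORTED_TP;
	cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
	cmd->speed = netif_carrier_ok(dev) ? SPEED_1000 : 0;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_TP;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = 0;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
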
net/core/ethtool.c
1 | /* | 1 | /* |
2 | * net/core/ethtool.c - Ethtool ioctl handler | 2 | * net/core/ethtool.c - Ethtool ioctl handler |
3 | * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> | 3 | * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> |
4 | * | 4 | * |
5 | * This file is where we call all the ethtool_ops commands to get | 5 | * This file is where we call all the ethtool_ops commands to get |
6 | * the information ethtool needs. We fall back to calling do_ioctl() | 6 | * the information ethtool needs. We fall back to calling do_ioctl() |
7 | * for drivers which haven't been converted to ethtool_ops yet. | 7 | * for drivers which haven't been converted to ethtool_ops yet. |
8 | * | 8 | * |
9 | * It's GPL, stupid. | 9 | * It's GPL, stupid. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/ethtool.h> | 16 | #include <linux/ethtool.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * Some useful ethtool_ops methods that are device independent. | 21 | * Some useful ethtool_ops methods that are device independent. |
22 | * If we find that all drivers want to do the same thing here, | 22 | * If we find that all drivers want to do the same thing here, |
23 | * we can turn these into dev_() function calls. | 23 | * we can turn these into dev_() function calls. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | u32 ethtool_op_get_link(struct net_device *dev) | 26 | u32 ethtool_op_get_link(struct net_device *dev) |
27 | { | 27 | { |
28 | return netif_carrier_ok(dev) ? 1 : 0; | 28 | return netif_carrier_ok(dev) ? 1 : 0; |
29 | } | 29 | } |
30 | 30 | ||
31 | u32 ethtool_op_get_tx_csum(struct net_device *dev) | 31 | u32 ethtool_op_get_tx_csum(struct net_device *dev) |
32 | { | 32 | { |
33 | return (dev->features & NETIF_F_ALL_CSUM) != 0; | 33 | return (dev->features & NETIF_F_ALL_CSUM) != 0; |
34 | } | 34 | } |
35 | 35 | ||
36 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) | 36 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) |
37 | { | 37 | { |
38 | if (data) | 38 | if (data) |
39 | dev->features |= NETIF_F_IP_CSUM; | 39 | dev->features |= NETIF_F_IP_CSUM; |
40 | else | 40 | else |
41 | dev->features &= ~NETIF_F_IP_CSUM; | 41 | dev->features &= ~NETIF_F_IP_CSUM; |
42 | 42 | ||
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
46 | int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) | 46 | int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) |
47 | { | 47 | { |
48 | if (data) | 48 | if (data) |
49 | dev->features |= NETIF_F_HW_CSUM; | 49 | dev->features |= NETIF_F_HW_CSUM; |
50 | else | 50 | else |
51 | dev->features &= ~NETIF_F_HW_CSUM; | 51 | dev->features &= ~NETIF_F_HW_CSUM; |
52 | 52 | ||
53 | return 0; | 53 | return 0; |
54 | } | 54 | } |
55 | |||
56 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | ||
57 | { | ||
58 | if (data) | ||
59 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
60 | else | ||
61 | dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
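
ethtool_op_set_tx_ipv6_csum() is the helper added by this commit: it sets or clears NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM together, so hardware that can checksum both IPv4 and IPv6 packets has both feature flags follow a single tx-checksum toggle. A driver whose newer chip revisions can checksum IPv6 while older ones cannot would typically dispatch between the two helpers from its own set_tx_csum hook, roughly as sketched below; the my_set_tx_csum and hw_can_csum_ipv6() names are illustrative only and are not taken from any in-tree driver.

/* Sketch only -- hw_can_csum_ipv6() stands in for whatever
 * chip-revision or capability test the driver really uses. */
static int my_set_tx_csum(struct net_device *dev, u32 data)
{
	if (hw_can_csum_ipv6(dev))
		return ethtool_op_set_tx_ipv6_csum(dev, data);

	return ethtool_op_set_tx_csum(dev, data);
}

The driver would then point .set_tx_csum in its struct ethtool_ops at such a wrapper, so the ETHTOOL_STXCSUM command handled further down in ethtool_set_tx_csum() reaches the appropriate helper.
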
55 | u32 ethtool_op_get_sg(struct net_device *dev) | 66 | u32 ethtool_op_get_sg(struct net_device *dev) |
56 | { | 67 | { |
57 | return (dev->features & NETIF_F_SG) != 0; | 68 | return (dev->features & NETIF_F_SG) != 0; |
58 | } | 69 | } |
59 | 70 | ||
60 | int ethtool_op_set_sg(struct net_device *dev, u32 data) | 71 | int ethtool_op_set_sg(struct net_device *dev, u32 data) |
61 | { | 72 | { |
62 | if (data) | 73 | if (data) |
63 | dev->features |= NETIF_F_SG; | 74 | dev->features |= NETIF_F_SG; |
64 | else | 75 | else |
65 | dev->features &= ~NETIF_F_SG; | 76 | dev->features &= ~NETIF_F_SG; |
66 | 77 | ||
67 | return 0; | 78 | return 0; |
68 | } | 79 | } |
69 | 80 | ||
70 | u32 ethtool_op_get_tso(struct net_device *dev) | 81 | u32 ethtool_op_get_tso(struct net_device *dev) |
71 | { | 82 | { |
72 | return (dev->features & NETIF_F_TSO) != 0; | 83 | return (dev->features & NETIF_F_TSO) != 0; |
73 | } | 84 | } |
74 | 85 | ||
75 | int ethtool_op_set_tso(struct net_device *dev, u32 data) | 86 | int ethtool_op_set_tso(struct net_device *dev, u32 data) |
76 | { | 87 | { |
77 | if (data) | 88 | if (data) |
78 | dev->features |= NETIF_F_TSO; | 89 | dev->features |= NETIF_F_TSO; |
79 | else | 90 | else |
80 | dev->features &= ~NETIF_F_TSO; | 91 | dev->features &= ~NETIF_F_TSO; |
81 | 92 | ||
82 | return 0; | 93 | return 0; |
83 | } | 94 | } |
84 | 95 | ||
85 | int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *addr, u8 *data) | 96 | int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *addr, u8 *data) |
86 | { | 97 | { |
87 | unsigned char len = dev->addr_len; | 98 | unsigned char len = dev->addr_len; |
88 | if ( addr->size < len ) | 99 | if ( addr->size < len ) |
89 | return -ETOOSMALL; | 100 | return -ETOOSMALL; |
90 | 101 | ||
91 | addr->size = len; | 102 | addr->size = len; |
92 | memcpy(data, dev->perm_addr, len); | 103 | memcpy(data, dev->perm_addr, len); |
93 | return 0; | 104 | return 0; |
94 | } | 105 | } |
95 | 106 | ||
96 | 107 | ||
97 | u32 ethtool_op_get_ufo(struct net_device *dev) | 108 | u32 ethtool_op_get_ufo(struct net_device *dev) |
98 | { | 109 | { |
99 | return (dev->features & NETIF_F_UFO) != 0; | 110 | return (dev->features & NETIF_F_UFO) != 0; |
100 | } | 111 | } |
101 | 112 | ||
102 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) | 113 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) |
103 | { | 114 | { |
104 | if (data) | 115 | if (data) |
105 | dev->features |= NETIF_F_UFO; | 116 | dev->features |= NETIF_F_UFO; |
106 | else | 117 | else |
107 | dev->features &= ~NETIF_F_UFO; | 118 | dev->features &= ~NETIF_F_UFO; |
108 | return 0; | 119 | return 0; |
109 | } | 120 | } |
110 | 121 | ||
111 | /* Handlers for each ethtool command */ | 122 | /* Handlers for each ethtool command */ |
112 | 123 | ||
113 | static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) | 124 | static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) |
114 | { | 125 | { |
115 | struct ethtool_cmd cmd = { ETHTOOL_GSET }; | 126 | struct ethtool_cmd cmd = { ETHTOOL_GSET }; |
116 | int err; | 127 | int err; |
117 | 128 | ||
118 | if (!dev->ethtool_ops->get_settings) | 129 | if (!dev->ethtool_ops->get_settings) |
119 | return -EOPNOTSUPP; | 130 | return -EOPNOTSUPP; |
120 | 131 | ||
121 | err = dev->ethtool_ops->get_settings(dev, &cmd); | 132 | err = dev->ethtool_ops->get_settings(dev, &cmd); |
122 | if (err < 0) | 133 | if (err < 0) |
123 | return err; | 134 | return err; |
124 | 135 | ||
125 | if (copy_to_user(useraddr, &cmd, sizeof(cmd))) | 136 | if (copy_to_user(useraddr, &cmd, sizeof(cmd))) |
126 | return -EFAULT; | 137 | return -EFAULT; |
127 | return 0; | 138 | return 0; |
128 | } | 139 | } |
129 | 140 | ||
130 | static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) | 141 | static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) |
131 | { | 142 | { |
132 | struct ethtool_cmd cmd; | 143 | struct ethtool_cmd cmd; |
133 | 144 | ||
134 | if (!dev->ethtool_ops->set_settings) | 145 | if (!dev->ethtool_ops->set_settings) |
135 | return -EOPNOTSUPP; | 146 | return -EOPNOTSUPP; |
136 | 147 | ||
137 | if (copy_from_user(&cmd, useraddr, sizeof(cmd))) | 148 | if (copy_from_user(&cmd, useraddr, sizeof(cmd))) |
138 | return -EFAULT; | 149 | return -EFAULT; |
139 | 150 | ||
140 | return dev->ethtool_ops->set_settings(dev, &cmd); | 151 | return dev->ethtool_ops->set_settings(dev, &cmd); |
141 | } | 152 | } |
142 | 153 | ||
143 | static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | 154 | static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) |
144 | { | 155 | { |
145 | struct ethtool_drvinfo info; | 156 | struct ethtool_drvinfo info; |
146 | const struct ethtool_ops *ops = dev->ethtool_ops; | 157 | const struct ethtool_ops *ops = dev->ethtool_ops; |
147 | 158 | ||
148 | if (!ops->get_drvinfo) | 159 | if (!ops->get_drvinfo) |
149 | return -EOPNOTSUPP; | 160 | return -EOPNOTSUPP; |
150 | 161 | ||
151 | memset(&info, 0, sizeof(info)); | 162 | memset(&info, 0, sizeof(info)); |
152 | info.cmd = ETHTOOL_GDRVINFO; | 163 | info.cmd = ETHTOOL_GDRVINFO; |
153 | ops->get_drvinfo(dev, &info); | 164 | ops->get_drvinfo(dev, &info); |
154 | 165 | ||
155 | if (ops->self_test_count) | 166 | if (ops->self_test_count) |
156 | info.testinfo_len = ops->self_test_count(dev); | 167 | info.testinfo_len = ops->self_test_count(dev); |
157 | if (ops->get_stats_count) | 168 | if (ops->get_stats_count) |
158 | info.n_stats = ops->get_stats_count(dev); | 169 | info.n_stats = ops->get_stats_count(dev); |
159 | if (ops->get_regs_len) | 170 | if (ops->get_regs_len) |
160 | info.regdump_len = ops->get_regs_len(dev); | 171 | info.regdump_len = ops->get_regs_len(dev); |
161 | if (ops->get_eeprom_len) | 172 | if (ops->get_eeprom_len) |
162 | info.eedump_len = ops->get_eeprom_len(dev); | 173 | info.eedump_len = ops->get_eeprom_len(dev); |
163 | 174 | ||
164 | if (copy_to_user(useraddr, &info, sizeof(info))) | 175 | if (copy_to_user(useraddr, &info, sizeof(info))) |
165 | return -EFAULT; | 176 | return -EFAULT; |
166 | return 0; | 177 | return 0; |
167 | } | 178 | } |
168 | 179 | ||
169 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | 180 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) |
170 | { | 181 | { |
171 | struct ethtool_regs regs; | 182 | struct ethtool_regs regs; |
172 | const struct ethtool_ops *ops = dev->ethtool_ops; | 183 | const struct ethtool_ops *ops = dev->ethtool_ops; |
173 | void *regbuf; | 184 | void *regbuf; |
174 | int reglen, ret; | 185 | int reglen, ret; |
175 | 186 | ||
176 | if (!ops->get_regs || !ops->get_regs_len) | 187 | if (!ops->get_regs || !ops->get_regs_len) |
177 | return -EOPNOTSUPP; | 188 | return -EOPNOTSUPP; |
178 | 189 | ||
179 | if (copy_from_user(®s, useraddr, sizeof(regs))) | 190 | if (copy_from_user(®s, useraddr, sizeof(regs))) |
180 | return -EFAULT; | 191 | return -EFAULT; |
181 | 192 | ||
182 | reglen = ops->get_regs_len(dev); | 193 | reglen = ops->get_regs_len(dev); |
183 | if (regs.len > reglen) | 194 | if (regs.len > reglen) |
184 | regs.len = reglen; | 195 | regs.len = reglen; |
185 | 196 | ||
186 | regbuf = kmalloc(reglen, GFP_USER); | 197 | regbuf = kmalloc(reglen, GFP_USER); |
187 | if (!regbuf) | 198 | if (!regbuf) |
188 | return -ENOMEM; | 199 | return -ENOMEM; |
189 | 200 | ||
190 | ops->get_regs(dev, ®s, regbuf); | 201 | ops->get_regs(dev, ®s, regbuf); |
191 | 202 | ||
192 | ret = -EFAULT; | 203 | ret = -EFAULT; |
193 | if (copy_to_user(useraddr, ®s, sizeof(regs))) | 204 | if (copy_to_user(useraddr, ®s, sizeof(regs))) |
194 | goto out; | 205 | goto out; |
195 | useraddr += offsetof(struct ethtool_regs, data); | 206 | useraddr += offsetof(struct ethtool_regs, data); |
196 | if (copy_to_user(useraddr, regbuf, regs.len)) | 207 | if (copy_to_user(useraddr, regbuf, regs.len)) |
197 | goto out; | 208 | goto out; |
198 | ret = 0; | 209 | ret = 0; |
199 | 210 | ||
200 | out: | 211 | out: |
201 | kfree(regbuf); | 212 | kfree(regbuf); |
202 | return ret; | 213 | return ret; |
203 | } | 214 | } |
204 | 215 | ||
205 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) | 216 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) |
206 | { | 217 | { |
207 | struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; | 218 | struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; |
208 | 219 | ||
209 | if (!dev->ethtool_ops->get_wol) | 220 | if (!dev->ethtool_ops->get_wol) |
210 | return -EOPNOTSUPP; | 221 | return -EOPNOTSUPP; |
211 | 222 | ||
212 | dev->ethtool_ops->get_wol(dev, &wol); | 223 | dev->ethtool_ops->get_wol(dev, &wol); |
213 | 224 | ||
214 | if (copy_to_user(useraddr, &wol, sizeof(wol))) | 225 | if (copy_to_user(useraddr, &wol, sizeof(wol))) |
215 | return -EFAULT; | 226 | return -EFAULT; |
216 | return 0; | 227 | return 0; |
217 | } | 228 | } |
218 | 229 | ||
219 | static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) | 230 | static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) |
220 | { | 231 | { |
221 | struct ethtool_wolinfo wol; | 232 | struct ethtool_wolinfo wol; |
222 | 233 | ||
223 | if (!dev->ethtool_ops->set_wol) | 234 | if (!dev->ethtool_ops->set_wol) |
224 | return -EOPNOTSUPP; | 235 | return -EOPNOTSUPP; |
225 | 236 | ||
226 | if (copy_from_user(&wol, useraddr, sizeof(wol))) | 237 | if (copy_from_user(&wol, useraddr, sizeof(wol))) |
227 | return -EFAULT; | 238 | return -EFAULT; |
228 | 239 | ||
229 | return dev->ethtool_ops->set_wol(dev, &wol); | 240 | return dev->ethtool_ops->set_wol(dev, &wol); |
230 | } | 241 | } |
231 | 242 | ||
232 | static int ethtool_get_msglevel(struct net_device *dev, char __user *useraddr) | 243 | static int ethtool_get_msglevel(struct net_device *dev, char __user *useraddr) |
233 | { | 244 | { |
234 | struct ethtool_value edata = { ETHTOOL_GMSGLVL }; | 245 | struct ethtool_value edata = { ETHTOOL_GMSGLVL }; |
235 | 246 | ||
236 | if (!dev->ethtool_ops->get_msglevel) | 247 | if (!dev->ethtool_ops->get_msglevel) |
237 | return -EOPNOTSUPP; | 248 | return -EOPNOTSUPP; |
238 | 249 | ||
239 | edata.data = dev->ethtool_ops->get_msglevel(dev); | 250 | edata.data = dev->ethtool_ops->get_msglevel(dev); |
240 | 251 | ||
241 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 252 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
242 | return -EFAULT; | 253 | return -EFAULT; |
243 | return 0; | 254 | return 0; |
244 | } | 255 | } |
245 | 256 | ||
246 | static int ethtool_set_msglevel(struct net_device *dev, char __user *useraddr) | 257 | static int ethtool_set_msglevel(struct net_device *dev, char __user *useraddr) |
247 | { | 258 | { |
248 | struct ethtool_value edata; | 259 | struct ethtool_value edata; |
249 | 260 | ||
250 | if (!dev->ethtool_ops->set_msglevel) | 261 | if (!dev->ethtool_ops->set_msglevel) |
251 | return -EOPNOTSUPP; | 262 | return -EOPNOTSUPP; |
252 | 263 | ||
253 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 264 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
254 | return -EFAULT; | 265 | return -EFAULT; |
255 | 266 | ||
256 | dev->ethtool_ops->set_msglevel(dev, edata.data); | 267 | dev->ethtool_ops->set_msglevel(dev, edata.data); |
257 | return 0; | 268 | return 0; |
258 | } | 269 | } |
259 | 270 | ||
260 | static int ethtool_nway_reset(struct net_device *dev) | 271 | static int ethtool_nway_reset(struct net_device *dev) |
261 | { | 272 | { |
262 | if (!dev->ethtool_ops->nway_reset) | 273 | if (!dev->ethtool_ops->nway_reset) |
263 | return -EOPNOTSUPP; | 274 | return -EOPNOTSUPP; |
264 | 275 | ||
265 | return dev->ethtool_ops->nway_reset(dev); | 276 | return dev->ethtool_ops->nway_reset(dev); |
266 | } | 277 | } |
267 | 278 | ||
268 | static int ethtool_get_link(struct net_device *dev, void __user *useraddr) | 279 | static int ethtool_get_link(struct net_device *dev, void __user *useraddr) |
269 | { | 280 | { |
270 | struct ethtool_value edata = { ETHTOOL_GLINK }; | 281 | struct ethtool_value edata = { ETHTOOL_GLINK }; |
271 | 282 | ||
272 | if (!dev->ethtool_ops->get_link) | 283 | if (!dev->ethtool_ops->get_link) |
273 | return -EOPNOTSUPP; | 284 | return -EOPNOTSUPP; |
274 | 285 | ||
275 | edata.data = dev->ethtool_ops->get_link(dev); | 286 | edata.data = dev->ethtool_ops->get_link(dev); |
276 | 287 | ||
277 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 288 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
278 | return -EFAULT; | 289 | return -EFAULT; |
279 | return 0; | 290 | return 0; |
280 | } | 291 | } |
281 | 292 | ||
282 | static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) | 293 | static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) |
283 | { | 294 | { |
284 | struct ethtool_eeprom eeprom; | 295 | struct ethtool_eeprom eeprom; |
285 | const struct ethtool_ops *ops = dev->ethtool_ops; | 296 | const struct ethtool_ops *ops = dev->ethtool_ops; |
286 | u8 *data; | 297 | u8 *data; |
287 | int ret; | 298 | int ret; |
288 | 299 | ||
289 | if (!ops->get_eeprom || !ops->get_eeprom_len) | 300 | if (!ops->get_eeprom || !ops->get_eeprom_len) |
290 | return -EOPNOTSUPP; | 301 | return -EOPNOTSUPP; |
291 | 302 | ||
292 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) | 303 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) |
293 | return -EFAULT; | 304 | return -EFAULT; |
294 | 305 | ||
295 | /* Check for wrap and zero */ | 306 | /* Check for wrap and zero */ |
296 | if (eeprom.offset + eeprom.len <= eeprom.offset) | 307 | if (eeprom.offset + eeprom.len <= eeprom.offset) |
297 | return -EINVAL; | 308 | return -EINVAL; |
298 | 309 | ||
299 | /* Check for exceeding total eeprom len */ | 310 | /* Check for exceeding total eeprom len */ |
300 | if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) | 311 | if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) |
301 | return -EINVAL; | 312 | return -EINVAL; |
302 | 313 | ||
303 | data = kmalloc(eeprom.len, GFP_USER); | 314 | data = kmalloc(eeprom.len, GFP_USER); |
304 | if (!data) | 315 | if (!data) |
305 | return -ENOMEM; | 316 | return -ENOMEM; |
306 | 317 | ||
307 | ret = -EFAULT; | 318 | ret = -EFAULT; |
308 | if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) | 319 | if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) |
309 | goto out; | 320 | goto out; |
310 | 321 | ||
311 | ret = ops->get_eeprom(dev, &eeprom, data); | 322 | ret = ops->get_eeprom(dev, &eeprom, data); |
312 | if (ret) | 323 | if (ret) |
313 | goto out; | 324 | goto out; |
314 | 325 | ||
315 | ret = -EFAULT; | 326 | ret = -EFAULT; |
316 | if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) | 327 | if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) |
317 | goto out; | 328 | goto out; |
318 | if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) | 329 | if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) |
319 | goto out; | 330 | goto out; |
320 | ret = 0; | 331 | ret = 0; |
321 | 332 | ||
322 | out: | 333 | out: |
323 | kfree(data); | 334 | kfree(data); |
324 | return ret; | 335 | return ret; |
325 | } | 336 | } |
326 | 337 | ||
327 | static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | 338 | static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) |
328 | { | 339 | { |
329 | struct ethtool_eeprom eeprom; | 340 | struct ethtool_eeprom eeprom; |
330 | const struct ethtool_ops *ops = dev->ethtool_ops; | 341 | const struct ethtool_ops *ops = dev->ethtool_ops; |
331 | u8 *data; | 342 | u8 *data; |
332 | int ret; | 343 | int ret; |
333 | 344 | ||
334 | if (!ops->set_eeprom || !ops->get_eeprom_len) | 345 | if (!ops->set_eeprom || !ops->get_eeprom_len) |
335 | return -EOPNOTSUPP; | 346 | return -EOPNOTSUPP; |
336 | 347 | ||
337 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) | 348 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) |
338 | return -EFAULT; | 349 | return -EFAULT; |
339 | 350 | ||
340 | /* Check for wrap and zero */ | 351 | /* Check for wrap and zero */ |
341 | if (eeprom.offset + eeprom.len <= eeprom.offset) | 352 | if (eeprom.offset + eeprom.len <= eeprom.offset) |
342 | return -EINVAL; | 353 | return -EINVAL; |
343 | 354 | ||
344 | /* Check for exceeding total eeprom len */ | 355 | /* Check for exceeding total eeprom len */ |
345 | if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) | 356 | if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) |
346 | return -EINVAL; | 357 | return -EINVAL; |
347 | 358 | ||
348 | data = kmalloc(eeprom.len, GFP_USER); | 359 | data = kmalloc(eeprom.len, GFP_USER); |
349 | if (!data) | 360 | if (!data) |
350 | return -ENOMEM; | 361 | return -ENOMEM; |
351 | 362 | ||
352 | ret = -EFAULT; | 363 | ret = -EFAULT; |
353 | if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) | 364 | if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) |
354 | goto out; | 365 | goto out; |
355 | 366 | ||
356 | ret = ops->set_eeprom(dev, &eeprom, data); | 367 | ret = ops->set_eeprom(dev, &eeprom, data); |
357 | if (ret) | 368 | if (ret) |
358 | goto out; | 369 | goto out; |
359 | 370 | ||
360 | if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) | 371 | if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) |
361 | ret = -EFAULT; | 372 | ret = -EFAULT; |
362 | 373 | ||
363 | out: | 374 | out: |
364 | kfree(data); | 375 | kfree(data); |
365 | return ret; | 376 | return ret; |
366 | } | 377 | } |
367 | 378 | ||
368 | static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | 379 | static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) |
369 | { | 380 | { |
370 | struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; | 381 | struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; |
371 | 382 | ||
372 | if (!dev->ethtool_ops->get_coalesce) | 383 | if (!dev->ethtool_ops->get_coalesce) |
373 | return -EOPNOTSUPP; | 384 | return -EOPNOTSUPP; |
374 | 385 | ||
375 | dev->ethtool_ops->get_coalesce(dev, &coalesce); | 386 | dev->ethtool_ops->get_coalesce(dev, &coalesce); |
376 | 387 | ||
377 | if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) | 388 | if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) |
378 | return -EFAULT; | 389 | return -EFAULT; |
379 | return 0; | 390 | return 0; |
380 | } | 391 | } |
381 | 392 | ||
382 | static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | 393 | static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) |
383 | { | 394 | { |
384 | struct ethtool_coalesce coalesce; | 395 | struct ethtool_coalesce coalesce; |
385 | 396 | ||
386 | if (!dev->ethtool_ops->set_coalesce) | 397 | if (!dev->ethtool_ops->set_coalesce) |
387 | return -EOPNOTSUPP; | 398 | return -EOPNOTSUPP; |
388 | 399 | ||
389 | if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) | 400 | if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) |
390 | return -EFAULT; | 401 | return -EFAULT; |
391 | 402 | ||
392 | return dev->ethtool_ops->set_coalesce(dev, &coalesce); | 403 | return dev->ethtool_ops->set_coalesce(dev, &coalesce); |
393 | } | 404 | } |
394 | 405 | ||
395 | static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) | 406 | static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) |
396 | { | 407 | { |
397 | struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; | 408 | struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; |
398 | 409 | ||
399 | if (!dev->ethtool_ops->get_ringparam) | 410 | if (!dev->ethtool_ops->get_ringparam) |
400 | return -EOPNOTSUPP; | 411 | return -EOPNOTSUPP; |
401 | 412 | ||
402 | dev->ethtool_ops->get_ringparam(dev, &ringparam); | 413 | dev->ethtool_ops->get_ringparam(dev, &ringparam); |
403 | 414 | ||
404 | if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) | 415 | if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) |
405 | return -EFAULT; | 416 | return -EFAULT; |
406 | return 0; | 417 | return 0; |
407 | } | 418 | } |
408 | 419 | ||
409 | static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) | 420 | static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) |
410 | { | 421 | { |
411 | struct ethtool_ringparam ringparam; | 422 | struct ethtool_ringparam ringparam; |
412 | 423 | ||
413 | if (!dev->ethtool_ops->set_ringparam) | 424 | if (!dev->ethtool_ops->set_ringparam) |
414 | return -EOPNOTSUPP; | 425 | return -EOPNOTSUPP; |
415 | 426 | ||
416 | if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) | 427 | if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) |
417 | return -EFAULT; | 428 | return -EFAULT; |
418 | 429 | ||
419 | return dev->ethtool_ops->set_ringparam(dev, &ringparam); | 430 | return dev->ethtool_ops->set_ringparam(dev, &ringparam); |
420 | } | 431 | } |
421 | 432 | ||
422 | static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) | 433 | static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) |
423 | { | 434 | { |
424 | struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; | 435 | struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; |
425 | 436 | ||
426 | if (!dev->ethtool_ops->get_pauseparam) | 437 | if (!dev->ethtool_ops->get_pauseparam) |
427 | return -EOPNOTSUPP; | 438 | return -EOPNOTSUPP; |
428 | 439 | ||
429 | dev->ethtool_ops->get_pauseparam(dev, &pauseparam); | 440 | dev->ethtool_ops->get_pauseparam(dev, &pauseparam); |
430 | 441 | ||
431 | if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) | 442 | if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) |
432 | return -EFAULT; | 443 | return -EFAULT; |
433 | return 0; | 444 | return 0; |
434 | } | 445 | } |
435 | 446 | ||
436 | static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) | 447 | static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) |
437 | { | 448 | { |
438 | struct ethtool_pauseparam pauseparam; | 449 | struct ethtool_pauseparam pauseparam; |
439 | 450 | ||
440 | if (!dev->ethtool_ops->set_pauseparam) | 451 | if (!dev->ethtool_ops->set_pauseparam) |
441 | return -EOPNOTSUPP; | 452 | return -EOPNOTSUPP; |
442 | 453 | ||
443 | if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) | 454 | if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) |
444 | return -EFAULT; | 455 | return -EFAULT; |
445 | 456 | ||
446 | return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); | 457 | return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); |
447 | } | 458 | } |
448 | 459 | ||
449 | static int ethtool_get_rx_csum(struct net_device *dev, char __user *useraddr) | 460 | static int ethtool_get_rx_csum(struct net_device *dev, char __user *useraddr) |
450 | { | 461 | { |
451 | struct ethtool_value edata = { ETHTOOL_GRXCSUM }; | 462 | struct ethtool_value edata = { ETHTOOL_GRXCSUM }; |
452 | 463 | ||
453 | if (!dev->ethtool_ops->get_rx_csum) | 464 | if (!dev->ethtool_ops->get_rx_csum) |
454 | return -EOPNOTSUPP; | 465 | return -EOPNOTSUPP; |
455 | 466 | ||
456 | edata.data = dev->ethtool_ops->get_rx_csum(dev); | 467 | edata.data = dev->ethtool_ops->get_rx_csum(dev); |
457 | 468 | ||
458 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 469 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
459 | return -EFAULT; | 470 | return -EFAULT; |
460 | return 0; | 471 | return 0; |
461 | } | 472 | } |
462 | 473 | ||
463 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) | 474 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) |
464 | { | 475 | { |
465 | struct ethtool_value edata; | 476 | struct ethtool_value edata; |
466 | 477 | ||
467 | if (!dev->ethtool_ops->set_rx_csum) | 478 | if (!dev->ethtool_ops->set_rx_csum) |
468 | return -EOPNOTSUPP; | 479 | return -EOPNOTSUPP; |
469 | 480 | ||
470 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 481 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
471 | return -EFAULT; | 482 | return -EFAULT; |
472 | 483 | ||
473 | dev->ethtool_ops->set_rx_csum(dev, edata.data); | 484 | dev->ethtool_ops->set_rx_csum(dev, edata.data); |
474 | return 0; | 485 | return 0; |
475 | } | 486 | } |
476 | 487 | ||
477 | static int ethtool_get_tx_csum(struct net_device *dev, char __user *useraddr) | 488 | static int ethtool_get_tx_csum(struct net_device *dev, char __user *useraddr) |
478 | { | 489 | { |
479 | struct ethtool_value edata = { ETHTOOL_GTXCSUM }; | 490 | struct ethtool_value edata = { ETHTOOL_GTXCSUM }; |
480 | 491 | ||
481 | if (!dev->ethtool_ops->get_tx_csum) | 492 | if (!dev->ethtool_ops->get_tx_csum) |
482 | return -EOPNOTSUPP; | 493 | return -EOPNOTSUPP; |
483 | 494 | ||
484 | edata.data = dev->ethtool_ops->get_tx_csum(dev); | 495 | edata.data = dev->ethtool_ops->get_tx_csum(dev); |
485 | 496 | ||
486 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 497 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
487 | return -EFAULT; | 498 | return -EFAULT; |
488 | return 0; | 499 | return 0; |
489 | } | 500 | } |
490 | 501 | ||
491 | static int __ethtool_set_sg(struct net_device *dev, u32 data) | 502 | static int __ethtool_set_sg(struct net_device *dev, u32 data) |
492 | { | 503 | { |
493 | int err; | 504 | int err; |
494 | 505 | ||
495 | if (!data && dev->ethtool_ops->set_tso) { | 506 | if (!data && dev->ethtool_ops->set_tso) { |
496 | err = dev->ethtool_ops->set_tso(dev, 0); | 507 | err = dev->ethtool_ops->set_tso(dev, 0); |
497 | if (err) | 508 | if (err) |
498 | return err; | 509 | return err; |
499 | } | 510 | } |
500 | 511 | ||
501 | if (!data && dev->ethtool_ops->set_ufo) { | 512 | if (!data && dev->ethtool_ops->set_ufo) { |
502 | err = dev->ethtool_ops->set_ufo(dev, 0); | 513 | err = dev->ethtool_ops->set_ufo(dev, 0); |
503 | if (err) | 514 | if (err) |
504 | return err; | 515 | return err; |
505 | } | 516 | } |
506 | return dev->ethtool_ops->set_sg(dev, data); | 517 | return dev->ethtool_ops->set_sg(dev, data); |
507 | } | 518 | } |
508 | 519 | ||
509 | static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) | 520 | static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) |
510 | { | 521 | { |
511 | struct ethtool_value edata; | 522 | struct ethtool_value edata; |
512 | int err; | 523 | int err; |
513 | 524 | ||
514 | if (!dev->ethtool_ops->set_tx_csum) | 525 | if (!dev->ethtool_ops->set_tx_csum) |
515 | return -EOPNOTSUPP; | 526 | return -EOPNOTSUPP; |
516 | 527 | ||
517 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 528 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
518 | return -EFAULT; | 529 | return -EFAULT; |
519 | 530 | ||
520 | if (!edata.data && dev->ethtool_ops->set_sg) { | 531 | if (!edata.data && dev->ethtool_ops->set_sg) { |
521 | err = __ethtool_set_sg(dev, 0); | 532 | err = __ethtool_set_sg(dev, 0); |
522 | if (err) | 533 | if (err) |
523 | return err; | 534 | return err; |
524 | } | 535 | } |
525 | 536 | ||
526 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); | 537 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); |
527 | } | 538 | } |
528 | 539 | ||
529 | static int ethtool_get_sg(struct net_device *dev, char __user *useraddr) | 540 | static int ethtool_get_sg(struct net_device *dev, char __user *useraddr) |
530 | { | 541 | { |
531 | struct ethtool_value edata = { ETHTOOL_GSG }; | 542 | struct ethtool_value edata = { ETHTOOL_GSG }; |
532 | 543 | ||
533 | if (!dev->ethtool_ops->get_sg) | 544 | if (!dev->ethtool_ops->get_sg) |
534 | return -EOPNOTSUPP; | 545 | return -EOPNOTSUPP; |
535 | 546 | ||
536 | edata.data = dev->ethtool_ops->get_sg(dev); | 547 | edata.data = dev->ethtool_ops->get_sg(dev); |
537 | 548 | ||
538 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 549 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
539 | return -EFAULT; | 550 | return -EFAULT; |
540 | return 0; | 551 | return 0; |
541 | } | 552 | } |
542 | 553 | ||
543 | static int ethtool_set_sg(struct net_device *dev, char __user *useraddr) | 554 | static int ethtool_set_sg(struct net_device *dev, char __user *useraddr) |
544 | { | 555 | { |
545 | struct ethtool_value edata; | 556 | struct ethtool_value edata; |
546 | 557 | ||
547 | if (!dev->ethtool_ops->set_sg) | 558 | if (!dev->ethtool_ops->set_sg) |
548 | return -EOPNOTSUPP; | 559 | return -EOPNOTSUPP; |
549 | 560 | ||
550 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 561 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
551 | return -EFAULT; | 562 | return -EFAULT; |
552 | 563 | ||
553 | if (edata.data && | 564 | if (edata.data && |
554 | !(dev->features & NETIF_F_ALL_CSUM)) | 565 | !(dev->features & NETIF_F_ALL_CSUM)) |
555 | return -EINVAL; | 566 | return -EINVAL; |
556 | 567 | ||
557 | return __ethtool_set_sg(dev, edata.data); | 568 | return __ethtool_set_sg(dev, edata.data); |
558 | } | 569 | } |
559 | 570 | ||
560 | static int ethtool_get_tso(struct net_device *dev, char __user *useraddr) | 571 | static int ethtool_get_tso(struct net_device *dev, char __user *useraddr) |
561 | { | 572 | { |
562 | struct ethtool_value edata = { ETHTOOL_GTSO }; | 573 | struct ethtool_value edata = { ETHTOOL_GTSO }; |
563 | 574 | ||
564 | if (!dev->ethtool_ops->get_tso) | 575 | if (!dev->ethtool_ops->get_tso) |
565 | return -EOPNOTSUPP; | 576 | return -EOPNOTSUPP; |
566 | 577 | ||
567 | edata.data = dev->ethtool_ops->get_tso(dev); | 578 | edata.data = dev->ethtool_ops->get_tso(dev); |
568 | 579 | ||
569 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 580 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
570 | return -EFAULT; | 581 | return -EFAULT; |
571 | return 0; | 582 | return 0; |
572 | } | 583 | } |
573 | 584 | ||
574 | static int ethtool_set_tso(struct net_device *dev, char __user *useraddr) | 585 | static int ethtool_set_tso(struct net_device *dev, char __user *useraddr) |
575 | { | 586 | { |
576 | struct ethtool_value edata; | 587 | struct ethtool_value edata; |
577 | 588 | ||
578 | if (!dev->ethtool_ops->set_tso) | 589 | if (!dev->ethtool_ops->set_tso) |
579 | return -EOPNOTSUPP; | 590 | return -EOPNOTSUPP; |
580 | 591 | ||
581 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 592 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
582 | return -EFAULT; | 593 | return -EFAULT; |
583 | 594 | ||
584 | if (edata.data && !(dev->features & NETIF_F_SG)) | 595 | if (edata.data && !(dev->features & NETIF_F_SG)) |
585 | return -EINVAL; | 596 | return -EINVAL; |
586 | 597 | ||
587 | return dev->ethtool_ops->set_tso(dev, edata.data); | 598 | return dev->ethtool_ops->set_tso(dev, edata.data); |
588 | } | 599 | } |
589 | 600 | ||
590 | static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) | 601 | static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) |
591 | { | 602 | { |
592 | struct ethtool_value edata = { ETHTOOL_GUFO }; | 603 | struct ethtool_value edata = { ETHTOOL_GUFO }; |
593 | 604 | ||
594 | if (!dev->ethtool_ops->get_ufo) | 605 | if (!dev->ethtool_ops->get_ufo) |
595 | return -EOPNOTSUPP; | 606 | return -EOPNOTSUPP; |
596 | edata.data = dev->ethtool_ops->get_ufo(dev); | 607 | edata.data = dev->ethtool_ops->get_ufo(dev); |
597 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 608 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
598 | return -EFAULT; | 609 | return -EFAULT; |
599 | return 0; | 610 | return 0; |
600 | } | 611 | } |
601 | 612 | ||
602 | static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) | 613 | static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) |
603 | { | 614 | { |
604 | struct ethtool_value edata; | 615 | struct ethtool_value edata; |
605 | 616 | ||
606 | if (!dev->ethtool_ops->set_ufo) | 617 | if (!dev->ethtool_ops->set_ufo) |
607 | return -EOPNOTSUPP; | 618 | return -EOPNOTSUPP; |
608 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 619 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
609 | return -EFAULT; | 620 | return -EFAULT; |
610 | if (edata.data && !(dev->features & NETIF_F_SG)) | 621 | if (edata.data && !(dev->features & NETIF_F_SG)) |
611 | return -EINVAL; | 622 | return -EINVAL; |
612 | if (edata.data && !(dev->features & NETIF_F_HW_CSUM)) | 623 | if (edata.data && !(dev->features & NETIF_F_HW_CSUM)) |
613 | return -EINVAL; | 624 | return -EINVAL; |
614 | return dev->ethtool_ops->set_ufo(dev, edata.data); | 625 | return dev->ethtool_ops->set_ufo(dev, edata.data); |
615 | } | 626 | } |
616 | 627 | ||
617 | static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) | 628 | static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) |
618 | { | 629 | { |
619 | struct ethtool_value edata = { ETHTOOL_GGSO }; | 630 | struct ethtool_value edata = { ETHTOOL_GGSO }; |
620 | 631 | ||
621 | edata.data = dev->features & NETIF_F_GSO; | 632 | edata.data = dev->features & NETIF_F_GSO; |
622 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 633 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
623 | return -EFAULT; | 634 | return -EFAULT; |
624 | return 0; | 635 | return 0; |
625 | } | 636 | } |
626 | 637 | ||
627 | static int ethtool_set_gso(struct net_device *dev, char __user *useraddr) | 638 | static int ethtool_set_gso(struct net_device *dev, char __user *useraddr) |
628 | { | 639 | { |
629 | struct ethtool_value edata; | 640 | struct ethtool_value edata; |
630 | 641 | ||
631 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | 642 | if (copy_from_user(&edata, useraddr, sizeof(edata))) |
632 | return -EFAULT; | 643 | return -EFAULT; |
633 | if (edata.data) | 644 | if (edata.data) |
634 | dev->features |= NETIF_F_GSO; | 645 | dev->features |= NETIF_F_GSO; |
635 | else | 646 | else |
636 | dev->features &= ~NETIF_F_GSO; | 647 | dev->features &= ~NETIF_F_GSO; |
637 | return 0; | 648 | return 0; |
638 | } | 649 | } |
639 | 650 | ||
640 | static int ethtool_self_test(struct net_device *dev, char __user *useraddr) | 651 | static int ethtool_self_test(struct net_device *dev, char __user *useraddr) |
641 | { | 652 | { |
642 | struct ethtool_test test; | 653 | struct ethtool_test test; |
643 | const struct ethtool_ops *ops = dev->ethtool_ops; | 654 | const struct ethtool_ops *ops = dev->ethtool_ops; |
644 | u64 *data; | 655 | u64 *data; |
645 | int ret; | 656 | int ret; |
646 | 657 | ||
647 | if (!ops->self_test || !ops->self_test_count) | 658 | if (!ops->self_test || !ops->self_test_count) |
648 | return -EOPNOTSUPP; | 659 | return -EOPNOTSUPP; |
649 | 660 | ||
650 | if (copy_from_user(&test, useraddr, sizeof(test))) | 661 | if (copy_from_user(&test, useraddr, sizeof(test))) |
651 | return -EFAULT; | 662 | return -EFAULT; |
652 | 663 | ||
653 | test.len = ops->self_test_count(dev); | 664 | test.len = ops->self_test_count(dev); |
654 | data = kmalloc(test.len * sizeof(u64), GFP_USER); | 665 | data = kmalloc(test.len * sizeof(u64), GFP_USER); |
655 | if (!data) | 666 | if (!data) |
656 | return -ENOMEM; | 667 | return -ENOMEM; |
657 | 668 | ||
658 | ops->self_test(dev, &test, data); | 669 | ops->self_test(dev, &test, data); |
659 | 670 | ||
660 | ret = -EFAULT; | 671 | ret = -EFAULT; |
661 | if (copy_to_user(useraddr, &test, sizeof(test))) | 672 | if (copy_to_user(useraddr, &test, sizeof(test))) |
662 | goto out; | 673 | goto out; |
663 | useraddr += sizeof(test); | 674 | useraddr += sizeof(test); |
664 | if (copy_to_user(useraddr, data, test.len * sizeof(u64))) | 675 | if (copy_to_user(useraddr, data, test.len * sizeof(u64))) |
665 | goto out; | 676 | goto out; |
666 | ret = 0; | 677 | ret = 0; |
667 | 678 | ||
668 | out: | 679 | out: |
669 | kfree(data); | 680 | kfree(data); |
670 | return ret; | 681 | return ret; |
671 | } | 682 | } |
672 | 683 | ||
673 | static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) | 684 | static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) |
674 | { | 685 | { |
675 | struct ethtool_gstrings gstrings; | 686 | struct ethtool_gstrings gstrings; |
676 | const struct ethtool_ops *ops = dev->ethtool_ops; | 687 | const struct ethtool_ops *ops = dev->ethtool_ops; |
677 | u8 *data; | 688 | u8 *data; |
678 | int ret; | 689 | int ret; |
679 | 690 | ||
680 | if (!ops->get_strings) | 691 | if (!ops->get_strings) |
681 | return -EOPNOTSUPP; | 692 | return -EOPNOTSUPP; |
682 | 693 | ||
683 | if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) | 694 | if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) |
684 | return -EFAULT; | 695 | return -EFAULT; |
685 | 696 | ||
686 | switch (gstrings.string_set) { | 697 | switch (gstrings.string_set) { |
687 | case ETH_SS_TEST: | 698 | case ETH_SS_TEST: |
688 | if (!ops->self_test_count) | 699 | if (!ops->self_test_count) |
689 | return -EOPNOTSUPP; | 700 | return -EOPNOTSUPP; |
690 | gstrings.len = ops->self_test_count(dev); | 701 | gstrings.len = ops->self_test_count(dev); |
691 | break; | 702 | break; |
692 | case ETH_SS_STATS: | 703 | case ETH_SS_STATS: |
693 | if (!ops->get_stats_count) | 704 | if (!ops->get_stats_count) |
694 | return -EOPNOTSUPP; | 705 | return -EOPNOTSUPP; |
695 | gstrings.len = ops->get_stats_count(dev); | 706 | gstrings.len = ops->get_stats_count(dev); |
696 | break; | 707 | break; |
697 | default: | 708 | default: |
698 | return -EINVAL; | 709 | return -EINVAL; |
699 | } | 710 | } |
700 | 711 | ||
701 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); | 712 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); |
702 | if (!data) | 713 | if (!data) |
703 | return -ENOMEM; | 714 | return -ENOMEM; |
704 | 715 | ||
705 | ops->get_strings(dev, gstrings.string_set, data); | 716 | ops->get_strings(dev, gstrings.string_set, data); |
706 | 717 | ||
707 | ret = -EFAULT; | 718 | ret = -EFAULT; |
708 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) | 719 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) |
709 | goto out; | 720 | goto out; |
710 | useraddr += sizeof(gstrings); | 721 | useraddr += sizeof(gstrings); |
711 | if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) | 722 | if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) |
712 | goto out; | 723 | goto out; |
713 | ret = 0; | 724 | ret = 0; |
714 | 725 | ||
715 | out: | 726 | out: |
716 | kfree(data); | 727 | kfree(data); |
717 | return ret; | 728 | return ret; |
718 | } | 729 | } |
719 | 730 | ||
720 | static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) | 731 | static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) |
721 | { | 732 | { |
722 | struct ethtool_value id; | 733 | struct ethtool_value id; |
723 | 734 | ||
724 | if (!dev->ethtool_ops->phys_id) | 735 | if (!dev->ethtool_ops->phys_id) |
725 | return -EOPNOTSUPP; | 736 | return -EOPNOTSUPP; |
726 | 737 | ||
727 | if (copy_from_user(&id, useraddr, sizeof(id))) | 738 | if (copy_from_user(&id, useraddr, sizeof(id))) |
728 | return -EFAULT; | 739 | return -EFAULT; |
729 | 740 | ||
730 | return dev->ethtool_ops->phys_id(dev, id.data); | 741 | return dev->ethtool_ops->phys_id(dev, id.data); |
731 | } | 742 | } |
732 | 743 | ||
733 | static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) | 744 | static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) |
734 | { | 745 | { |
735 | struct ethtool_stats stats; | 746 | struct ethtool_stats stats; |
736 | const struct ethtool_ops *ops = dev->ethtool_ops; | 747 | const struct ethtool_ops *ops = dev->ethtool_ops; |
737 | u64 *data; | 748 | u64 *data; |
738 | int ret; | 749 | int ret; |
739 | 750 | ||
740 | if (!ops->get_ethtool_stats || !ops->get_stats_count) | 751 | if (!ops->get_ethtool_stats || !ops->get_stats_count) |
741 | return -EOPNOTSUPP; | 752 | return -EOPNOTSUPP; |
742 | 753 | ||
743 | if (copy_from_user(&stats, useraddr, sizeof(stats))) | 754 | if (copy_from_user(&stats, useraddr, sizeof(stats))) |
744 | return -EFAULT; | 755 | return -EFAULT; |
745 | 756 | ||
746 | stats.n_stats = ops->get_stats_count(dev); | 757 | stats.n_stats = ops->get_stats_count(dev); |
747 | data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); | 758 | data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); |
748 | if (!data) | 759 | if (!data) |
749 | return -ENOMEM; | 760 | return -ENOMEM; |
750 | 761 | ||
751 | ops->get_ethtool_stats(dev, &stats, data); | 762 | ops->get_ethtool_stats(dev, &stats, data); |
752 | 763 | ||
753 | ret = -EFAULT; | 764 | ret = -EFAULT; |
754 | if (copy_to_user(useraddr, &stats, sizeof(stats))) | 765 | if (copy_to_user(useraddr, &stats, sizeof(stats))) |
755 | goto out; | 766 | goto out; |
756 | useraddr += sizeof(stats); | 767 | useraddr += sizeof(stats); |
757 | if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) | 768 | if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) |
758 | goto out; | 769 | goto out; |
759 | ret = 0; | 770 | ret = 0; |
760 | 771 | ||
761 | out: | 772 | out: |
762 | kfree(data); | 773 | kfree(data); |
763 | return ret; | 774 | return ret; |
764 | } | 775 | } |
765 | 776 | ||
766 | static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) | 777 | static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) |
767 | { | 778 | { |
768 | struct ethtool_perm_addr epaddr; | 779 | struct ethtool_perm_addr epaddr; |
769 | u8 *data; | 780 | u8 *data; |
770 | int ret; | 781 | int ret; |
771 | 782 | ||
772 | if (!dev->ethtool_ops->get_perm_addr) | 783 | if (!dev->ethtool_ops->get_perm_addr) |
773 | return -EOPNOTSUPP; | 784 | return -EOPNOTSUPP; |
774 | 785 | ||
775 | if (copy_from_user(&epaddr,useraddr,sizeof(epaddr))) | 786 | if (copy_from_user(&epaddr,useraddr,sizeof(epaddr))) |
776 | return -EFAULT; | 787 | return -EFAULT; |
777 | 788 | ||
778 | data = kmalloc(epaddr.size, GFP_USER); | 789 | data = kmalloc(epaddr.size, GFP_USER); |
779 | if (!data) | 790 | if (!data) |
780 | return -ENOMEM; | 791 | return -ENOMEM; |
781 | 792 | ||
782 | ret = dev->ethtool_ops->get_perm_addr(dev,&epaddr,data); | 793 | ret = dev->ethtool_ops->get_perm_addr(dev,&epaddr,data); |
783 | if (ret) | 794 | if (ret) |
784 | goto out; | 795 | goto out; |
785 | 796 | ||
786 | ret = -EFAULT; | 797 | ret = -EFAULT; |
787 | if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) | 798 | if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) |
788 | goto out; | 799 | goto out; |
789 | useraddr += sizeof(epaddr); | 800 | useraddr += sizeof(epaddr); |
790 | if (copy_to_user(useraddr, data, epaddr.size)) | 801 | if (copy_to_user(useraddr, data, epaddr.size)) |
791 | goto out; | 802 | goto out; |
792 | ret = 0; | 803 | ret = 0; |
793 | 804 | ||
794 | out: | 805 | out: |
795 | kfree(data); | 806 | kfree(data); |
796 | return ret; | 807 | return ret; |
797 | } | 808 | } |
798 | 809 | ||
799 | /* The main entry point in this file. Called from net/core/dev.c */ | 810 | /* The main entry point in this file. Called from net/core/dev.c */ |
800 | 811 | ||
801 | int dev_ethtool(struct ifreq *ifr) | 812 | int dev_ethtool(struct ifreq *ifr) |
802 | { | 813 | { |
803 | struct net_device *dev = __dev_get_by_name(ifr->ifr_name); | 814 | struct net_device *dev = __dev_get_by_name(ifr->ifr_name); |
804 | void __user *useraddr = ifr->ifr_data; | 815 | void __user *useraddr = ifr->ifr_data; |
805 | u32 ethcmd; | 816 | u32 ethcmd; |
806 | int rc; | 817 | int rc; |
807 | unsigned long old_features; | 818 | unsigned long old_features; |
808 | 819 | ||
809 | if (!dev || !netif_device_present(dev)) | 820 | if (!dev || !netif_device_present(dev)) |
810 | return -ENODEV; | 821 | return -ENODEV; |
811 | 822 | ||
812 | if (!dev->ethtool_ops) | 823 | if (!dev->ethtool_ops) |
813 | goto ioctl; | 824 | goto ioctl; |
814 | 825 | ||
815 | if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) | 826 | if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) |
816 | return -EFAULT; | 827 | return -EFAULT; |
817 | 828 | ||
818 | /* Allow some commands to be done by anyone */ | 829 | /* Allow some commands to be done by anyone */ |
819 | switch(ethcmd) { | 830 | switch(ethcmd) { |
820 | case ETHTOOL_GDRVINFO: | 831 | case ETHTOOL_GDRVINFO: |
821 | case ETHTOOL_GMSGLVL: | 832 | case ETHTOOL_GMSGLVL: |
822 | case ETHTOOL_GCOALESCE: | 833 | case ETHTOOL_GCOALESCE: |
823 | case ETHTOOL_GRINGPARAM: | 834 | case ETHTOOL_GRINGPARAM: |
824 | case ETHTOOL_GPAUSEPARAM: | 835 | case ETHTOOL_GPAUSEPARAM: |
825 | case ETHTOOL_GRXCSUM: | 836 | case ETHTOOL_GRXCSUM: |
826 | case ETHTOOL_GTXCSUM: | 837 | case ETHTOOL_GTXCSUM: |
827 | case ETHTOOL_GSG: | 838 | case ETHTOOL_GSG: |
828 | case ETHTOOL_GSTRINGS: | 839 | case ETHTOOL_GSTRINGS: |
829 | case ETHTOOL_GTSO: | 840 | case ETHTOOL_GTSO: |
830 | case ETHTOOL_GPERMADDR: | 841 | case ETHTOOL_GPERMADDR: |
831 | case ETHTOOL_GUFO: | 842 | case ETHTOOL_GUFO: |
832 | case ETHTOOL_GGSO: | 843 | case ETHTOOL_GGSO: |
833 | break; | 844 | break; |
834 | default: | 845 | default: |
835 | if (!capable(CAP_NET_ADMIN)) | 846 | if (!capable(CAP_NET_ADMIN)) |
836 | return -EPERM; | 847 | return -EPERM; |
837 | } | 848 | } |
838 | 849 | ||
839 | if (dev->ethtool_ops->begin) | 850 | if (dev->ethtool_ops->begin) |
840 | if ((rc = dev->ethtool_ops->begin(dev)) < 0) | 851 | if ((rc = dev->ethtool_ops->begin(dev)) < 0) |
841 | return rc; | 852 | return rc; |
842 | 853 | ||
843 | old_features = dev->features; | 854 | old_features = dev->features; |
844 | 855 | ||
845 | switch (ethcmd) { | 856 | switch (ethcmd) { |
846 | case ETHTOOL_GSET: | 857 | case ETHTOOL_GSET: |
847 | rc = ethtool_get_settings(dev, useraddr); | 858 | rc = ethtool_get_settings(dev, useraddr); |
848 | break; | 859 | break; |
849 | case ETHTOOL_SSET: | 860 | case ETHTOOL_SSET: |
850 | rc = ethtool_set_settings(dev, useraddr); | 861 | rc = ethtool_set_settings(dev, useraddr); |
851 | break; | 862 | break; |
852 | case ETHTOOL_GDRVINFO: | 863 | case ETHTOOL_GDRVINFO: |
853 | rc = ethtool_get_drvinfo(dev, useraddr); | 864 | rc = ethtool_get_drvinfo(dev, useraddr); |
854 | break; | 865 | break; |
855 | case ETHTOOL_GREGS: | 866 | case ETHTOOL_GREGS: |
856 | rc = ethtool_get_regs(dev, useraddr); | 867 | rc = ethtool_get_regs(dev, useraddr); |
857 | break; | 868 | break; |
858 | case ETHTOOL_GWOL: | 869 | case ETHTOOL_GWOL: |
859 | rc = ethtool_get_wol(dev, useraddr); | 870 | rc = ethtool_get_wol(dev, useraddr); |
860 | break; | 871 | break; |
861 | case ETHTOOL_SWOL: | 872 | case ETHTOOL_SWOL: |
862 | rc = ethtool_set_wol(dev, useraddr); | 873 | rc = ethtool_set_wol(dev, useraddr); |
863 | break; | 874 | break; |
864 | case ETHTOOL_GMSGLVL: | 875 | case ETHTOOL_GMSGLVL: |
865 | rc = ethtool_get_msglevel(dev, useraddr); | 876 | rc = ethtool_get_msglevel(dev, useraddr); |
866 | break; | 877 | break; |
867 | case ETHTOOL_SMSGLVL: | 878 | case ETHTOOL_SMSGLVL: |
868 | rc = ethtool_set_msglevel(dev, useraddr); | 879 | rc = ethtool_set_msglevel(dev, useraddr); |
869 | break; | 880 | break; |
870 | case ETHTOOL_NWAY_RST: | 881 | case ETHTOOL_NWAY_RST: |
871 | rc = ethtool_nway_reset(dev); | 882 | rc = ethtool_nway_reset(dev); |
872 | break; | 883 | break; |
873 | case ETHTOOL_GLINK: | 884 | case ETHTOOL_GLINK: |
874 | rc = ethtool_get_link(dev, useraddr); | 885 | rc = ethtool_get_link(dev, useraddr); |
875 | break; | 886 | break; |
876 | case ETHTOOL_GEEPROM: | 887 | case ETHTOOL_GEEPROM: |
877 | rc = ethtool_get_eeprom(dev, useraddr); | 888 | rc = ethtool_get_eeprom(dev, useraddr); |
878 | break; | 889 | break; |
879 | case ETHTOOL_SEEPROM: | 890 | case ETHTOOL_SEEPROM: |
880 | rc = ethtool_set_eeprom(dev, useraddr); | 891 | rc = ethtool_set_eeprom(dev, useraddr); |
881 | break; | 892 | break; |
882 | case ETHTOOL_GCOALESCE: | 893 | case ETHTOOL_GCOALESCE: |
883 | rc = ethtool_get_coalesce(dev, useraddr); | 894 | rc = ethtool_get_coalesce(dev, useraddr); |
884 | break; | 895 | break; |
885 | case ETHTOOL_SCOALESCE: | 896 | case ETHTOOL_SCOALESCE: |
886 | rc = ethtool_set_coalesce(dev, useraddr); | 897 | rc = ethtool_set_coalesce(dev, useraddr); |
887 | break; | 898 | break; |
888 | case ETHTOOL_GRINGPARAM: | 899 | case ETHTOOL_GRINGPARAM: |
889 | rc = ethtool_get_ringparam(dev, useraddr); | 900 | rc = ethtool_get_ringparam(dev, useraddr); |
890 | break; | 901 | break; |
891 | case ETHTOOL_SRINGPARAM: | 902 | case ETHTOOL_SRINGPARAM: |
892 | rc = ethtool_set_ringparam(dev, useraddr); | 903 | rc = ethtool_set_ringparam(dev, useraddr); |
893 | break; | 904 | break; |
894 | case ETHTOOL_GPAUSEPARAM: | 905 | case ETHTOOL_GPAUSEPARAM: |
895 | rc = ethtool_get_pauseparam(dev, useraddr); | 906 | rc = ethtool_get_pauseparam(dev, useraddr); |
896 | break; | 907 | break; |
897 | case ETHTOOL_SPAUSEPARAM: | 908 | case ETHTOOL_SPAUSEPARAM: |
898 | rc = ethtool_set_pauseparam(dev, useraddr); | 909 | rc = ethtool_set_pauseparam(dev, useraddr); |
899 | break; | 910 | break; |
900 | case ETHTOOL_GRXCSUM: | 911 | case ETHTOOL_GRXCSUM: |
901 | rc = ethtool_get_rx_csum(dev, useraddr); | 912 | rc = ethtool_get_rx_csum(dev, useraddr); |
902 | break; | 913 | break; |
903 | case ETHTOOL_SRXCSUM: | 914 | case ETHTOOL_SRXCSUM: |
904 | rc = ethtool_set_rx_csum(dev, useraddr); | 915 | rc = ethtool_set_rx_csum(dev, useraddr); |
905 | break; | 916 | break; |
906 | case ETHTOOL_GTXCSUM: | 917 | case ETHTOOL_GTXCSUM: |
907 | rc = ethtool_get_tx_csum(dev, useraddr); | 918 | rc = ethtool_get_tx_csum(dev, useraddr); |
908 | break; | 919 | break; |
909 | case ETHTOOL_STXCSUM: | 920 | case ETHTOOL_STXCSUM: |
910 | rc = ethtool_set_tx_csum(dev, useraddr); | 921 | rc = ethtool_set_tx_csum(dev, useraddr); |
911 | break; | 922 | break; |
912 | case ETHTOOL_GSG: | 923 | case ETHTOOL_GSG: |
913 | rc = ethtool_get_sg(dev, useraddr); | 924 | rc = ethtool_get_sg(dev, useraddr); |
914 | break; | 925 | break; |
915 | case ETHTOOL_SSG: | 926 | case ETHTOOL_SSG: |
916 | rc = ethtool_set_sg(dev, useraddr); | 927 | rc = ethtool_set_sg(dev, useraddr); |
917 | break; | 928 | break; |
918 | case ETHTOOL_GTSO: | 929 | case ETHTOOL_GTSO: |
919 | rc = ethtool_get_tso(dev, useraddr); | 930 | rc = ethtool_get_tso(dev, useraddr); |
920 | break; | 931 | break; |
921 | case ETHTOOL_STSO: | 932 | case ETHTOOL_STSO: |
922 | rc = ethtool_set_tso(dev, useraddr); | 933 | rc = ethtool_set_tso(dev, useraddr); |
923 | break; | 934 | break; |
924 | case ETHTOOL_TEST: | 935 | case ETHTOOL_TEST: |
925 | rc = ethtool_self_test(dev, useraddr); | 936 | rc = ethtool_self_test(dev, useraddr); |
926 | break; | 937 | break; |
927 | case ETHTOOL_GSTRINGS: | 938 | case ETHTOOL_GSTRINGS: |
928 | rc = ethtool_get_strings(dev, useraddr); | 939 | rc = ethtool_get_strings(dev, useraddr); |
929 | break; | 940 | break; |
930 | case ETHTOOL_PHYS_ID: | 941 | case ETHTOOL_PHYS_ID: |
931 | rc = ethtool_phys_id(dev, useraddr); | 942 | rc = ethtool_phys_id(dev, useraddr); |
932 | break; | 943 | break; |
933 | case ETHTOOL_GSTATS: | 944 | case ETHTOOL_GSTATS: |
934 | rc = ethtool_get_stats(dev, useraddr); | 945 | rc = ethtool_get_stats(dev, useraddr); |
935 | break; | 946 | break; |
936 | case ETHTOOL_GPERMADDR: | 947 | case ETHTOOL_GPERMADDR: |
937 | rc = ethtool_get_perm_addr(dev, useraddr); | 948 | rc = ethtool_get_perm_addr(dev, useraddr); |
938 | break; | 949 | break; |
939 | case ETHTOOL_GUFO: | 950 | case ETHTOOL_GUFO: |
940 | rc = ethtool_get_ufo(dev, useraddr); | 951 | rc = ethtool_get_ufo(dev, useraddr); |
941 | break; | 952 | break; |
942 | case ETHTOOL_SUFO: | 953 | case ETHTOOL_SUFO: |
943 | rc = ethtool_set_ufo(dev, useraddr); | 954 | rc = ethtool_set_ufo(dev, useraddr); |
944 | break; | 955 | break; |
945 | case ETHTOOL_GGSO: | 956 | case ETHTOOL_GGSO: |
946 | rc = ethtool_get_gso(dev, useraddr); | 957 | rc = ethtool_get_gso(dev, useraddr); |
947 | break; | 958 | break; |
948 | case ETHTOOL_SGSO: | 959 | case ETHTOOL_SGSO: |
949 | rc = ethtool_set_gso(dev, useraddr); | 960 | rc = ethtool_set_gso(dev, useraddr); |
950 | break; | 961 | break; |
951 | default: | 962 | default: |
952 | rc = -EOPNOTSUPP; | 963 | rc = -EOPNOTSUPP; |
953 | } | 964 | } |
954 | 965 | ||
955 | if (dev->ethtool_ops->complete) | 966 | if (dev->ethtool_ops->complete) |
956 | dev->ethtool_ops->complete(dev); | 967 | dev->ethtool_ops->complete(dev); |
957 | 968 | ||
958 | if (old_features != dev->features) | 969 | if (old_features != dev->features) |
959 | netdev_features_change(dev); | 970 | netdev_features_change(dev); |
960 | 971 | ||
961 | return rc; | 972 | return rc; |
962 | 973 | ||
963 | ioctl: | 974 | ioctl: |
964 | /* Keep existing behaviour for the moment. */ | 975 | /* Keep existing behaviour for the moment. */ |
965 | if (!capable(CAP_NET_ADMIN)) | 976 | if (!capable(CAP_NET_ADMIN)) |
966 | return -EPERM; | 977 | return -EPERM; |
967 | 978 | ||
968 | if (dev->do_ioctl) | 979 | if (dev->do_ioctl) |
969 | return dev->do_ioctl(dev, ifr, SIOCETHTOOL); | 980 | return dev->do_ioctl(dev, ifr, SIOCETHTOOL); |
970 | return -EOPNOTSUPP; | 981 | return -EOPNOTSUPP; |
971 | } | 982 | } |
972 | 983 | ||
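
For reference, the path from userspace into dev_ethtool() is a SIOCETHTOOL ioctl whose ifr_data points at one of the ethtool command structures. The sketch below is roughly what a tx-checksum toggle does for the ETHTOOL_STXCSUM case handled above; it is standard ioctl usage rather than code from this commit, and error handling is kept minimal.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Ask the kernel to enable or disable TX checksum offload on ifname. */
static int set_tx_csum(const char *ifname, unsigned int on)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_STXCSUM, .data = on };
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;	/* dev_ethtool() reads the cmd word first */

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}
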
973 | EXPORT_SYMBOL(dev_ethtool); | 984 | EXPORT_SYMBOL(dev_ethtool); |
974 | EXPORT_SYMBOL(ethtool_op_get_link); | 985 | EXPORT_SYMBOL(ethtool_op_get_link); |
975 | EXPORT_SYMBOL_GPL(ethtool_op_get_perm_addr); | 986 | EXPORT_SYMBOL_GPL(ethtool_op_get_perm_addr); |
976 | EXPORT_SYMBOL(ethtool_op_get_sg); | 987 | EXPORT_SYMBOL(ethtool_op_get_sg); |
977 | EXPORT_SYMBOL(ethtool_op_get_tso); | 988 | EXPORT_SYMBOL(ethtool_op_get_tso); |
978 | EXPORT_SYMBOL(ethtool_op_get_tx_csum); | 989 | EXPORT_SYMBOL(ethtool_op_get_tx_csum); |
979 | EXPORT_SYMBOL(ethtool_op_set_sg); | 990 | EXPORT_SYMBOL(ethtool_op_set_sg); |
980 | EXPORT_SYMBOL(ethtool_op_set_tso); | 991 | EXPORT_SYMBOL(ethtool_op_set_tso); |
981 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | 992 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); |
982 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | 993 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); |
994 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
983 | EXPORT_SYMBOL(ethtool_op_set_ufo); | 995 | EXPORT_SYMBOL(ethtool_op_set_ufo); |
984 | EXPORT_SYMBOL(ethtool_op_get_ufo); | 996 | EXPORT_SYMBOL(ethtool_op_get_ufo); |
985 | 997 |