Commit c1a9fa4ba67971f45ee106250340963cef7d020a

Authored by Michal Simek
1 parent 52fded7b94

net: zynq: Use predefined macros instead of hardcoded value

MII is used by this driver.

Signed-off-by: Michal Simek <michal.simek@xilinx.com>
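
For context, a minimal before/after sketch of the call this commit touches. The prototype shown is an assumption based on the classic (pre driver-model) U-Boot phy_connect() from include/phy.h of this era; PHY_INTERFACE_MODE_MII is the first phy_interface_t enumerator, so the previously hardcoded 0 already selected MII, just not by name:

	/* Sketch only: prototype assumed from the non-DM U-Boot <phy.h> of this era */
	struct phy_device *phy_connect(struct mii_dev *bus, int addr,
				       struct eth_device *dev,
				       phy_interface_t interface);

	/* Before: interface passed as a bare 0 (numerically MII) */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, 0);

	/* After: intent spelled out with the predefined macro */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev,
			     PHY_INTERFACE_MODE_MII);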

Showing 1 changed file with 2 additions and 1 deletion

drivers/net/zynq_gem.c
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <config.h>
#include <fdtdec.h>
#include <libfdt.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

#if !defined(CONFIG_PHYLIB)
# error XILINX_GEM_ETHERNET requires PHYLIB
#endif

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit.. */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x000000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x000000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x000000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x000020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000080000 /* Div pclk by 32, 80MHz */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV2	0x0000c0000 /* Div pclk by 48, 120MHz */

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 * 0x1000: 10Mbps full duplex support
 * 0x0800: 10Mbps half duplex support
 * 0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* Network Control reg */
	u32 nwcfg; /* Network Config reg */
	u32 nwsr; /* Network Status reg */
	u32 reserved1;
	u32 dmacr; /* DMA Control reg */
	u32 txsr; /* TX Status reg */
	u32 rxqbase; /* RX Q Base address reg */
	u32 txqbase; /* TX Q Base address reg */
	u32 rxsr; /* RX Status reg */
	u32 reserved2[2];
	u32 idr; /* Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* Phy Maintaince reg */
	u32 reserved4[18];
	u32 hashl; /* Hash Low address reg */
	u32 hashh; /* Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* Specific1 addr low/high reg */
	u32 match[4]; /* Type ID1 Match reg */
	u32 reserved6[18];
	u32 stat[44]; /* Octects transmitted Low reg - stat start */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
};

#define RX_BUF 3
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	64

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	u32 emio;
	int init;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

static inline int mdio_wait(struct eth_device *dev)
{
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (--timeout) {
		if (readl(&regs->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK)
			break;
		WATCHDOG_RESET();
	}

	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	return 0;
}

static u32 phy_setup_op(struct eth_device *dev, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	if (mdio_wait(dev))
		return 1;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	if (mdio_wait(dev))
		return 1;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static u32 phyread(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 *val)
{
	return phy_setup_op(dev, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
}

static u32 phywrite(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 data)
{
	return phy_setup_op(dev, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static void phy_detection(struct eth_device *dev)
{
	int i;
	u16 phyreg;
	struct zynq_gem_priv *priv = dev->priv;

	if (priv->phyaddr != -1) {
		phyread(dev, priv->phyaddr, PHY_DETECT_REG, &phyreg);
		if ((phyreg != 0xFFFF) &&
		    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
			/* Found a valid PHY address */
			debug("Default phy address %d is valid\n",
			      priv->phyaddr);
			return;
		} else {
			debug("PHY address is not setup correctly %d\n",
			      priv->phyaddr);
			priv->phyaddr = -1;
		}
	}

	debug("detecting phy address\n");
	if (priv->phyaddr == -1) {
		/* detect the PHY address */
		for (i = 31; i >= 0; i--) {
			phyread(dev, i, PHY_DETECT_REG, &phyreg);
			if ((phyreg != 0xFFFF) &&
			    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("Found valid phy address, %d\n", i);
				return;
			}
		}
	}
	printf("PHY is not detected\n");
}

static int zynq_gem_setup_mac(struct eth_device *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = dev->enetaddr[0];
	macaddrlow |= dev->enetaddr[1] << 8;
	macaddrlow |= dev->enetaddr[2] << 16;
	macaddrlow |= dev->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = dev->enetaddr[4];
	macaddrhigh |= dev->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

static int zynq_gem_init(struct eth_device *dev, bd_t * bis)
{
	u32 i;
	unsigned long clk_rate = 0;
	struct phy_device *phydev;
	const u32 stat_size = (sizeof(struct zynq_gem_regs) -
			       offsetof(struct zynq_gem_regs, stat)) / 4;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
	struct zynq_gem_priv *priv = dev->priv;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i <= stat_size; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					((u32)(priv->rxbuffers) +
							(i * PKTSIZE_ALIGN));
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel((u32)priv->rx_bd, &regs->rxqbase);

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup for Network Control register, MDIO, Rx and Tx enable */
		setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		priv->init++;
	}

	phy_detection(dev);

	/* interface - look at tsec */
-	phydev = phy_connect(priv->bus, priv->phyaddr, dev, 0);
+	phydev = phy_connect(priv->bus, priv->phyaddr, dev,
+			     PHY_INTERFACE_MODE_MII);

	phydev->supported = supported | ADVERTISED_Pause |
			    ADVERTISED_Asym_Pause;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	phy_config(phydev);
	phy_startup(phydev);

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return -1;
	}

	switch (phydev->speed) {
	case SPEED_1000:
		writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		clrsetbits_le32(&regs->nwcfg, ZYNQ_GEM_NWCFG_SPEED1000,
				ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

	/* Change the rclk and clk only not using EMIO interface */
	if (!priv->emio)
		zynq_slcr_gem_clk_setup(dev->iobase !=
					ZYNQ_GEM_BASEADDR0, clk_rate);

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

static int zynq_gem_send(struct eth_device *dev, void *ptr, int len)
{
	u32 addr, size;
	struct zynq_gem_priv *priv = dev->priv;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	/* setup BD */
	writel((u32)priv->tx_bd, &regs->txqbase);

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = (u32)ptr;
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			      ZYNQ_GEM_TXBUF_LAST_MASK;

	addr = (u32) ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_UNDERRUN)
		printf("TX underrun\n");
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return 0;
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct eth_device *dev)
{
	int frame_len;
	struct zynq_gem_priv *priv = dev->priv;
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return 0;

	if (!(current_bd->status &
			(ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return 0;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (frame_len) {
		u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
		addr &= ~(ARCH_DMA_MINALIGN - 1);
		u32 size = roundup(frame_len, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(addr, addr + size);

		NetReceive((u8 *)addr, frame_len);

		if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK)
			priv->rx_first_buf = priv->rxbd_current;
		else {
			current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			current_bd->status = 0xF0000000; /* FIXME */
		}

		if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
			first_bd = &priv->rx_bd[priv->rx_first_buf];
			first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			first_bd->status = 0xF0000000;
		}

		if ((++priv->rxbd_current) >= RX_BUF)
			priv->rxbd_current = 0;
	}

	return frame_len;
}

static void zynq_gem_halt(struct eth_device *dev)
{
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
			ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

static int zynq_gem_miiphyread(const char *devname, uchar addr,
			       uchar reg, ushort *val)
{
	struct eth_device *dev = eth_get_dev();
	int ret;

	ret = phyread(dev, addr, reg, val);
	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val);
	return ret;
}

static int zynq_gem_miiphy_write(const char *devname, uchar addr,
				 uchar reg, ushort val)
{
	struct eth_device *dev = eth_get_dev();

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val);
	return phywrite(dev, addr, reg, val);
}

int zynq_gem_initialize(bd_t *bis, int base_addr, int phy_addr, u32 emio)
{
	struct eth_device *dev;
	struct zynq_gem_priv *priv;
	void *bd_space;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL)
		return -1;

	dev->priv = calloc(1, sizeof(struct zynq_gem_priv));
	if (dev->priv == NULL) {
		free(dev);
		return -1;
	}
	priv = dev->priv;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);

	/* Align bd_space to 1MB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((u32)bd_space + BD_SEPRN_SPACE);

	priv->phyaddr = phy_addr;
	priv->emio = emio;

	sprintf(dev->name, "Gem.%x", base_addr);

	dev->iobase = base_addr;

	dev->init = zynq_gem_init;
	dev->halt = zynq_gem_halt;
	dev->send = zynq_gem_send;
	dev->recv = zynq_gem_recv;
	dev->write_hwaddr = zynq_gem_setup_mac;

	eth_register(dev);

	miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return 1;
}

#ifdef CONFIG_OF_CONTROL
int zynq_gem_of_init(const void *blob)
{
	int offset = 0;
	u32 ret = 0;
	u32 reg, phy_reg;

	debug("ZYNQ GEM: Initialization\n");

	do {
		offset = fdt_node_offset_by_compatible(blob, offset,
					"xlnx,ps7-ethernet-1.00.a");
		if (offset != -1) {
			reg = fdtdec_get_addr(blob, offset, "reg");
			if (reg != FDT_ADDR_T_NONE) {
				offset = fdtdec_lookup_phandle(blob, offset,
							"phy-handle");
				if (offset != -1)
					phy_reg = fdtdec_get_addr(blob, offset,
								  "reg");
				else
					phy_reg = 0;

				debug("ZYNQ GEM: addr %x, phyaddr %x\n",
				      reg, phy_reg);

				ret |= zynq_gem_initialize(NULL, reg,
							   phy_reg, 0);

			} else {
				debug("ZYNQ GEM: Can't get base address\n");
				return -1;
			}
		}
	} while (offset != -1);

	return ret;
}
#endif