Commit f20136eb03a1dbdfb04f3c62fd11c0d02d02b726
Committed by: Kevin Hilman
1 parent: 8774882394
Exists in: master and in 39 other branches
net: davinci_emac: separate out davinci mdio
Davinci's MDIO controller is present on other TI devices, without an accompanying EMAC. For example, on tnetv107x, the same MDIO module is used in conjunction with a 3-port switch hardware. By separating the MDIO controller code into its own platform driver, this patch allows common logic to be reused on such platforms.

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Tested-by: Michael Williamson <michael.williamson@criticallink.com>
Tested-by: Caglar Akyuz <caglarakyuz@gmail.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
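With the MDIO controller probing as its own "davinci_mdio" platform driver, a board file registers a matching platform device next to the EMAC one. A minimal sketch follows; the register base, size, and device id are chosen purely for illustration (a real board uses its SoC's MDIO address and clock):

        #include <linux/kernel.h>
        #include <linux/platform_device.h>
        #include <linux/davinci_emac.h>

        static struct mdio_platform_data board_mdio_pdata = {
                .bus_freq       = 2200000,              /* 2.2 MHz MDIO clock */
        };

        static struct resource board_mdio_resources[] = {
                {
                        .start  = 0x01e24000,           /* hypothetical register base */
                        .end    = 0x01e24fff,
                        .flags  = IORESOURCE_MEM,
                },
        };

        static struct platform_device board_mdio_device = {
                .name           = "davinci_mdio",       /* matches davinci_mdio_driver below */
                .id             = 0,                    /* becomes the mii bus id */
                .num_resources  = ARRAY_SIZE(board_mdio_resources),
                .resource       = board_mdio_resources,
                .dev            = {
                        .platform_data  = &board_mdio_pdata,
                },
        };

Registering it with platform_device_register(&board_mdio_device) during board init is enough for the driver's probe to claim the region, enable the clock, and bring up the mii bus.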
Showing 4 changed files with 490 additions and 0 deletions
drivers/net/Kconfig
... | ... | @@ -958,6 +958,16 @@ |
958 | 958 | To compile this driver as a module, choose M here: the module |
959 | 959 | will be called davinci_emac_driver. This is recommended. |
960 | 960 | |
961 | +config TI_DAVINCI_MDIO | |
962 | + tristate "TI DaVinci MDIO Support" | |
963 | + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) | |
964 | + select PHYLIB | |
965 | + help | |
966 | + This driver supports TI's DaVinci MDIO module. | |
967 | + | |
968 | + To compile this driver as a module, choose M here: the module | |
969 | + will be called davinci_mdio. This is recommended. | |
970 | + | |
961 | 971 | config DM9000 |
962 | 972 | tristate "DM9000 support" |
963 | 973 | depends on ARM || BLACKFIN || MIPS |
drivers/net/Makefile
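(The Makefile hunk is collapsed in this view. Going by the Kconfig help text above, which says the module will be called davinci_mdio, the added rule is presumably of the form:

        obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
)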
drivers/net/davinci_mdio.c
1 | +/* | |
2 | + * DaVinci MDIO Module driver | |
3 | + * | |
4 | + * Copyright (C) 2010 Texas Instruments. | |
5 | + * | |
6 | + * Shamelessly ripped out of davinci_emac.c, original copyrights follow: | |
7 | + * | |
8 | + * Copyright (C) 2009 Texas Instruments. | |
9 | + * | |
10 | + * --------------------------------------------------------------------------- | |
11 | + * | |
12 | + * This program is free software; you can redistribute it and/or modify | |
13 | + * it under the terms of the GNU General Public License as published by | |
14 | + * the Free Software Foundation; either version 2 of the License, or | |
15 | + * (at your option) any later version. | |
16 | + * | |
17 | + * This program is distributed in the hope that it will be useful, | |
18 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | + * GNU General Public License for more details. | |
21 | + * | |
22 | + * You should have received a copy of the GNU General Public License | |
23 | + * along with this program; if not, write to the Free Software | |
24 | + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
25 | + * --------------------------------------------------------------------------- | |
26 | + */ | |
27 | +#include <linux/module.h> | |
28 | +#include <linux/kernel.h> | |
29 | +#include <linux/platform_device.h> | |
30 | +#include <linux/delay.h> | |
31 | +#include <linux/sched.h> | |
32 | +#include <linux/slab.h> | |
33 | +#include <linux/phy.h> | |
34 | +#include <linux/clk.h> | |
35 | +#include <linux/err.h> | |
36 | +#include <linux/io.h> | |
37 | +#include <linux/davinci_emac.h> | |
38 | + | |
39 | +/* | |
40 | + * This timeout definition is a worst-case ultra defensive measure against | |
41 | + * unexpected controller lock ups. Ideally, we should never ever hit this | |
42 | + * scenario in practice. | |
43 | + */ | |
44 | +#define MDIO_TIMEOUT 100 /* msecs */ | |
45 | + | |
46 | +#define PHY_REG_MASK 0x1f | |
47 | +#define PHY_ID_MASK 0x1f | |
48 | + | |
49 | +#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ | |
50 | + | |
51 | +struct davinci_mdio_regs { | |
52 | + u32 version; | |
53 | + u32 control; | |
54 | +#define CONTROL_IDLE BIT(31) | |
55 | +#define CONTROL_ENABLE BIT(30) | |
56 | +#define CONTROL_MAX_DIV (0xff) | |
57 | + | |
58 | + u32 alive; | |
59 | + u32 link; | |
60 | + u32 linkintraw; | |
61 | + u32 linkintmasked; | |
62 | + u32 __reserved_0[2]; | |
63 | + u32 userintraw; | |
64 | + u32 userintmasked; | |
65 | + u32 userintmaskset; | |
66 | + u32 userintmaskclr; | |
67 | + u32 __reserved_1[20]; | |
68 | + | |
69 | + struct { | |
70 | + u32 access; | |
71 | +#define USERACCESS_GO BIT(31) | |
72 | +#define USERACCESS_WRITE BIT(30) | |
73 | +#define USERACCESS_ACK BIT(29) | |
74 | +#define USERACCESS_READ (0) | |
75 | +#define USERACCESS_DATA (0xffff) | |
76 | + | |
77 | + u32 physel; | |
78 | + } user[0]; | |
79 | +}; | |
80 | + | |
81 | +struct mdio_platform_data default_pdata = { | |
82 | + .bus_freq = DEF_OUT_FREQ, | |
83 | +}; | |
84 | + | |
85 | +struct davinci_mdio_data { | |
86 | + struct mdio_platform_data pdata; | |
87 | + struct davinci_mdio_regs __iomem *regs; | |
88 | + spinlock_t lock; | |
89 | + struct clk *clk; | |
90 | + struct device *dev; | |
91 | + struct mii_bus *bus; | |
92 | + bool suspended; | |
93 | + unsigned long access_time; /* jiffies */ | |
94 | +}; | |
95 | + | |
96 | +static void __davinci_mdio_reset(struct davinci_mdio_data *data) | |
97 | +{ | |
98 | + u32 mdio_in, div, mdio_out_khz, access_time; | |
99 | + | |
100 | + mdio_in = clk_get_rate(data->clk); | |
101 | + div = (mdio_in / data->pdata.bus_freq) - 1; | |
102 | + if (div > CONTROL_MAX_DIV) | |
103 | + div = CONTROL_MAX_DIV; | |
104 | + | |
105 | + /* set enable and clock divider */ | |
106 | + __raw_writel(div | CONTROL_ENABLE, &data->regs->control); | |
107 | + | |
108 | + /* | |
109 | + * One mdio transaction consists of: | |
110 | + * 32 bits of preamble | |
111 | + * 32 bits of transferred data | |
112 | + * 24 bits of bus yield (not needed unless shared?) | |
113 | + */ | |
114 | + mdio_out_khz = mdio_in / (1000 * (div + 1)); | |
115 | + access_time = (88 * 1000) / mdio_out_khz; | |
116 | + | |
117 | + /* | |
118 | + * In the worst case, we could be kicking off a user-access immediately | |
119 | + * after the mdio bus scan state-machine triggered its own read. If | |
120 | + * so, our request could get deferred by one access cycle. We | |
121 | + * defensively allow for 4 access cycles. | |
122 | + */ | |
123 | + data->access_time = usecs_to_jiffies(access_time * 4); | |
124 | + if (!data->access_time) | |
125 | + data->access_time = 1; | |
126 | +} | |
127 | + | |
128 | +static int davinci_mdio_reset(struct mii_bus *bus) | |
129 | +{ | |
130 | + struct davinci_mdio_data *data = bus->priv; | |
131 | + u32 phy_mask, ver; | |
132 | + | |
133 | + __davinci_mdio_reset(data); | |
134 | + | |
135 | + /* wait for scan logic to settle */ | |
136 | + msleep(PHY_MAX_ADDR * data->access_time); | |
137 | + | |
138 | + /* dump hardware version info */ | |
139 | + ver = __raw_readl(&data->regs->version); | |
140 | + dev_info(data->dev, "davinci mdio revision %d.%d\n", | |
141 | + (ver >> 8) & 0xff, ver & 0xff); | |
142 | + | |
143 | + /* get phy mask from the alive register */ | |
144 | + phy_mask = __raw_readl(&data->regs->alive); | |
145 | + if (phy_mask) { | |
146 | + /* restrict mdio bus to live phys only */ | |
147 | + dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); | |
148 | + phy_mask = ~phy_mask; | |
149 | + } else { | |
150 | + /* desperately scan all phys */ | |
151 | + dev_warn(data->dev, "no live phy, scanning all\n"); | |
152 | + phy_mask = 0; | |
153 | + } | |
154 | + data->bus->phy_mask = phy_mask; | |
155 | + | |
156 | + return 0; | |
157 | +} | |
158 | + | |
159 | +/* wait until hardware is ready for another user access */ | |
160 | +static inline int wait_for_user_access(struct davinci_mdio_data *data) | |
161 | +{ | |
162 | + struct davinci_mdio_regs __iomem *regs = data->regs; | |
163 | + unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); | |
164 | + u32 reg; | |
165 | + | |
166 | + while (time_after(timeout, jiffies)) { | |
167 | + reg = __raw_readl(®s->user[0].access); | |
168 | + if ((reg & USERACCESS_GO) == 0) | |
169 | + return 0; | |
170 | + | |
171 | + reg = __raw_readl(®s->control); | |
172 | + if ((reg & CONTROL_IDLE) == 0) | |
173 | + continue; | |
174 | + | |
175 | + /* | |
176 | + * An emac soft_reset may have clobbered the mdio controller's | |
177 | + * state machine. We need to reset and retry the current | |
178 | + * operation | |
179 | + */ | |
180 | + dev_warn(data->dev, "resetting idled controller\n"); | |
181 | + __davinci_mdio_reset(data); | |
182 | + return -EAGAIN; | |
183 | + } | |
184 | + dev_err(data->dev, "timed out waiting for user access\n"); | |
185 | + return -ETIMEDOUT; | |
186 | +} | |
187 | + | |
188 | +/* wait until hardware state machine is idle */ | |
189 | +static inline int wait_for_idle(struct davinci_mdio_data *data) | |
190 | +{ | |
191 | + struct davinci_mdio_regs __iomem *regs = data->regs; | |
192 | + unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); | |
193 | + | |
194 | + while (time_after(timeout, jiffies)) { | |
195 | + if (__raw_readl(®s->control) & CONTROL_IDLE) | |
196 | + return 0; | |
197 | + } | |
198 | + dev_err(data->dev, "timed out waiting for idle\n"); | |
199 | + return -ETIMEDOUT; | |
200 | +} | |
201 | + | |
202 | +static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) | |
203 | +{ | |
204 | + struct davinci_mdio_data *data = bus->priv; | |
205 | + u32 reg; | |
206 | + int ret; | |
207 | + | |
208 | + if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) | |
209 | + return -EINVAL; | |
210 | + | |
211 | + spin_lock(&data->lock); | |
212 | + | |
213 | + if (data->suspended) { | |
214 | + spin_unlock(&data->lock); | |
215 | + return -ENODEV; | |
216 | + } | |
217 | + | |
218 | + reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | | |
219 | + (phy_id << 16)); | |
220 | + | |
221 | + while (1) { | |
222 | + ret = wait_for_user_access(data); | |
223 | + if (ret == -EAGAIN) | |
224 | + continue; | |
225 | + if (ret < 0) | |
226 | + break; | |
227 | + | |
228 | + __raw_writel(reg, &data->regs->user[0].access); | |
229 | + | |
230 | + ret = wait_for_user_access(data); | |
231 | + if (ret == -EAGAIN) | |
232 | + continue; | |
233 | + if (ret < 0) | |
234 | + break; | |
235 | + | |
236 | + reg = __raw_readl(&data->regs->user[0].access); | |
237 | + ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO; | |
238 | + break; | |
239 | + } | |
240 | + | |
241 | + spin_unlock(&data->lock); | |
242 | + | |
243 | + return ret; | |
244 | +} | |
245 | + | |
246 | +static int davinci_mdio_write(struct mii_bus *bus, int phy_id, | |
247 | + int phy_reg, u16 phy_data) | |
248 | +{ | |
249 | + struct davinci_mdio_data *data = bus->priv; | |
250 | + u32 reg; | |
251 | + int ret; | |
252 | + | |
253 | + if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) | |
254 | + return -EINVAL; | |
255 | + | |
256 | + spin_lock(&data->lock); | |
257 | + | |
258 | + if (data->suspended) { | |
259 | + spin_unlock(&data->lock); | |
260 | + return -ENODEV; | |
261 | + } | |
262 | + | |
263 | + reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | | |
264 | + (phy_id << 16) | (phy_data & USERACCESS_DATA)); | |
265 | + | |
266 | + while (1) { | |
267 | + ret = wait_for_user_access(data); | |
268 | + if (ret == -EAGAIN) | |
269 | + continue; | |
270 | + if (ret < 0) | |
271 | + break; | |
272 | + | |
273 | + __raw_writel(reg, &data->regs->user[0].access); | |
274 | + | |
275 | + ret = wait_for_user_access(data); | |
276 | + if (ret == -EAGAIN) | |
277 | + continue; | |
278 | + break; | |
279 | + } | |
280 | + | |
281 | + spin_unlock(&data->lock); | |
282 | + | |
283 | +	return ret; | |
284 | +} | |
285 | + | |
286 | +static int __devinit davinci_mdio_probe(struct platform_device *pdev) | |
287 | +{ | |
288 | + struct mdio_platform_data *pdata = pdev->dev.platform_data; | |
289 | + struct device *dev = &pdev->dev; | |
290 | + struct davinci_mdio_data *data; | |
291 | + struct resource *res; | |
292 | + struct phy_device *phy; | |
293 | + int ret, addr; | |
294 | + | |
295 | + data = kzalloc(sizeof(*data), GFP_KERNEL); | |
296 | + if (!data) { | |
297 | + dev_err(dev, "failed to alloc device data\n"); | |
298 | + return -ENOMEM; | |
299 | + } | |
300 | + | |
301 | + data->pdata = pdata ? (*pdata) : default_pdata; | |
302 | + | |
303 | + data->bus = mdiobus_alloc(); | |
304 | + if (!data->bus) { | |
305 | + dev_err(dev, "failed to alloc mii bus\n"); | |
306 | + ret = -ENOMEM; | |
307 | + goto bail_out; | |
308 | + } | |
309 | + | |
310 | + data->bus->name = dev_name(dev); | |
311 | +	data->bus->read = davinci_mdio_read; | |
312 | +	data->bus->write = davinci_mdio_write; | |
313 | +	data->bus->reset = davinci_mdio_reset; | |
314 | + data->bus->parent = dev; | |
315 | + data->bus->priv = data; | |
316 | + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); | |
317 | + | |
318 | + data->clk = clk_get(dev, NULL); | |
319 | + if (IS_ERR(data->clk)) { | |
320 | +	dev_err(dev, "failed to get device clock\n"); | |
321 | +	ret = PTR_ERR(data->clk); | |
322 | +	data->clk = NULL; | |
323 | + goto bail_out; | |
324 | + } | |
325 | + | |
326 | + clk_enable(data->clk); | |
327 | + | |
328 | + dev_set_drvdata(dev, data); | |
329 | + data->dev = dev; | |
330 | + spin_lock_init(&data->lock); | |
331 | + | |
332 | + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
333 | + if (!res) { | |
334 | + dev_err(dev, "could not find register map resource\n"); | |
335 | + ret = -ENOENT; | |
336 | + goto bail_out; | |
337 | + } | |
338 | + | |
339 | + res = devm_request_mem_region(dev, res->start, resource_size(res), | |
340 | + dev_name(dev)); | |
341 | + if (!res) { | |
342 | + dev_err(dev, "could not allocate register map resource\n"); | |
343 | + ret = -ENXIO; | |
344 | + goto bail_out; | |
345 | + } | |
346 | + | |
347 | + data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); | |
348 | + if (!data->regs) { | |
349 | + dev_err(dev, "could not map mdio registers\n"); | |
350 | + ret = -ENOMEM; | |
351 | + goto bail_out; | |
352 | + } | |
353 | + | |
354 | + /* register the mii bus */ | |
355 | + ret = mdiobus_register(data->bus); | |
356 | + if (ret) | |
357 | + goto bail_out; | |
358 | + | |
359 | + /* scan and dump the bus */ | |
360 | + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { | |
361 | + phy = data->bus->phy_map[addr]; | |
362 | + if (phy) { | |
363 | + dev_info(dev, "phy[%d]: device %s, driver %s\n", | |
364 | + phy->addr, dev_name(&phy->dev), | |
365 | + phy->drv ? phy->drv->name : "unknown"); | |
366 | + } | |
367 | + } | |
368 | + | |
369 | + return 0; | |
370 | + | |
371 | +bail_out: | |
372 | + if (data->bus) | |
373 | + mdiobus_free(data->bus); | |
374 | + | |
375 | + if (data->clk) { | |
376 | + clk_disable(data->clk); | |
377 | + clk_put(data->clk); | |
378 | + } | |
379 | + | |
380 | + kfree(data); | |
381 | + | |
382 | + return ret; | |
383 | +} | |
384 | + | |
385 | +static int __devexit davinci_mdio_remove(struct platform_device *pdev) | |
386 | +{ | |
387 | + struct device *dev = &pdev->dev; | |
388 | + struct davinci_mdio_data *data = dev_get_drvdata(dev); | |
389 | + | |
390 | + if (data->bus) | |
391 | + mdiobus_free(data->bus); | |
392 | + | |
393 | + if (data->clk) { | |
394 | + clk_disable(data->clk); | |
395 | + clk_put(data->clk); | |
396 | + } | |
397 | + | |
398 | + dev_set_drvdata(dev, NULL); | |
399 | + | |
400 | + kfree(data); | |
401 | + | |
402 | + return 0; | |
403 | +} | |
404 | + | |
405 | +static int davinci_mdio_suspend(struct device *dev) | |
406 | +{ | |
407 | + struct davinci_mdio_data *data = dev_get_drvdata(dev); | |
408 | + u32 ctrl; | |
409 | + | |
410 | + spin_lock(&data->lock); | |
411 | + | |
412 | + /* shutdown the scan state machine */ | |
413 | + ctrl = __raw_readl(&data->regs->control); | |
414 | + ctrl &= ~CONTROL_ENABLE; | |
415 | + __raw_writel(ctrl, &data->regs->control); | |
416 | + wait_for_idle(data); | |
417 | + | |
418 | + if (data->clk) | |
419 | + clk_disable(data->clk); | |
420 | + | |
421 | + data->suspended = true; | |
422 | + spin_unlock(&data->lock); | |
423 | + | |
424 | + return 0; | |
425 | +} | |
426 | + | |
427 | +static int davinci_mdio_resume(struct device *dev) | |
428 | +{ | |
429 | + struct davinci_mdio_data *data = dev_get_drvdata(dev); | |
430 | + u32 ctrl; | |
431 | + | |
432 | + spin_lock(&data->lock); | |
433 | + if (data->clk) | |
434 | + clk_enable(data->clk); | |
435 | + | |
436 | + /* restart the scan state machine */ | |
437 | + ctrl = __raw_readl(&data->regs->control); | |
438 | + ctrl |= CONTROL_ENABLE; | |
439 | + __raw_writel(ctrl, &data->regs->control); | |
440 | + | |
441 | + data->suspended = false; | |
442 | + spin_unlock(&data->lock); | |
443 | + | |
444 | + return 0; | |
445 | +} | |
446 | + | |
447 | +static const struct dev_pm_ops davinci_mdio_pm_ops = { | |
448 | + .suspend = davinci_mdio_suspend, | |
449 | + .resume = davinci_mdio_resume, | |
450 | +}; | |
451 | + | |
452 | +static struct platform_driver davinci_mdio_driver = { | |
453 | + .driver = { | |
454 | + .name = "davinci_mdio", | |
455 | + .owner = THIS_MODULE, | |
456 | + .pm = &davinci_mdio_pm_ops, | |
457 | + }, | |
458 | + .probe = davinci_mdio_probe, | |
459 | + .remove = __devexit_p(davinci_mdio_remove), | |
460 | +}; | |
461 | + | |
462 | +static int __init davinci_mdio_init(void) | |
463 | +{ | |
464 | + return platform_driver_register(&davinci_mdio_driver); | |
465 | +} | |
466 | +device_initcall(davinci_mdio_init); | |
467 | + | |
468 | +static void __exit davinci_mdio_exit(void) | |
469 | +{ | |
470 | + platform_driver_unregister(&davinci_mdio_driver); | |
471 | +} | |
472 | +module_exit(davinci_mdio_exit); | |
473 | + | |
474 | +MODULE_LICENSE("GPL"); | |
475 | +MODULE_DESCRIPTION("DaVinci MDIO driver"); |
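To make the divider and access-time arithmetic in __davinci_mdio_reset() concrete, here is the same calculation spelled out for a purely hypothetical 100 MHz functional clock and the 2.2 MHz default bus frequency (the real rate comes from clk_get_rate()):

        /* worked example of __davinci_mdio_reset()'s timing math; the 100 MHz
         * input clock is an assumption for illustration only */
        static unsigned long example_mdio_access_time_us(void)
        {
                unsigned long mdio_in  = 100000000;                     /* assumed clk_get_rate() result */
                unsigned long bus_freq = 2200000;                       /* DEF_OUT_FREQ */
                unsigned long div      = (mdio_in / bus_freq) - 1;      /* 44, below CONTROL_MAX_DIV */
                unsigned long out_khz  = mdio_in / (1000 * (div + 1));  /* ~2222 kHz on the wire */
                unsigned long frame_us = (88 * 1000) / out_khz;         /* ~39 us per 88-bit frame */

                return frame_us * 4;    /* ~156 us budget for four access cycles */
        }

With HZ=100 this still rounds up to a single jiffy inside usecs_to_jiffies(), which is why the driver also clamps access_time to at least 1.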
include/linux/davinci_emac.h
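The davinci_emac.h hunk is also collapsed here. Judging from how the driver above consumes its platform data (default_pdata and data->pdata.bus_freq), the header presumably gains something along these lines, with the exact field type being an assumption:

        /* sketch of the structure the mdio driver expects from platform code */
        struct mdio_platform_data {
                unsigned long   bus_freq;       /* desired MDIO bus clock, in Hz */
        };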