Commit f4fcba5c5baaaa9d477d753f97124efdb8e45893

Authored by Philipp Tomsich
1 parent a45f17e8b9

clk: implement clk_set_defaults()

Linux uses the properties 'assigned-clocks', 'assigned-clock-parents'
and 'assigned-clock-rates' to configure the clock subsystem for use
with various peripheral nodes.

This implements clk_set_defaults() and hooks it up with the general
device probing in drivers/core/device.c: when a new device is probed,
clk_set_defaults() will be called for it and will process the
properties mentioned above.

Note that this functionality is designed to fail gracefully (i.e. if a
clock driver does not implement set_parent(), we simply accept this
and ignore the error) so as not to break existing board support.

Signed-off-by: Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
Tested-by: David Wu <david.wu@rock-chips.com>

Series-changes: 2
- Fixed David's email address.

Series-version: 2

Cover-letter:
clk: support assigned-clocks, assigned-clock-parents, assigned-clock-rates

For various peripherals on Rockchip SoCs (e.g. for the Ethernet GMAC),
the parent clock needs to be set via the DTS.  This adds the required
plumbing and implements the GMAC case for the RK3399.
END
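
For context, these properties are read from the device-tree node of the
consuming peripheral.  A fragment of the kind this series targets might
look as follows (the &cru controller, the SCLK_RMII_SRC identifier and
the &clkin_gmac input are illustrative labels, not taken from this
patch):

&gmac {
	/*
	 * Processed by clk_set_defaults() when this node's driver is
	 * probed: the RMII source clock is reparented to the external
	 * clock input.
	 */
	assigned-clocks = <&cru SCLK_RMII_SRC>;
	assigned-clock-parents = <&clkin_gmac>;
};

Adding an assigned-clock-rates entry alongside would likewise pin the
rate of the corresponding clock listed in assigned-clocks.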

Showing 3 changed files with 141 additions and 0 deletions

drivers/clk/clk-uclass.c
1 /* 1 /*
2 * Copyright (C) 2015 Google, Inc 2 * Copyright (C) 2015 Google, Inc
3 * Written by Simon Glass <sjg@chromium.org> 3 * Written by Simon Glass <sjg@chromium.org>
4 * Copyright (c) 2016, NVIDIA CORPORATION. 4 * Copyright (c) 2016, NVIDIA CORPORATION.
5 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
5 * 6 *
6 * SPDX-License-Identifier: GPL-2.0+ 7 * SPDX-License-Identifier: GPL-2.0+
7 */ 8 */
8 9
9 #include <common.h> 10 #include <common.h>
10 #include <clk.h> 11 #include <clk.h>
11 #include <clk-uclass.h> 12 #include <clk-uclass.h>
12 #include <dm.h> 13 #include <dm.h>
14 #include <dm/read.h>
13 #include <dt-structs.h> 15 #include <dt-structs.h>
14 #include <errno.h> 16 #include <errno.h>
15 17
16 static inline const struct clk_ops *clk_dev_ops(struct udevice *dev) 18 static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
17 { 19 {
18 return (const struct clk_ops *)dev->driver->ops; 20 return (const struct clk_ops *)dev->driver->ops;
19 } 21 }
20 22
21 #if CONFIG_IS_ENABLED(OF_CONTROL) 23 #if CONFIG_IS_ENABLED(OF_CONTROL)
22 # if CONFIG_IS_ENABLED(OF_PLATDATA) 24 # if CONFIG_IS_ENABLED(OF_PLATDATA)
23 int clk_get_by_index_platdata(struct udevice *dev, int index, 25 int clk_get_by_index_platdata(struct udevice *dev, int index,
24 struct phandle_1_arg *cells, struct clk *clk) 26 struct phandle_1_arg *cells, struct clk *clk)
25 { 27 {
26 int ret; 28 int ret;
27 29
28 if (index != 0) 30 if (index != 0)
29 return -ENOSYS; 31 return -ENOSYS;
30 ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev); 32 ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
31 if (ret) 33 if (ret)
32 return ret; 34 return ret;
33 clk->id = cells[0].arg[0]; 35 clk->id = cells[0].arg[0];
34 36
35 return 0; 37 return 0;
36 } 38 }
37 # else 39 # else
38 static int clk_of_xlate_default(struct clk *clk, 40 static int clk_of_xlate_default(struct clk *clk,
39 struct ofnode_phandle_args *args) 41 struct ofnode_phandle_args *args)
40 { 42 {
41 debug("%s(clk=%p)\n", __func__, clk); 43 debug("%s(clk=%p)\n", __func__, clk);
42 44
43 if (args->args_count > 1) { 45 if (args->args_count > 1) {
44 debug("Invaild args_count: %d\n", args->args_count); 46 debug("Invaild args_count: %d\n", args->args_count);
45 return -EINVAL; 47 return -EINVAL;
46 } 48 }
47 49
48 if (args->args_count) 50 if (args->args_count)
49 clk->id = args->args[0]; 51 clk->id = args->args[0];
50 else 52 else
51 clk->id = 0; 53 clk->id = 0;
52 54
53 return 0; 55 return 0;
54 } 56 }
55 57
56 static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name, 58 static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
57 int index, struct clk *clk) 59 int index, struct clk *clk)
58 { 60 {
59 int ret; 61 int ret;
60 struct ofnode_phandle_args args; 62 struct ofnode_phandle_args args;
61 struct udevice *dev_clk; 63 struct udevice *dev_clk;
62 const struct clk_ops *ops; 64 const struct clk_ops *ops;
63 65
64 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk); 66 debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
65 67
66 assert(clk); 68 assert(clk);
67 clk->dev = NULL; 69 clk->dev = NULL;
68 70
69 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0, 71 ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
70 index, &args); 72 index, &args);
71 if (ret) { 73 if (ret) {
72 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n", 74 debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
73 __func__, ret); 75 __func__, ret);
74 return ret; 76 return ret;
75 } 77 }
76 78
77 ret = uclass_get_device_by_ofnode(UCLASS_CLK, args.node, &dev_clk); 79 ret = uclass_get_device_by_ofnode(UCLASS_CLK, args.node, &dev_clk);
78 if (ret) { 80 if (ret) {
79 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n", 81 debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
80 __func__, ret); 82 __func__, ret);
81 return ret; 83 return ret;
82 } 84 }
83 85
84 clk->dev = dev_clk; 86 clk->dev = dev_clk;
85 87
86 ops = clk_dev_ops(dev_clk); 88 ops = clk_dev_ops(dev_clk);
87 89
88 if (ops->of_xlate) 90 if (ops->of_xlate)
89 ret = ops->of_xlate(clk, &args); 91 ret = ops->of_xlate(clk, &args);
90 else 92 else
91 ret = clk_of_xlate_default(clk, &args); 93 ret = clk_of_xlate_default(clk, &args);
92 if (ret) { 94 if (ret) {
93 debug("of_xlate() failed: %d\n", ret); 95 debug("of_xlate() failed: %d\n", ret);
94 return ret; 96 return ret;
95 } 97 }
96 98
97 return clk_request(dev_clk, clk); 99 return clk_request(dev_clk, clk);
98 } 100 }
99 101
100 int clk_get_by_index(struct udevice *dev, int index, struct clk *clk) 102 int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
101 { 103 {
102 return clk_get_by_indexed_prop(dev, "clocks", index, clk); 104 return clk_get_by_indexed_prop(dev, "clocks", index, clk);
105 }
106
107 static int clk_set_default_parents(struct udevice *dev)
108 {
109 struct clk clk, parent_clk;
110 int index;
111 int num_parents;
112 int ret;
113
114 num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
115 "#clock-cells");
116 if (num_parents < 0) {
117 debug("%s: could not read assigned-clock-parents for %p\n",
118 __func__, dev);
119 return 0;
120 }
121
122 for (index = 0; index < num_parents; index++) {
123 ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
124 index, &parent_clk);
125 if (ret) {
126 debug("%s: could not get parent clock %d for %s\n",
127 __func__, index, dev_read_name(dev));
128 return ret;
129 }
130
131 ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
132 index, &clk);
133 if (ret) {
134 debug("%s: could not get assigned clock %d for %s\n",
135 __func__, index, dev_read_name(dev));
136 return ret;
137 }
138
139 ret = clk_set_parent(&clk, &parent_clk);
140
141 /*
142 * Not all drivers may support clock-reparenting (as of now).
143 * Ignore errors due to this.
144 */
145 if (ret == -ENOSYS)
146 continue;
147
148 if (ret) {
149 debug("%s: failed to reparent clock %d for %s\n",
150 __func__, index, dev_read_name(dev));
151 return ret;
152 }
153 }
154
155 return 0;
156 }
157
158 static int clk_set_default_rates(struct udevice *dev)
159 {
160 struct clk clk;
161 int index;
162 int num_rates;
163 int size;
164 int ret = 0;
165 u32 *rates = NULL;
166
167 size = dev_read_size(dev, "assigned-clock-rates");
168 if (size < 0)
169 return 0;
170
171 num_rates = size / sizeof(u32);
172 rates = calloc(num_rates, sizeof(u32));
173 if (!rates)
174 return -ENOMEM;
175
176 ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
177 if (ret)
178 goto fail;
179
180 for (index = 0; index < num_rates; index++) {
181 ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
182 index, &clk);
183 if (ret) {
184 debug("%s: could not get assigned clock %d for %s\n",
185 __func__, index, dev_read_name(dev));
186 continue;
187 }
188
189 ret = clk_set_rate(&clk, rates[index]);
190 if (ret < 0) {
191 debug("%s: failed to set rate on clock %d for %s\n",
192 __func__, index, dev_read_name(dev));
193 break;
194 }
195 }
196
197 fail:
198 free(rates);
199 return ret;
200 }
201
202 int clk_set_defaults(struct udevice *dev)
203 {
204 int ret;
205
206 /* If this is running pre-reloc state, don't take any action. */
207 if (!(gd->flags & GD_FLG_RELOC))
208 return 0;
209
210 debug("%s(%s)\n", __func__, dev_read_name(dev));
211
212 ret = clk_set_default_parents(dev);
213 if (ret)
214 return ret;
215
216 ret = clk_set_default_rates(dev);
217 if (ret < 0)
218 return ret;
219
220 return 0;
103 } 221 }
104 # endif /* OF_PLATDATA */ 222 # endif /* OF_PLATDATA */
105 223
106 int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk) 224 int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
107 { 225 {
108 int index; 226 int index;
109 227
110 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk); 228 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
111 clk->dev = NULL; 229 clk->dev = NULL;
112 230
113 index = dev_read_stringlist_search(dev, "clock-names", name); 231 index = dev_read_stringlist_search(dev, "clock-names", name);
114 if (index < 0) { 232 if (index < 0) {
115 debug("fdt_stringlist_search() failed: %d\n", index); 233 debug("fdt_stringlist_search() failed: %d\n", index);
116 return index; 234 return index;
117 } 235 }
118 236
119 return clk_get_by_index(dev, index, clk); 237 return clk_get_by_index(dev, index, clk);
120 } 238 }
121 239
122 int clk_release_all(struct clk *clk, int count) 240 int clk_release_all(struct clk *clk, int count)
123 { 241 {
124 int i, ret; 242 int i, ret;
125 243
126 for (i = 0; i < count; i++) { 244 for (i = 0; i < count; i++) {
127 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]); 245 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
128 246
129 /* check if clock has been previously requested */ 247 /* check if clock has been previously requested */
130 if (!clk[i].dev) 248 if (!clk[i].dev)
131 continue; 249 continue;
132 250
133 ret = clk_disable(&clk[i]); 251 ret = clk_disable(&clk[i]);
134 if (ret && ret != -ENOSYS) 252 if (ret && ret != -ENOSYS)
135 return ret; 253 return ret;
136 254
137 ret = clk_free(&clk[i]); 255 ret = clk_free(&clk[i]);
138 if (ret && ret != -ENOSYS) 256 if (ret && ret != -ENOSYS)
139 return ret; 257 return ret;
140 } 258 }
141 259
142 return 0; 260 return 0;
143 } 261 }
144 262
145 #endif /* OF_CONTROL */ 263 #endif /* OF_CONTROL */
146 264
147 int clk_request(struct udevice *dev, struct clk *clk) 265 int clk_request(struct udevice *dev, struct clk *clk)
148 { 266 {
149 const struct clk_ops *ops = clk_dev_ops(dev); 267 const struct clk_ops *ops = clk_dev_ops(dev);
150 268
151 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk); 269 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
152 270
153 clk->dev = dev; 271 clk->dev = dev;
154 272
155 if (!ops->request) 273 if (!ops->request)
156 return 0; 274 return 0;
157 275
158 return ops->request(clk); 276 return ops->request(clk);
159 } 277 }
160 278
161 int clk_free(struct clk *clk) 279 int clk_free(struct clk *clk)
162 { 280 {
163 const struct clk_ops *ops = clk_dev_ops(clk->dev); 281 const struct clk_ops *ops = clk_dev_ops(clk->dev);
164 282
165 debug("%s(clk=%p)\n", __func__, clk); 283 debug("%s(clk=%p)\n", __func__, clk);
166 284
167 if (!ops->free) 285 if (!ops->free)
168 return 0; 286 return 0;
169 287
170 return ops->free(clk); 288 return ops->free(clk);
171 } 289 }
172 290
173 ulong clk_get_rate(struct clk *clk) 291 ulong clk_get_rate(struct clk *clk)
174 { 292 {
175 const struct clk_ops *ops = clk_dev_ops(clk->dev); 293 const struct clk_ops *ops = clk_dev_ops(clk->dev);
176 294
177 debug("%s(clk=%p)\n", __func__, clk); 295 debug("%s(clk=%p)\n", __func__, clk);
178 296
179 if (!ops->get_rate) 297 if (!ops->get_rate)
180 return -ENOSYS; 298 return -ENOSYS;
181 299
182 return ops->get_rate(clk); 300 return ops->get_rate(clk);
183 } 301 }
184 302
185 ulong clk_set_rate(struct clk *clk, ulong rate) 303 ulong clk_set_rate(struct clk *clk, ulong rate)
186 { 304 {
187 const struct clk_ops *ops = clk_dev_ops(clk->dev); 305 const struct clk_ops *ops = clk_dev_ops(clk->dev);
188 306
189 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate); 307 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
190 308
191 if (!ops->set_rate) 309 if (!ops->set_rate)
192 return -ENOSYS; 310 return -ENOSYS;
193 311
194 return ops->set_rate(clk, rate); 312 return ops->set_rate(clk, rate);
195 } 313 }
196 314
197 int clk_set_parent(struct clk *clk, struct clk *parent) 315 int clk_set_parent(struct clk *clk, struct clk *parent)
198 { 316 {
199 const struct clk_ops *ops = clk_dev_ops(clk->dev); 317 const struct clk_ops *ops = clk_dev_ops(clk->dev);
200 318
201 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent); 319 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
202 320
203 if (!ops->set_parent) 321 if (!ops->set_parent)
204 return -ENOSYS; 322 return -ENOSYS;
205 323
206 return ops->set_parent(clk, parent); 324 return ops->set_parent(clk, parent);
207 } 325 }
208 326
209 int clk_enable(struct clk *clk) 327 int clk_enable(struct clk *clk)
210 { 328 {
211 const struct clk_ops *ops = clk_dev_ops(clk->dev); 329 const struct clk_ops *ops = clk_dev_ops(clk->dev);
212 330
213 debug("%s(clk=%p)\n", __func__, clk); 331 debug("%s(clk=%p)\n", __func__, clk);
214 332
215 if (!ops->enable) 333 if (!ops->enable)
216 return -ENOSYS; 334 return -ENOSYS;
217 335
218 return ops->enable(clk); 336 return ops->enable(clk);
219 } 337 }
220 338
221 int clk_disable(struct clk *clk) 339 int clk_disable(struct clk *clk)
222 { 340 {
223 const struct clk_ops *ops = clk_dev_ops(clk->dev); 341 const struct clk_ops *ops = clk_dev_ops(clk->dev);
224 342
225 debug("%s(clk=%p)\n", __func__, clk); 343 debug("%s(clk=%p)\n", __func__, clk);
226 344
227 if (!ops->disable) 345 if (!ops->disable)
228 return -ENOSYS; 346 return -ENOSYS;
229 347
230 return ops->disable(clk); 348 return ops->disable(clk);
231 } 349 }
232 350
233 UCLASS_DRIVER(clk) = { 351 UCLASS_DRIVER(clk) = {
234 .id = UCLASS_CLK, 352 .id = UCLASS_CLK,
235 .name = "clk", 353 .name = "clk",
236 }; 354 };
237 355
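
A clock provider only benefits from this new path if it fills in the
.set_parent (and .set_rate) hooks of its struct clk_ops; otherwise
clk_set_default_parents() sees -ENOSYS and silently skips the clock, as
described in the commit message.  A minimal sketch of such a driver is
shown below; everything named demo_* is hypothetical and not part of
this patch.

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>

static int demo_clk_set_parent(struct clk *clk, struct clk *parent)
{
	/*
	 * A real driver would program the mux for clk->id here, using
	 * parent->id to select the input.  Returning -ENOSYS instead
	 * is tolerated: clk_set_default_parents() ignores it.
	 */
	return -ENOSYS;
}

static ulong demo_clk_set_rate(struct clk *clk, ulong rate)
{
	/*
	 * A real driver would configure its PLLs/dividers for clk->id
	 * and return the rate actually achieved.
	 */
	return rate;
}

static const struct clk_ops demo_clk_ops = {
	.set_parent	= demo_clk_set_parent,
	.set_rate	= demo_clk_set_rate,
};

U_BOOT_DRIVER(demo_clk) = {
	.name	= "demo_clk",
	.id	= UCLASS_CLK,
	.ops	= &demo_clk_ops,
};

With such a driver bound as a clock provider, a consumer node carrying
the assigned-* properties shown earlier has its clocks configured
automatically when device_probe() runs.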
drivers/core/device.c
1 /* 1 /*
2 * Device manager 2 * Device manager
3 * 3 *
4 * Copyright (c) 2013 Google, Inc 4 * Copyright (c) 2013 Google, Inc
5 * 5 *
6 * (C) Copyright 2012 6 * (C) Copyright 2012
7 * Pavel Herrmann <morpheus.ibis@gmail.com> 7 * Pavel Herrmann <morpheus.ibis@gmail.com>
8 * 8 *
9 * SPDX-License-Identifier: GPL-2.0+ 9 * SPDX-License-Identifier: GPL-2.0+
10 */ 10 */
11 11
12 #include <common.h> 12 #include <common.h>
13 #include <asm/io.h> 13 #include <asm/io.h>
14 #include <clk.h>
14 #include <fdtdec.h> 15 #include <fdtdec.h>
15 #include <fdt_support.h> 16 #include <fdt_support.h>
16 #include <malloc.h> 17 #include <malloc.h>
17 #include <dm/device.h> 18 #include <dm/device.h>
18 #include <dm/device-internal.h> 19 #include <dm/device-internal.h>
19 #include <dm/lists.h> 20 #include <dm/lists.h>
20 #include <dm/of_access.h> 21 #include <dm/of_access.h>
21 #include <dm/pinctrl.h> 22 #include <dm/pinctrl.h>
22 #include <dm/platdata.h> 23 #include <dm/platdata.h>
23 #include <dm/read.h> 24 #include <dm/read.h>
24 #include <dm/uclass.h> 25 #include <dm/uclass.h>
25 #include <dm/uclass-internal.h> 26 #include <dm/uclass-internal.h>
26 #include <dm/util.h> 27 #include <dm/util.h>
27 #include <linux/err.h> 28 #include <linux/err.h>
28 #include <linux/list.h> 29 #include <linux/list.h>
29 30
30 DECLARE_GLOBAL_DATA_PTR; 31 DECLARE_GLOBAL_DATA_PTR;
31 32
32 static int device_bind_common(struct udevice *parent, const struct driver *drv, 33 static int device_bind_common(struct udevice *parent, const struct driver *drv,
33 const char *name, void *platdata, 34 const char *name, void *platdata,
34 ulong driver_data, ofnode node, 35 ulong driver_data, ofnode node,
35 uint of_platdata_size, struct udevice **devp) 36 uint of_platdata_size, struct udevice **devp)
36 { 37 {
37 struct udevice *dev; 38 struct udevice *dev;
38 struct uclass *uc; 39 struct uclass *uc;
39 int size, ret = 0; 40 int size, ret = 0;
40 41
41 if (devp) 42 if (devp)
42 *devp = NULL; 43 *devp = NULL;
43 if (!name) 44 if (!name)
44 return -EINVAL; 45 return -EINVAL;
45 46
46 ret = uclass_get(drv->id, &uc); 47 ret = uclass_get(drv->id, &uc);
47 if (ret) { 48 if (ret) {
48 debug("Missing uclass for driver %s\n", drv->name); 49 debug("Missing uclass for driver %s\n", drv->name);
49 return ret; 50 return ret;
50 } 51 }
51 52
52 dev = calloc(1, sizeof(struct udevice)); 53 dev = calloc(1, sizeof(struct udevice));
53 if (!dev) 54 if (!dev)
54 return -ENOMEM; 55 return -ENOMEM;
55 56
56 INIT_LIST_HEAD(&dev->sibling_node); 57 INIT_LIST_HEAD(&dev->sibling_node);
57 INIT_LIST_HEAD(&dev->child_head); 58 INIT_LIST_HEAD(&dev->child_head);
58 INIT_LIST_HEAD(&dev->uclass_node); 59 INIT_LIST_HEAD(&dev->uclass_node);
59 #ifdef CONFIG_DEVRES 60 #ifdef CONFIG_DEVRES
60 INIT_LIST_HEAD(&dev->devres_head); 61 INIT_LIST_HEAD(&dev->devres_head);
61 #endif 62 #endif
62 dev->platdata = platdata; 63 dev->platdata = platdata;
63 dev->driver_data = driver_data; 64 dev->driver_data = driver_data;
64 dev->name = name; 65 dev->name = name;
65 dev->node = node; 66 dev->node = node;
66 dev->parent = parent; 67 dev->parent = parent;
67 dev->driver = drv; 68 dev->driver = drv;
68 dev->uclass = uc; 69 dev->uclass = uc;
69 70
70 dev->seq = -1; 71 dev->seq = -1;
71 dev->req_seq = -1; 72 dev->req_seq = -1;
72 if (CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DM_SEQ_ALIAS)) { 73 if (CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DM_SEQ_ALIAS)) {
73 /* 74 /*
74 * Some devices, such as a SPI bus, I2C bus and serial ports 75 * Some devices, such as a SPI bus, I2C bus and serial ports
75 * are numbered using aliases. 76 * are numbered using aliases.
76 * 77 *
77 * This is just a 'requested' sequence, and will be 78 * This is just a 'requested' sequence, and will be
78 * resolved (and ->seq updated) when the device is probed. 79 * resolved (and ->seq updated) when the device is probed.
79 */ 80 */
80 if (uc->uc_drv->flags & DM_UC_FLAG_SEQ_ALIAS) { 81 if (uc->uc_drv->flags & DM_UC_FLAG_SEQ_ALIAS) {
81 if (uc->uc_drv->name && ofnode_valid(node)) { 82 if (uc->uc_drv->name && ofnode_valid(node)) {
82 dev_read_alias_seq(dev, &dev->req_seq); 83 dev_read_alias_seq(dev, &dev->req_seq);
83 } 84 }
84 } 85 }
85 } 86 }
86 87
87 if (drv->platdata_auto_alloc_size) { 88 if (drv->platdata_auto_alloc_size) {
88 bool alloc = !platdata; 89 bool alloc = !platdata;
89 90
90 if (CONFIG_IS_ENABLED(OF_PLATDATA)) { 91 if (CONFIG_IS_ENABLED(OF_PLATDATA)) {
91 if (of_platdata_size) { 92 if (of_platdata_size) {
92 dev->flags |= DM_FLAG_OF_PLATDATA; 93 dev->flags |= DM_FLAG_OF_PLATDATA;
93 if (of_platdata_size < 94 if (of_platdata_size <
94 drv->platdata_auto_alloc_size) 95 drv->platdata_auto_alloc_size)
95 alloc = true; 96 alloc = true;
96 } 97 }
97 } 98 }
98 if (alloc) { 99 if (alloc) {
99 dev->flags |= DM_FLAG_ALLOC_PDATA; 100 dev->flags |= DM_FLAG_ALLOC_PDATA;
100 dev->platdata = calloc(1, 101 dev->platdata = calloc(1,
101 drv->platdata_auto_alloc_size); 102 drv->platdata_auto_alloc_size);
102 if (!dev->platdata) { 103 if (!dev->platdata) {
103 ret = -ENOMEM; 104 ret = -ENOMEM;
104 goto fail_alloc1; 105 goto fail_alloc1;
105 } 106 }
106 if (CONFIG_IS_ENABLED(OF_PLATDATA) && platdata) { 107 if (CONFIG_IS_ENABLED(OF_PLATDATA) && platdata) {
107 memcpy(dev->platdata, platdata, 108 memcpy(dev->platdata, platdata,
108 of_platdata_size); 109 of_platdata_size);
109 } 110 }
110 } 111 }
111 } 112 }
112 113
113 size = uc->uc_drv->per_device_platdata_auto_alloc_size; 114 size = uc->uc_drv->per_device_platdata_auto_alloc_size;
114 if (size) { 115 if (size) {
115 dev->flags |= DM_FLAG_ALLOC_UCLASS_PDATA; 116 dev->flags |= DM_FLAG_ALLOC_UCLASS_PDATA;
116 dev->uclass_platdata = calloc(1, size); 117 dev->uclass_platdata = calloc(1, size);
117 if (!dev->uclass_platdata) { 118 if (!dev->uclass_platdata) {
118 ret = -ENOMEM; 119 ret = -ENOMEM;
119 goto fail_alloc2; 120 goto fail_alloc2;
120 } 121 }
121 } 122 }
122 123
123 if (parent) { 124 if (parent) {
124 size = parent->driver->per_child_platdata_auto_alloc_size; 125 size = parent->driver->per_child_platdata_auto_alloc_size;
125 if (!size) { 126 if (!size) {
126 size = parent->uclass->uc_drv-> 127 size = parent->uclass->uc_drv->
127 per_child_platdata_auto_alloc_size; 128 per_child_platdata_auto_alloc_size;
128 } 129 }
129 if (size) { 130 if (size) {
130 dev->flags |= DM_FLAG_ALLOC_PARENT_PDATA; 131 dev->flags |= DM_FLAG_ALLOC_PARENT_PDATA;
131 dev->parent_platdata = calloc(1, size); 132 dev->parent_platdata = calloc(1, size);
132 if (!dev->parent_platdata) { 133 if (!dev->parent_platdata) {
133 ret = -ENOMEM; 134 ret = -ENOMEM;
134 goto fail_alloc3; 135 goto fail_alloc3;
135 } 136 }
136 } 137 }
137 } 138 }
138 139
139 /* put dev into parent's successor list */ 140 /* put dev into parent's successor list */
140 if (parent) 141 if (parent)
141 list_add_tail(&dev->sibling_node, &parent->child_head); 142 list_add_tail(&dev->sibling_node, &parent->child_head);
142 143
143 ret = uclass_bind_device(dev); 144 ret = uclass_bind_device(dev);
144 if (ret) 145 if (ret)
145 goto fail_uclass_bind; 146 goto fail_uclass_bind;
146 147
147 /* if we fail to bind we remove device from successors and free it */ 148 /* if we fail to bind we remove device from successors and free it */
148 if (drv->bind) { 149 if (drv->bind) {
149 ret = drv->bind(dev); 150 ret = drv->bind(dev);
150 if (ret) 151 if (ret)
151 goto fail_bind; 152 goto fail_bind;
152 } 153 }
153 if (parent && parent->driver->child_post_bind) { 154 if (parent && parent->driver->child_post_bind) {
154 ret = parent->driver->child_post_bind(dev); 155 ret = parent->driver->child_post_bind(dev);
155 if (ret) 156 if (ret)
156 goto fail_child_post_bind; 157 goto fail_child_post_bind;
157 } 158 }
158 if (uc->uc_drv->post_bind) { 159 if (uc->uc_drv->post_bind) {
159 ret = uc->uc_drv->post_bind(dev); 160 ret = uc->uc_drv->post_bind(dev);
160 if (ret) 161 if (ret)
161 goto fail_uclass_post_bind; 162 goto fail_uclass_post_bind;
162 } 163 }
163 164
164 if (parent) 165 if (parent)
165 pr_debug("Bound device %s to %s\n", dev->name, parent->name); 166 pr_debug("Bound device %s to %s\n", dev->name, parent->name);
166 if (devp) 167 if (devp)
167 *devp = dev; 168 *devp = dev;
168 169
169 dev->flags |= DM_FLAG_BOUND; 170 dev->flags |= DM_FLAG_BOUND;
170 171
171 return 0; 172 return 0;
172 173
173 fail_uclass_post_bind: 174 fail_uclass_post_bind:
174 /* There is no child unbind() method, so no clean-up required */ 175 /* There is no child unbind() method, so no clean-up required */
175 fail_child_post_bind: 176 fail_child_post_bind:
176 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) { 177 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
177 if (drv->unbind && drv->unbind(dev)) { 178 if (drv->unbind && drv->unbind(dev)) {
178 dm_warn("unbind() method failed on dev '%s' on error path\n", 179 dm_warn("unbind() method failed on dev '%s' on error path\n",
179 dev->name); 180 dev->name);
180 } 181 }
181 } 182 }
182 183
183 fail_bind: 184 fail_bind:
184 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) { 185 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
185 if (uclass_unbind_device(dev)) { 186 if (uclass_unbind_device(dev)) {
186 dm_warn("Failed to unbind dev '%s' on error path\n", 187 dm_warn("Failed to unbind dev '%s' on error path\n",
187 dev->name); 188 dev->name);
188 } 189 }
189 } 190 }
190 fail_uclass_bind: 191 fail_uclass_bind:
191 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) { 192 if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
192 list_del(&dev->sibling_node); 193 list_del(&dev->sibling_node);
193 if (dev->flags & DM_FLAG_ALLOC_PARENT_PDATA) { 194 if (dev->flags & DM_FLAG_ALLOC_PARENT_PDATA) {
194 free(dev->parent_platdata); 195 free(dev->parent_platdata);
195 dev->parent_platdata = NULL; 196 dev->parent_platdata = NULL;
196 } 197 }
197 } 198 }
198 fail_alloc3: 199 fail_alloc3:
199 if (dev->flags & DM_FLAG_ALLOC_UCLASS_PDATA) { 200 if (dev->flags & DM_FLAG_ALLOC_UCLASS_PDATA) {
200 free(dev->uclass_platdata); 201 free(dev->uclass_platdata);
201 dev->uclass_platdata = NULL; 202 dev->uclass_platdata = NULL;
202 } 203 }
203 fail_alloc2: 204 fail_alloc2:
204 if (dev->flags & DM_FLAG_ALLOC_PDATA) { 205 if (dev->flags & DM_FLAG_ALLOC_PDATA) {
205 free(dev->platdata); 206 free(dev->platdata);
206 dev->platdata = NULL; 207 dev->platdata = NULL;
207 } 208 }
208 fail_alloc1: 209 fail_alloc1:
209 devres_release_all(dev); 210 devres_release_all(dev);
210 211
211 free(dev); 212 free(dev);
212 213
213 return ret; 214 return ret;
214 } 215 }
215 216
216 int device_bind_with_driver_data(struct udevice *parent, 217 int device_bind_with_driver_data(struct udevice *parent,
217 const struct driver *drv, const char *name, 218 const struct driver *drv, const char *name,
218 ulong driver_data, ofnode node, 219 ulong driver_data, ofnode node,
219 struct udevice **devp) 220 struct udevice **devp)
220 { 221 {
221 return device_bind_common(parent, drv, name, NULL, driver_data, node, 222 return device_bind_common(parent, drv, name, NULL, driver_data, node,
222 0, devp); 223 0, devp);
223 } 224 }
224 225
225 int device_bind(struct udevice *parent, const struct driver *drv, 226 int device_bind(struct udevice *parent, const struct driver *drv,
226 const char *name, void *platdata, int of_offset, 227 const char *name, void *platdata, int of_offset,
227 struct udevice **devp) 228 struct udevice **devp)
228 { 229 {
229 return device_bind_common(parent, drv, name, platdata, 0, 230 return device_bind_common(parent, drv, name, platdata, 0,
230 offset_to_ofnode(of_offset), 0, devp); 231 offset_to_ofnode(of_offset), 0, devp);
231 } 232 }
232 233
233 int device_bind_by_name(struct udevice *parent, bool pre_reloc_only, 234 int device_bind_by_name(struct udevice *parent, bool pre_reloc_only,
234 const struct driver_info *info, struct udevice **devp) 235 const struct driver_info *info, struct udevice **devp)
235 { 236 {
236 struct driver *drv; 237 struct driver *drv;
237 uint platdata_size = 0; 238 uint platdata_size = 0;
238 239
239 drv = lists_driver_lookup_name(info->name); 240 drv = lists_driver_lookup_name(info->name);
240 if (!drv) 241 if (!drv)
241 return -ENOENT; 242 return -ENOENT;
242 if (pre_reloc_only && !(drv->flags & DM_FLAG_PRE_RELOC)) 243 if (pre_reloc_only && !(drv->flags & DM_FLAG_PRE_RELOC))
243 return -EPERM; 244 return -EPERM;
244 245
245 #if CONFIG_IS_ENABLED(OF_PLATDATA) 246 #if CONFIG_IS_ENABLED(OF_PLATDATA)
246 platdata_size = info->platdata_size; 247 platdata_size = info->platdata_size;
247 #endif 248 #endif
248 return device_bind_common(parent, drv, info->name, 249 return device_bind_common(parent, drv, info->name,
249 (void *)info->platdata, 0, ofnode_null(), platdata_size, 250 (void *)info->platdata, 0, ofnode_null(), platdata_size,
250 devp); 251 devp);
251 } 252 }
252 253
253 static void *alloc_priv(int size, uint flags) 254 static void *alloc_priv(int size, uint flags)
254 { 255 {
255 void *priv; 256 void *priv;
256 257
257 if (flags & DM_FLAG_ALLOC_PRIV_DMA) { 258 if (flags & DM_FLAG_ALLOC_PRIV_DMA) {
258 size = ROUND(size, ARCH_DMA_MINALIGN); 259 size = ROUND(size, ARCH_DMA_MINALIGN);
259 priv = memalign(ARCH_DMA_MINALIGN, size); 260 priv = memalign(ARCH_DMA_MINALIGN, size);
260 if (priv) { 261 if (priv) {
261 memset(priv, '\0', size); 262 memset(priv, '\0', size);
262 263
263 /* 264 /*
264 * Ensure that the zero bytes are flushed to memory. 265 * Ensure that the zero bytes are flushed to memory.
265 * This prevents problems if the driver uses this as 266 * This prevents problems if the driver uses this as
266 * both an input and an output buffer: 267 * both an input and an output buffer:
267 * 268 *
268 * 1. Zeroes written to buffer (here) and sit in the 269 * 1. Zeroes written to buffer (here) and sit in the
269 * cache 270 * cache
270 * 2. Driver issues a read command to DMA 271 * 2. Driver issues a read command to DMA
271 * 3. CPU runs out of cache space and evicts some cache 272 * 3. CPU runs out of cache space and evicts some cache
272 * data in the buffer, writing zeroes to RAM from 273 * data in the buffer, writing zeroes to RAM from
273 * the memset() above 274 * the memset() above
274 * 4. DMA completes 275 * 4. DMA completes
275 * 5. Buffer now has some DMA data and some zeroes 276 * 5. Buffer now has some DMA data and some zeroes
276 * 6. Data being read is now incorrect 277 * 6. Data being read is now incorrect
277 * 278 *
278 * To prevent this, ensure that the cache is clean 279 * To prevent this, ensure that the cache is clean
279 * within this range at the start. The driver can then 280 * within this range at the start. The driver can then
280 * use normal flush-after-write, invalidate-before-read 281 * use normal flush-after-write, invalidate-before-read
281 * procedures. 282 * procedures.
282 * 283 *
283 * TODO(sjg@chromium.org): Drop this microblaze 284 * TODO(sjg@chromium.org): Drop this microblaze
284 * exception. 285 * exception.
285 */ 286 */
286 #ifndef CONFIG_MICROBLAZE 287 #ifndef CONFIG_MICROBLAZE
287 flush_dcache_range((ulong)priv, (ulong)priv + size); 288 flush_dcache_range((ulong)priv, (ulong)priv + size);
288 #endif 289 #endif
289 } 290 }
290 } else { 291 } else {
291 priv = calloc(1, size); 292 priv = calloc(1, size);
292 } 293 }
293 294
294 return priv; 295 return priv;
295 } 296 }
296 297
297 int device_probe(struct udevice *dev) 298 int device_probe(struct udevice *dev)
298 { 299 {
299 const struct driver *drv; 300 const struct driver *drv;
300 int size = 0; 301 int size = 0;
301 int ret; 302 int ret;
302 int seq; 303 int seq;
303 304
304 if (!dev) 305 if (!dev)
305 return -EINVAL; 306 return -EINVAL;
306 307
307 if (dev->flags & DM_FLAG_ACTIVATED) 308 if (dev->flags & DM_FLAG_ACTIVATED)
308 return 0; 309 return 0;
309 310
310 drv = dev->driver; 311 drv = dev->driver;
311 assert(drv); 312 assert(drv);
312 313
313 /* Allocate private data if requested and not reentered */ 314 /* Allocate private data if requested and not reentered */
314 if (drv->priv_auto_alloc_size && !dev->priv) { 315 if (drv->priv_auto_alloc_size && !dev->priv) {
315 dev->priv = alloc_priv(drv->priv_auto_alloc_size, drv->flags); 316 dev->priv = alloc_priv(drv->priv_auto_alloc_size, drv->flags);
316 if (!dev->priv) { 317 if (!dev->priv) {
317 ret = -ENOMEM; 318 ret = -ENOMEM;
318 goto fail; 319 goto fail;
319 } 320 }
320 } 321 }
321 /* Allocate private data if requested and not reentered */ 322 /* Allocate private data if requested and not reentered */
322 size = dev->uclass->uc_drv->per_device_auto_alloc_size; 323 size = dev->uclass->uc_drv->per_device_auto_alloc_size;
323 if (size && !dev->uclass_priv) { 324 if (size && !dev->uclass_priv) {
324 dev->uclass_priv = calloc(1, size); 325 dev->uclass_priv = calloc(1, size);
325 if (!dev->uclass_priv) { 326 if (!dev->uclass_priv) {
326 ret = -ENOMEM; 327 ret = -ENOMEM;
327 goto fail; 328 goto fail;
328 } 329 }
329 } 330 }
330 331
331 /* Ensure all parents are probed */ 332 /* Ensure all parents are probed */
332 if (dev->parent) { 333 if (dev->parent) {
333 size = dev->parent->driver->per_child_auto_alloc_size; 334 size = dev->parent->driver->per_child_auto_alloc_size;
334 if (!size) { 335 if (!size) {
335 size = dev->parent->uclass->uc_drv-> 336 size = dev->parent->uclass->uc_drv->
336 per_child_auto_alloc_size; 337 per_child_auto_alloc_size;
337 } 338 }
338 if (size && !dev->parent_priv) { 339 if (size && !dev->parent_priv) {
339 dev->parent_priv = alloc_priv(size, drv->flags); 340 dev->parent_priv = alloc_priv(size, drv->flags);
340 if (!dev->parent_priv) { 341 if (!dev->parent_priv) {
341 ret = -ENOMEM; 342 ret = -ENOMEM;
342 goto fail; 343 goto fail;
343 } 344 }
344 } 345 }
345 346
346 ret = device_probe(dev->parent); 347 ret = device_probe(dev->parent);
347 if (ret) 348 if (ret)
348 goto fail; 349 goto fail;
349 350
350 /* 351 /*
351 * The device might have already been probed during 352 * The device might have already been probed during
352 * the call to device_probe() on its parent device 353 * the call to device_probe() on its parent device
353 * (e.g. PCI bridge devices). Test the flags again 354 * (e.g. PCI bridge devices). Test the flags again
354 * so that we don't mess up the device. 355 * so that we don't mess up the device.
355 */ 356 */
356 if (dev->flags & DM_FLAG_ACTIVATED) 357 if (dev->flags & DM_FLAG_ACTIVATED)
357 return 0; 358 return 0;
358 } 359 }
359 360
360 seq = uclass_resolve_seq(dev); 361 seq = uclass_resolve_seq(dev);
361 if (seq < 0) { 362 if (seq < 0) {
362 ret = seq; 363 ret = seq;
363 goto fail; 364 goto fail;
364 } 365 }
365 dev->seq = seq; 366 dev->seq = seq;
366 367
367 dev->flags |= DM_FLAG_ACTIVATED; 368 dev->flags |= DM_FLAG_ACTIVATED;
368 369
369 /* 370 /*
370 * Process pinctrl for everything except the root device, and 371 * Process pinctrl for everything except the root device, and
371 * continue regardless of the result of pinctrl. Don't process pinctrl 372 * continue regardless of the result of pinctrl. Don't process pinctrl
372 * settings for pinctrl devices since the device may not yet be 373 * settings for pinctrl devices since the device may not yet be
373 * probed. 374 * probed.
374 */ 375 */
375 if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL) 376 if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
376 pinctrl_select_state(dev, "default"); 377 pinctrl_select_state(dev, "default");
377 378
378 ret = uclass_pre_probe_device(dev); 379 ret = uclass_pre_probe_device(dev);
379 if (ret) 380 if (ret)
380 goto fail; 381 goto fail;
381 382
382 if (dev->parent && dev->parent->driver->child_pre_probe) { 383 if (dev->parent && dev->parent->driver->child_pre_probe) {
383 ret = dev->parent->driver->child_pre_probe(dev); 384 ret = dev->parent->driver->child_pre_probe(dev);
384 if (ret) 385 if (ret)
385 goto fail; 386 goto fail;
386 } 387 }
387 388
388 if (drv->ofdata_to_platdata && dev_has_of_node(dev)) { 389 if (drv->ofdata_to_platdata && dev_has_of_node(dev)) {
389 ret = drv->ofdata_to_platdata(dev); 390 ret = drv->ofdata_to_platdata(dev);
390 if (ret) 391 if (ret)
391 goto fail; 392 goto fail;
392 } 393 }
394
395 /* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */
396 ret = clk_set_defaults(dev);
397 if (ret)
398 goto fail;
393 399
394 if (drv->probe) { 400 if (drv->probe) {
395 ret = drv->probe(dev); 401 ret = drv->probe(dev);
396 if (ret) { 402 if (ret) {
397 dev->flags &= ~DM_FLAG_ACTIVATED; 403 dev->flags &= ~DM_FLAG_ACTIVATED;
398 goto fail; 404 goto fail;
399 } 405 }
400 } 406 }
401 407
402 ret = uclass_post_probe_device(dev); 408 ret = uclass_post_probe_device(dev);
403 if (ret) 409 if (ret)
404 goto fail_uclass; 410 goto fail_uclass;
405 411
406 if (dev->parent && device_get_uclass_id(dev) == UCLASS_PINCTRL) 412 if (dev->parent && device_get_uclass_id(dev) == UCLASS_PINCTRL)
407 pinctrl_select_state(dev, "default"); 413 pinctrl_select_state(dev, "default");
408 414
409 return 0; 415 return 0;
410 fail_uclass: 416 fail_uclass:
411 if (device_remove(dev, DM_REMOVE_NORMAL)) { 417 if (device_remove(dev, DM_REMOVE_NORMAL)) {
412 dm_warn("%s: Device '%s' failed to remove on error path\n", 418 dm_warn("%s: Device '%s' failed to remove on error path\n",
413 __func__, dev->name); 419 __func__, dev->name);
414 } 420 }
415 fail: 421 fail:
416 dev->flags &= ~DM_FLAG_ACTIVATED; 422 dev->flags &= ~DM_FLAG_ACTIVATED;
417 423
418 dev->seq = -1; 424 dev->seq = -1;
419 device_free(dev); 425 device_free(dev);
420 426
421 return ret; 427 return ret;
422 } 428 }
423 429
424 void *dev_get_platdata(struct udevice *dev) 430 void *dev_get_platdata(struct udevice *dev)
425 { 431 {
426 if (!dev) { 432 if (!dev) {
427 dm_warn("%s: null device\n", __func__); 433 dm_warn("%s: null device\n", __func__);
428 return NULL; 434 return NULL;
429 } 435 }
430 436
431 return dev->platdata; 437 return dev->platdata;
432 } 438 }
433 439
434 void *dev_get_parent_platdata(struct udevice *dev) 440 void *dev_get_parent_platdata(struct udevice *dev)
435 { 441 {
436 if (!dev) { 442 if (!dev) {
437 dm_warn("%s: null device\n", __func__); 443 dm_warn("%s: null device\n", __func__);
438 return NULL; 444 return NULL;
439 } 445 }
440 446
441 return dev->parent_platdata; 447 return dev->parent_platdata;
442 } 448 }
443 449
444 void *dev_get_uclass_platdata(struct udevice *dev) 450 void *dev_get_uclass_platdata(struct udevice *dev)
445 { 451 {
446 if (!dev) { 452 if (!dev) {
447 dm_warn("%s: null device\n", __func__); 453 dm_warn("%s: null device\n", __func__);
448 return NULL; 454 return NULL;
449 } 455 }
450 456
451 return dev->uclass_platdata; 457 return dev->uclass_platdata;
452 } 458 }
453 459
454 void *dev_get_priv(struct udevice *dev) 460 void *dev_get_priv(struct udevice *dev)
455 { 461 {
456 if (!dev) { 462 if (!dev) {
457 dm_warn("%s: null device\n", __func__); 463 dm_warn("%s: null device\n", __func__);
458 return NULL; 464 return NULL;
459 } 465 }
460 466
461 return dev->priv; 467 return dev->priv;
462 } 468 }
463 469
464 void *dev_get_uclass_priv(struct udevice *dev) 470 void *dev_get_uclass_priv(struct udevice *dev)
465 { 471 {
466 if (!dev) { 472 if (!dev) {
467 dm_warn("%s: null device\n", __func__); 473 dm_warn("%s: null device\n", __func__);
468 return NULL; 474 return NULL;
469 } 475 }
470 476
471 return dev->uclass_priv; 477 return dev->uclass_priv;
472 } 478 }
473 479
474 void *dev_get_parent_priv(struct udevice *dev) 480 void *dev_get_parent_priv(struct udevice *dev)
475 { 481 {
476 if (!dev) { 482 if (!dev) {
477 dm_warn("%s: null device\n", __func__); 483 dm_warn("%s: null device\n", __func__);
478 return NULL; 484 return NULL;
479 } 485 }
480 486
481 return dev->parent_priv; 487 return dev->parent_priv;
482 } 488 }
483 489
484 static int device_get_device_tail(struct udevice *dev, int ret, 490 static int device_get_device_tail(struct udevice *dev, int ret,
485 struct udevice **devp) 491 struct udevice **devp)
486 { 492 {
487 if (ret) 493 if (ret)
488 return ret; 494 return ret;
489 495
490 ret = device_probe(dev); 496 ret = device_probe(dev);
491 if (ret) 497 if (ret)
492 return ret; 498 return ret;
493 499
494 *devp = dev; 500 *devp = dev;
495 501
496 return 0; 502 return 0;
497 } 503 }
498 504
499 int device_get_child(struct udevice *parent, int index, struct udevice **devp) 505 int device_get_child(struct udevice *parent, int index, struct udevice **devp)
500 { 506 {
501 struct udevice *dev; 507 struct udevice *dev;
502 508
503 list_for_each_entry(dev, &parent->child_head, sibling_node) { 509 list_for_each_entry(dev, &parent->child_head, sibling_node) {
504 if (!index--) 510 if (!index--)
505 return device_get_device_tail(dev, 0, devp); 511 return device_get_device_tail(dev, 0, devp);
506 } 512 }
507 513
508 return -ENODEV; 514 return -ENODEV;
509 } 515 }
510 516
511 int device_find_child_by_seq(struct udevice *parent, int seq_or_req_seq, 517 int device_find_child_by_seq(struct udevice *parent, int seq_or_req_seq,
512 bool find_req_seq, struct udevice **devp) 518 bool find_req_seq, struct udevice **devp)
513 { 519 {
514 struct udevice *dev; 520 struct udevice *dev;
515 521
516 *devp = NULL; 522 *devp = NULL;
517 if (seq_or_req_seq == -1) 523 if (seq_or_req_seq == -1)
518 return -ENODEV; 524 return -ENODEV;
519 525
520 list_for_each_entry(dev, &parent->child_head, sibling_node) { 526 list_for_each_entry(dev, &parent->child_head, sibling_node) {
521 if ((find_req_seq ? dev->req_seq : dev->seq) == 527 if ((find_req_seq ? dev->req_seq : dev->seq) ==
522 seq_or_req_seq) { 528 seq_or_req_seq) {
523 *devp = dev; 529 *devp = dev;
524 return 0; 530 return 0;
525 } 531 }
526 } 532 }
527 533
528 return -ENODEV; 534 return -ENODEV;
529 } 535 }
530 536
531 int device_get_child_by_seq(struct udevice *parent, int seq, 537 int device_get_child_by_seq(struct udevice *parent, int seq,
532 struct udevice **devp) 538 struct udevice **devp)
533 { 539 {
534 struct udevice *dev; 540 struct udevice *dev;
535 int ret; 541 int ret;
536 542
537 *devp = NULL; 543 *devp = NULL;
538 ret = device_find_child_by_seq(parent, seq, false, &dev); 544 ret = device_find_child_by_seq(parent, seq, false, &dev);
539 if (ret == -ENODEV) { 545 if (ret == -ENODEV) {
540 /* 546 /*
541 * We didn't find it in probed devices. See if there is one 547 * We didn't find it in probed devices. See if there is one
542 * that will request this seq if probed. 548 * that will request this seq if probed.
543 */ 549 */
544 ret = device_find_child_by_seq(parent, seq, true, &dev); 550 ret = device_find_child_by_seq(parent, seq, true, &dev);
545 } 551 }
546 return device_get_device_tail(dev, ret, devp); 552 return device_get_device_tail(dev, ret, devp);
547 } 553 }
548 554
549 int device_find_child_by_of_offset(struct udevice *parent, int of_offset, 555 int device_find_child_by_of_offset(struct udevice *parent, int of_offset,
550 struct udevice **devp) 556 struct udevice **devp)
551 { 557 {
552 struct udevice *dev; 558 struct udevice *dev;
553 559
554 *devp = NULL; 560 *devp = NULL;
555 561
556 list_for_each_entry(dev, &parent->child_head, sibling_node) { 562 list_for_each_entry(dev, &parent->child_head, sibling_node) {
557 if (dev_of_offset(dev) == of_offset) { 563 if (dev_of_offset(dev) == of_offset) {
558 *devp = dev; 564 *devp = dev;
559 return 0; 565 return 0;
560 } 566 }
561 } 567 }
562 568
563 return -ENODEV; 569 return -ENODEV;
564 } 570 }
565 571
566 int device_get_child_by_of_offset(struct udevice *parent, int node, 572 int device_get_child_by_of_offset(struct udevice *parent, int node,
567 struct udevice **devp) 573 struct udevice **devp)
568 { 574 {
569 struct udevice *dev; 575 struct udevice *dev;
570 int ret; 576 int ret;
571 577
572 *devp = NULL; 578 *devp = NULL;
573 ret = device_find_child_by_of_offset(parent, node, &dev); 579 ret = device_find_child_by_of_offset(parent, node, &dev);
574 return device_get_device_tail(dev, ret, devp); 580 return device_get_device_tail(dev, ret, devp);
575 } 581 }
576 582
577 static struct udevice *_device_find_global_by_of_offset(struct udevice *parent, 583 static struct udevice *_device_find_global_by_of_offset(struct udevice *parent,
578 int of_offset) 584 int of_offset)
579 { 585 {
580 struct udevice *dev, *found; 586 struct udevice *dev, *found;
581 587
582 if (dev_of_offset(parent) == of_offset) 588 if (dev_of_offset(parent) == of_offset)
583 return parent; 589 return parent;
584 590
585 list_for_each_entry(dev, &parent->child_head, sibling_node) { 591 list_for_each_entry(dev, &parent->child_head, sibling_node) {
586 found = _device_find_global_by_of_offset(dev, of_offset); 592 found = _device_find_global_by_of_offset(dev, of_offset);
587 if (found) 593 if (found)
588 return found; 594 return found;
589 } 595 }
590 596
591 return NULL; 597 return NULL;
592 } 598 }
593 599
594 int device_get_global_by_of_offset(int of_offset, struct udevice **devp) 600 int device_get_global_by_of_offset(int of_offset, struct udevice **devp)
595 { 601 {
596 struct udevice *dev; 602 struct udevice *dev;
597 603
598 dev = _device_find_global_by_of_offset(gd->dm_root, of_offset); 604 dev = _device_find_global_by_of_offset(gd->dm_root, of_offset);
599 return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp); 605 return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
600 } 606 }
601 607
602 int device_find_first_child(struct udevice *parent, struct udevice **devp) 608 int device_find_first_child(struct udevice *parent, struct udevice **devp)
603 { 609 {
604 if (list_empty(&parent->child_head)) { 610 if (list_empty(&parent->child_head)) {
605 *devp = NULL; 611 *devp = NULL;
606 } else { 612 } else {
607 *devp = list_first_entry(&parent->child_head, struct udevice, 613 *devp = list_first_entry(&parent->child_head, struct udevice,
608 sibling_node); 614 sibling_node);
609 } 615 }
610 616
611 return 0; 617 return 0;
612 } 618 }
613 619
614 int device_find_next_child(struct udevice **devp) 620 int device_find_next_child(struct udevice **devp)
615 { 621 {
616 struct udevice *dev = *devp; 622 struct udevice *dev = *devp;
617 struct udevice *parent = dev->parent; 623 struct udevice *parent = dev->parent;
618 624
619 if (list_is_last(&dev->sibling_node, &parent->child_head)) { 625 if (list_is_last(&dev->sibling_node, &parent->child_head)) {
620 *devp = NULL; 626 *devp = NULL;
621 } else { 627 } else {
622 *devp = list_entry(dev->sibling_node.next, struct udevice, 628 *devp = list_entry(dev->sibling_node.next, struct udevice,
623 sibling_node); 629 sibling_node);
624 } 630 }
625 631
626 return 0; 632 return 0;
627 } 633 }
628 634
629 struct udevice *dev_get_parent(struct udevice *child) 635 struct udevice *dev_get_parent(struct udevice *child)
630 { 636 {
631 return child->parent; 637 return child->parent;
632 } 638 }
633 639
634 ulong dev_get_driver_data(struct udevice *dev) 640 ulong dev_get_driver_data(struct udevice *dev)
635 { 641 {
636 return dev->driver_data; 642 return dev->driver_data;
637 } 643 }
638 644
639 const void *dev_get_driver_ops(struct udevice *dev) 645 const void *dev_get_driver_ops(struct udevice *dev)
640 { 646 {
641 if (!dev || !dev->driver->ops) 647 if (!dev || !dev->driver->ops)
642 return NULL; 648 return NULL;
643 649
644 return dev->driver->ops; 650 return dev->driver->ops;
645 } 651 }
646 652
647 enum uclass_id device_get_uclass_id(struct udevice *dev) 653 enum uclass_id device_get_uclass_id(struct udevice *dev)
648 { 654 {
649 return dev->uclass->uc_drv->id; 655 return dev->uclass->uc_drv->id;
650 } 656 }
651 657
652 const char *dev_get_uclass_name(struct udevice *dev) 658 const char *dev_get_uclass_name(struct udevice *dev)
653 { 659 {
654 if (!dev) 660 if (!dev)
655 return NULL; 661 return NULL;
656 662
657 return dev->uclass->uc_drv->name; 663 return dev->uclass->uc_drv->name;
658 } 664 }
659 665
660 bool device_has_children(struct udevice *dev) 666 bool device_has_children(struct udevice *dev)
661 { 667 {
662 return !list_empty(&dev->child_head); 668 return !list_empty(&dev->child_head);
663 } 669 }
664 670
665 bool device_has_active_children(struct udevice *dev) 671 bool device_has_active_children(struct udevice *dev)
666 { 672 {
667 struct udevice *child; 673 struct udevice *child;
668 674
669 for (device_find_first_child(dev, &child); 675 for (device_find_first_child(dev, &child);
670 child; 676 child;
671 device_find_next_child(&child)) { 677 device_find_next_child(&child)) {
672 if (device_active(child)) 678 if (device_active(child))
673 return true; 679 return true;
674 } 680 }
675 681
676 return false; 682 return false;
677 } 683 }
678 684
679 bool device_is_last_sibling(struct udevice *dev) 685 bool device_is_last_sibling(struct udevice *dev)
680 { 686 {
681 struct udevice *parent = dev->parent; 687 struct udevice *parent = dev->parent;
682 688
683 if (!parent) 689 if (!parent)
684 return false; 690 return false;
685 return list_is_last(&dev->sibling_node, &parent->child_head); 691 return list_is_last(&dev->sibling_node, &parent->child_head);
686 } 692 }
687 693
688 void device_set_name_alloced(struct udevice *dev) 694 void device_set_name_alloced(struct udevice *dev)
689 { 695 {
690 dev->flags |= DM_FLAG_NAME_ALLOCED; 696 dev->flags |= DM_FLAG_NAME_ALLOCED;
691 } 697 }
692 698
693 int device_set_name(struct udevice *dev, const char *name) 699 int device_set_name(struct udevice *dev, const char *name)
694 { 700 {
695 name = strdup(name); 701 name = strdup(name);
696 if (!name) 702 if (!name)
697 return -ENOMEM; 703 return -ENOMEM;
698 dev->name = name; 704 dev->name = name;
699 device_set_name_alloced(dev); 705 device_set_name_alloced(dev);
700 706
701 return 0; 707 return 0;
702 } 708 }
703 709
704 bool device_is_compatible(struct udevice *dev, const char *compat) 710 bool device_is_compatible(struct udevice *dev, const char *compat)
705 { 711 {
706 const void *fdt = gd->fdt_blob; 712 const void *fdt = gd->fdt_blob;
707 ofnode node = dev_ofnode(dev); 713 ofnode node = dev_ofnode(dev);
708 714
709 if (ofnode_is_np(node)) 715 if (ofnode_is_np(node))
710 return of_device_is_compatible(ofnode_to_np(node), compat, NULL, NULL); 716 return of_device_is_compatible(ofnode_to_np(node), compat, NULL, NULL);
711 else 717 else
712 return !fdt_node_check_compatible(fdt, ofnode_to_offset(node), compat); 718 return !fdt_node_check_compatible(fdt, ofnode_to_offset(node), compat);
713 } 719 }
714 720
715 bool of_machine_is_compatible(const char *compat) 721 bool of_machine_is_compatible(const char *compat)
716 { 722 {
717 const void *fdt = gd->fdt_blob; 723 const void *fdt = gd->fdt_blob;
718 724
719 return !fdt_node_check_compatible(fdt, 0, compat); 725 return !fdt_node_check_compatible(fdt, 0, compat);
720 } 726 }
721 727
include/clk.h
1 /* 1 /*
2 * Copyright (c) 2015 Google, Inc 2 * Copyright (c) 2015 Google, Inc
3 * Written by Simon Glass <sjg@chromium.org> 3 * Written by Simon Glass <sjg@chromium.org>
4 * Copyright (c) 2016, NVIDIA CORPORATION. 4 * Copyright (c) 2016, NVIDIA CORPORATION.
5 * 5 *
6 * SPDX-License-Identifier: GPL-2.0+ 6 * SPDX-License-Identifier: GPL-2.0+
7 */ 7 */
8 8
9 #ifndef _CLK_H_ 9 #ifndef _CLK_H_
10 #define _CLK_H_ 10 #define _CLK_H_
11 11
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/types.h> 13 #include <linux/types.h>
14 14
15 /** 15 /**
16 * A clock is a hardware signal that oscillates autonomously at a specific 16 * A clock is a hardware signal that oscillates autonomously at a specific
17 * frequency and duty cycle. Most hardware modules require one or more clock 17 * frequency and duty cycle. Most hardware modules require one or more clock
18 * signal to drive their operation. Clock signals are typically generated 18 * signal to drive their operation. Clock signals are typically generated
19 * externally to the HW module consuming them, by an entity this API calls a 19 * externally to the HW module consuming them, by an entity this API calls a
20 * clock provider. This API provides a standard means for drivers to enable and 20 * clock provider. This API provides a standard means for drivers to enable and
21 * disable clocks, and to set the rate at which they oscillate. 21 * disable clocks, and to set the rate at which they oscillate.
22 * 22 *
23 * A driver that implements UCLASS_CLOCK is a clock provider. A provider will 23 * A driver that implements UCLASS_CLOCK is a clock provider. A provider will
24 * often implement multiple separate clocks, since the hardware it manages 24 * often implement multiple separate clocks, since the hardware it manages
25 * often has this capability. clock_uclass.h describes the interface which 25 * often has this capability. clock_uclass.h describes the interface which
26 * clock providers must implement. 26 * clock providers must implement.
27 * 27 *
28 * Clock consumers/clients are the HW modules driven by the clock signals. This 28 * Clock consumers/clients are the HW modules driven by the clock signals. This
29 * header file describes the API used by drivers for those HW modules. 29 * header file describes the API used by drivers for those HW modules.
30 */ 30 */
31 31
32 struct udevice; 32 struct udevice;
33 33
34 /** 34 /**
35 * struct clk - A handle to (allowing control of) a single clock. 35 * struct clk - A handle to (allowing control of) a single clock.
36 * 36 *
37 * Clients provide storage for clock handles. The content of the structure is 37 * Clients provide storage for clock handles. The content of the structure is
38 * managed solely by the clock API and clock drivers. A clock struct is 38 * managed solely by the clock API and clock drivers. A clock struct is
39 * initialized by "get"ing the clock struct. The clock struct is passed to all 39 * initialized by "get"ing the clock struct. The clock struct is passed to all
40 * other clock APIs to identify which clock signal to operate upon. 40 * other clock APIs to identify which clock signal to operate upon.
41 * 41 *
42 * @dev: The device which implements the clock signal. 42 * @dev: The device which implements the clock signal.
43 * @id: The clock signal ID within the provider. 43 * @id: The clock signal ID within the provider.
44 * 44 *
45 * Currently, the clock API assumes that a single integer ID is enough to 45 * Currently, the clock API assumes that a single integer ID is enough to
46 * identify and configure any clock signal for any clock provider. If this 46 * identify and configure any clock signal for any clock provider. If this
47 * assumption becomes invalid in the future, the struct could be expanded to 47 * assumption becomes invalid in the future, the struct could be expanded to
48 * either (a) add more fields to allow clock providers to store additional 48 * either (a) add more fields to allow clock providers to store additional
49 * information, or (b) replace the id field with an opaque pointer, which the 49 * information, or (b) replace the id field with an opaque pointer, which the
50 * provider would dynamically allocated during its .of_xlate op, and process 50 * provider would dynamically allocated during its .of_xlate op, and process
51 * during is .request op. This may require the addition of an extra op to clean 51 * during is .request op. This may require the addition of an extra op to clean
52 * up the allocation. 52 * up the allocation.
53 */ 53 */
54 struct clk { 54 struct clk {
55 struct udevice *dev; 55 struct udevice *dev;
56 /* 56 /*
57 * Written by of_xlate. We assume a single id is enough for now. In the 57 * Written by of_xlate. We assume a single id is enough for now. In the
58 * future, we might add more fields here. 58 * future, we might add more fields here.
59 */ 59 */
60 unsigned long id; 60 unsigned long id;
61 }; 61 };
62 62
63 #if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(CLK) 63 #if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(CLK)
64 struct phandle_1_arg; 64 struct phandle_1_arg;
65 int clk_get_by_index_platdata(struct udevice *dev, int index, 65 int clk_get_by_index_platdata(struct udevice *dev, int index,
66 struct phandle_1_arg *cells, struct clk *clk); 66 struct phandle_1_arg *cells, struct clk *clk);
67 67
68 /** 68 /**
69 * clock_get_by_index - Get/request a clock by integer index. 69 * clock_get_by_index - Get/request a clock by integer index.
70 * 70 *
71 * This looks up and requests a clock. The index is relative to the client 71 * This looks up and requests a clock. The index is relative to the client
72 * device; each device is assumed to have n clocks associated with it somehow, 72 * device; each device is assumed to have n clocks associated with it somehow,
73 * and this function finds and requests one of them. The mapping of client 73 * and this function finds and requests one of them. The mapping of client
74 * device clock indices to provider clocks may be via device-tree properties, 74 * device clock indices to provider clocks may be via device-tree properties,
75 * board-provided mapping tables, or some other mechanism. 75 * board-provided mapping tables, or some other mechanism.
76 * 76 *
77 * @dev: The client device. 77 * @dev: The client device.
78 * @index: The index of the clock to request, within the client's list of 78 * @index: The index of the clock to request, within the client's list of
79 * clocks. 79 * clocks.
80 * @clock A pointer to a clock struct to initialize. 80 * @clock A pointer to a clock struct to initialize.
81 * @return 0 if OK, or a negative error code. 81 * @return 0 if OK, or a negative error code.
82 */ 82 */
83 int clk_get_by_index(struct udevice *dev, int index, struct clk *clk); 83 int clk_get_by_index(struct udevice *dev, int index, struct clk *clk);
84 84
85 /** 85 /**
86 * clock_get_by_name - Get/request a clock by name. 86 * clock_get_by_name - Get/request a clock by name.
87 * 87 *
88 * This looks up and requests a clock. The name is relative to the client 88 * This looks up and requests a clock. The name is relative to the client
89 * device; each device is assumed to have n clocks associated with it somehow, 89 * device; each device is assumed to have n clocks associated with it somehow,
90 * and this function finds and requests one of them. The mapping of client 90 * and this function finds and requests one of them. The mapping of client
91 * device clock names to provider clocks may be via device-tree properties, 91 * device clock names to provider clocks may be via device-tree properties,
92 * board-provided mapping tables, or some other mechanism. 92 * board-provided mapping tables, or some other mechanism.
93 * 93 *
94 * @dev: The client device. 94 * @dev: The client device.
95 * @name: The name of the clock to request, within the client's list of 95 * @name: The name of the clock to request, within the client's list of
96 * clocks. 96 * clocks.
97 * @clk: A pointer to a clock struct to initialize. 97 * @clk: A pointer to a clock struct to initialize.
98 * @return 0 if OK, or a negative error code. 98 * @return 0 if OK, or a negative error code.
99 */ 99 */
100 int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk); 100 int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk);
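For a named lookup, the name is typically matched against the client node's "clock-names" property (an assumption based on the usual device-tree binding). A minimal, hypothetical sketch:

	#include <clk.h>
	#include <dm.h>

	/*
	 * Hypothetical device-tree fragment for the client node:
	 *
	 *	clocks = <&cru SCLK_DEMO>, <&cru ACLK_DEMO>;
	 *	clock-names = "sclk", "aclk";
	 */
	static int demo_get_bus_clock(struct udevice *dev, struct clk *aclk)
	{
		/* Matches "aclk" against clock-names, then requests that clock. */
		return clk_get_by_name(dev, "aclk", aclk);
	}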
101 101
102 /** 102 /**
103 * clk_release_all() - Disable (turn off)/Free an array of previously 103 * clk_release_all() - Disable (turn off)/Free an array of previously
104 * requested clocks. 104 * requested clocks.
105 * 105 *
106 * For each clock contained in the clock array, this function will check if 106 * For each clock contained in the clock array, this function will check if
107 * the clock has been previously requested and, if so, will disable and free it. 107 * the clock has been previously requested and, if so, will disable and free it.
108 * 108 *
109 * @clk: A clock struct array that was previously successfully 109 * @clk: A clock struct array that was previously successfully
110 * requested by clk_request/get_by_*(). 110 * requested by clk_request/get_by_*().
111 * @count: Number of clocks contained in the array 111 * @count: Number of clocks contained in the array
112 * @return zero on success, or -ve error code. 112 * @return zero on success, or -ve error code.
113 */ 113 */
114 int clk_release_all(struct clk *clk, int count); 114 int clk_release_all(struct clk *clk, int count);
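A sketch of the request/rollback pattern this helper supports; the clock count and helper name are invented for illustration:

	#include <clk.h>
	#include <dm.h>

	#define DEMO_NUM_CLOCKS	3	/* arbitrary count for this sketch */

	static int demo_grab_clocks(struct udevice *dev, struct clk *clks)
	{
		int i, ret;

		for (i = 0; i < DEMO_NUM_CLOCKS; i++) {
			ret = clk_get_by_index(dev, i, &clks[i]);
			if (ret) {
				/* Disable and free whatever was requested so far. */
				clk_release_all(clks, i);
				return ret;
			}
		}

		return 0;
	}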
115 115
116 #else 116 #else
117 static inline int clk_get_by_index(struct udevice *dev, int index, 117 static inline int clk_get_by_index(struct udevice *dev, int index,
118 struct clk *clk) 118 struct clk *clk)
119 { 119 {
120 return -ENOSYS; 120 return -ENOSYS;
121 } 121 }
122 122
123 static inline int clk_get_by_name(struct udevice *dev, const char *name, 123 static inline int clk_get_by_name(struct udevice *dev, const char *name,
124 struct clk *clk) 124 struct clk *clk)
125 { 125 {
126 return -ENOSYS; 126 return -ENOSYS;
127 } 127 }
128 128
129 static inline int clk_release_all(struct clk *clk, int count) 129 static inline int clk_release_all(struct clk *clk, int count)
130 { 130 {
131 return -ENOSYS; 131 return -ENOSYS;
132 } 132 }
133 133
134 #endif 134 #endif
135 135
136 #if (CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)) && \
137 CONFIG_IS_ENABLED(CLK)
138 /**
139 * clk_set_defaults - Process 'assigned-{clocks/clock-parents/clock-rates}'
140 * properties to configure clocks
141 *
142 * @dev: A device to process (the ofnode associated with this device
143 * will be processed).
144 */
145 int clk_set_defaults(struct udevice *dev);
146 #else
147 static inline int clk_set_defaults(struct udevice *dev)
148 {
149 return 0;
150 }
151 #endif
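Purely as an illustration (not the implementation added by this patch), a driver's probe path could invoke clk_set_defaults() as sketched below; the device-tree fragment and all names are hypothetical:

	#include <common.h>
	#include <clk.h>
	#include <dm.h>

	/*
	 * Hypothetical consumer node carrying the properties processed by
	 * clk_set_defaults():
	 *
	 *	assigned-clocks = <&cru SCLK_DEMO>;
	 *	assigned-clock-parents = <&demo_clkin>;
	 *	assigned-clock-rates = <125000000>;
	 */
	static int demo_probe(struct udevice *dev)
	{
		int ret;

		/* Apply any assigned-clock configuration from the device tree
		 * before touching the peripheral itself. */
		ret = clk_set_defaults(dev);
		if (ret)
			return ret;

		/* ... remainder of the device setup ... */
		return 0;
	}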
152
136 /** 153 /**
137 * clk_request - Request a clock by provider-specific ID. 154 * clk_request - Request a clock by provider-specific ID.
138 * 155 *
139 * This requests a clock using a provider-specific ID. Generally, this function 156 * This requests a clock using a provider-specific ID. Generally, this function
140 * should not be used, since clk_get_by_index/name() provide an interface that 157 * should not be used, since clk_get_by_index/name() provide an interface that
141 * better separates clients from intimate knowledge of clock providers. 158 * better separates clients from intimate knowledge of clock providers.
142 * However, this function may be useful in core SoC-specific code. 159 * However, this function may be useful in core SoC-specific code.
143 * 160 *
144 * @dev: The clock provider device. 161 * @dev: The clock provider device.
145 * @clk: A pointer to a clock struct to initialize. The caller must 162 * @clk: A pointer to a clock struct to initialize. The caller must
146 * have already initialized any field in this struct which the 163 * have already initialized any field in this struct which the
147 * clock provider uses to identify the clock. 164 * clock provider uses to identify the clock.
148 * @return 0 if OK, or a negative error code. 165 * @return 0 if OK, or a negative error code.
149 */ 166 */
150 int clk_request(struct udevice *dev, struct clk *clk); 167 int clk_request(struct udevice *dev, struct clk *clk);
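A hedged sketch of how SoC-specific code might use this against a known provider device; the helper name is invented and the ID is whatever that provider expects:

	#include <clk.h>
	#include <dm.h>

	/* Illustrative only: request clock 'id' directly from a known provider
	 * device, bypassing any device-tree lookup. */
	static int demo_request_raw(struct udevice *provider, unsigned long id,
				    struct clk *clk)
	{
		clk->id = id;	/* provider-specific ID, initialized by the caller */

		return clk_request(provider, clk);
	}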
151 168
152 /** 169 /**
153 * clk_free - Free a previously requested clock. 170 * clk_free - Free a previously requested clock.
154 * 171 *
155 * @clk: A clock struct that was previously successfully requested by 172 * @clk: A clock struct that was previously successfully requested by
156 * clk_request/get_by_*(). 173 * clk_request/get_by_*().
157 * @return 0 if OK, or a negative error code. 174 * @return 0 if OK, or a negative error code.
158 */ 175 */
159 int clk_free(struct clk *clk); 176 int clk_free(struct clk *clk);
160 177
161 /** 178 /**
162 * clk_get_rate() - Get current clock rate. 179 * clk_get_rate() - Get current clock rate.
163 * 180 *
164 * @clk: A clock struct that was previously successfully requested by 181 * @clk: A clock struct that was previously successfully requested by
165 * clk_request/get_by_*(). 182 * clk_request/get_by_*().
166 * @return clock rate in Hz, or -ve error code. 183 * @return clock rate in Hz, or -ve error code.
167 */ 184 */
168 ulong clk_get_rate(struct clk *clk); 185 ulong clk_get_rate(struct clk *clk);
169 186
170 /** 187 /**
171 * clk_set_rate() - Set current clock rate. 188 * clk_set_rate() - Set current clock rate.
172 * 189 *
173 * @clk: A clock struct that was previously successfully requested by 190 * @clk: A clock struct that was previously successfully requested by
174 * clk_request/get_by_*(). 191 * clk_request/get_by_*().
175 * @rate: New clock rate in Hz. 192 * @rate: New clock rate in Hz.
176 * @return new rate, or -ve error code. 193 * @return new rate, or -ve error code.
177 */ 194 */
178 ulong clk_set_rate(struct clk *clk, ulong rate); 195 ulong clk_set_rate(struct clk *clk, ulong rate);
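Because the call reports the resulting rate rather than a plain zero on success, a caller might check it as in this sketch, assuming errors come back as negative values encoded in the ulong (per the comment above):

	#include <common.h>
	#include <clk.h>
	#include <linux/err.h>

	/* Illustrative: request 'hz' and log what the provider actually delivered. */
	static int demo_retune(struct clk *clk, ulong hz)
	{
		ulong new_rate = clk_set_rate(clk, hz);

		if (IS_ERR_VALUE(new_rate))
			return (int)new_rate;	/* propagate the -ve error code */

		debug("asked for %lu Hz, got %lu Hz\n", hz, new_rate);

		return 0;
	}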
179 196
180 /** 197 /**
181 * clk_set_parent() - Set current clock parent. 198 * clk_set_parent() - Set current clock parent.
182 * 199 *
183 * @clk: A clock struct that was previously successfully requested by 200 * @clk: A clock struct that was previously successfully requested by
184 * clk_request/get_by_*(). 201 * clk_request/get_by_*().
185 * @parent: A clock struct that was previously successfully requested by 202 * @parent: A clock struct that was previously successfully requested by
186 * clk_request/get_by_*(). 203 * clk_request/get_by_*().
187 * @return zero on success, or -ve error code. 204 * @return zero on success, or -ve error code.
188 */ 205 */
189 int clk_set_parent(struct clk *clk, struct clk *parent); 206 int clk_set_parent(struct clk *clk, struct clk *parent);
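As an illustration, re-parenting is often followed by a rate request on the child; both handles must already have been requested. Names are hypothetical, and the error convention for the rate is assumed to match the comment on clk_set_rate():

	#include <clk.h>
	#include <linux/err.h>

	/* Illustrative: switch 'clk' over to the alternative parent 'ref', then
	 * ask for the desired rate on the new parent. */
	static int demo_reparent_and_retune(struct clk *clk, struct clk *ref,
					    ulong hz)
	{
		ulong rate;
		int ret;

		ret = clk_set_parent(clk, ref);
		if (ret)
			return ret;

		rate = clk_set_rate(clk, hz);

		return IS_ERR_VALUE(rate) ? (int)rate : 0;
	}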
190 207
191 /** 208 /**
192 * clk_enable() - Enable (turn on) a clock. 209 * clk_enable() - Enable (turn on) a clock.
193 * 210 *
194 * @clk: A clock struct that was previously successfully requested by 211 * @clk: A clock struct that was previously successfully requested by
195 * clk_request/get_by_*(). 212 * clk_request/get_by_*().
196 * @return zero on success, or -ve error code. 213 * @return zero on success, or -ve error code.
197 */ 214 */
198 int clk_enable(struct clk *clk); 215 int clk_enable(struct clk *clk);
199 216
200 /** 217 /**
201 * clk_disable() - Disable (turn off) a clock. 218 * clk_disable() - Disable (turn off) a clock.
202 * 219 *
203 * @clk: A clock struct that was previously successfully requested by 220 * @clk: A clock struct that was previously successfully requested by
204 * clk_request/get_by_*(). 221 * clk_request/get_by_*().
205 * @return zero on success, or -ve error code. 222 * @return zero on success, or -ve error code.
206 */ 223 */
207 int clk_disable(struct clk *clk); 224 int clk_disable(struct clk *clk);
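A small, hypothetical gating pattern combining the two calls above:

	#include <clk.h>

	/* Illustrative: keep the clock running only for the duration of 'op'. */
	static int demo_with_clock_on(struct clk *clk, int (*op)(void *ctx),
				      void *ctx)
	{
		int ret = clk_enable(clk);

		if (ret)
			return ret;

		ret = op(ctx);

		clk_disable(clk);	/* best effort; result intentionally ignored */

		return ret;
	}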
208 225
209 int soc_clk_dump(void); 226 int soc_clk_dump(void);
210 227
211 #endif 228 #endif
212 229