Blame view
drivers/clk/clk.c
121 KB
ebafb63dc clk: Tag clk core... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
b2476490e clk: introduce th... |
2 3 4 5 |
/* * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> * |
5fb94e9ca docs: Fix some br... |
6 |
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst |
b2476490e clk: introduce th... |
7 |
*/ |
3c3731173 clk: Include clk.... |
8 |
#include <linux/clk.h> |
b09d6d991 clk: remove clk-p... |
9 |
#include <linux/clk-provider.h> |
86be408bf clk: Support for ... |
10 |
#include <linux/clk/clk-conf.h> |
b2476490e clk: introduce th... |
11 12 13 14 15 16 |
#include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> |
766e6a4ec clk: add DT clock... |
17 |
#include <linux/of.h> |
46c8773a5 clk: Add devm_clk... |
18 |
#include <linux/device.h> |
f2f6c2556 clk: add common o... |
19 |
#include <linux/init.h> |
9a34b4539 clk: Add support ... |
20 |
#include <linux/pm_runtime.h> |
533ddeb1e clk: allow reentr... |
21 |
#include <linux/sched.h> |
562ef0b09 clk: Silence spar... |
22 |
#include <linux/clkdev.h> |
b2476490e clk: introduce th... |
23 |
|
d6782c263 clk: Provide not ... |
24 |
#include "clk.h" |
b2476490e clk: introduce th... |
/* Serializes clk_enable()/clk_disable(); taken with interrupts disabled. */
static DEFINE_SPINLOCK(enable_lock);
/* Serializes the sleepable prepare/unprepare and topology-change paths. */
static DEFINE_MUTEX(prepare_lock);

/* Owner task + nesting depth implementing reentrancy for the two locks above. */
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;

/* Roots of the clk tree, clks whose parent isn't registered yet, and notifiers. */
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/* NULL-terminated array of the clk lists, for walks over every registered clk. */
static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};
/***    private data structures    ***/

/*
 * struct clk_parent_map - description of one possible parent of a clk
 * @hw:      direct clk_hw pointer supplied by the provider, if any
 * @core:    cached clk_core once the parent has been resolved
 * @fw_name: name matched against the DT "clock-names" property
 * @name:    globally unique parent name, used as a last-resort lookup
 * @index:   index into the DT "clocks" property (negative when unused)
 */
struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};
b09d6d991 clk: remove clk-p... |
48 49 50 51 52 |
struct clk_core { const char *name; const struct clk_ops *ops; struct clk_hw *hw; struct module *owner; |
9a34b4539 clk: Add support ... |
53 |
struct device *dev; |
89a5ddcc7 clk: Add of_clk_h... |
54 |
struct device_node *of_node; |
b09d6d991 clk: remove clk-p... |
55 |
struct clk_core *parent; |
fc0c209c1 clk: Allow parent... |
56 |
struct clk_parent_map *parents; |
b09d6d991 clk: remove clk-p... |
57 58 59 |
u8 num_parents; u8 new_parent_index; unsigned long rate; |
1c8e60044 clk: Add rate con... |
60 |
unsigned long req_rate; |
b09d6d991 clk: remove clk-p... |
61 62 63 64 |
unsigned long new_rate; struct clk_core *new_parent; struct clk_core *new_child; unsigned long flags; |
e6500344e clk: track the or... |
65 |
bool orphan; |
244788393 clk: core: clarif... |
66 |
bool rpm_enabled; |
b09d6d991 clk: remove clk-p... |
67 68 |
unsigned int enable_count; unsigned int prepare_count; |
e55a839a7 clk: add clock pr... |
69 |
unsigned int protect_count; |
9783c0d98 clk: Allow provid... |
70 71 |
unsigned long min_rate; unsigned long max_rate; |
b09d6d991 clk: remove clk-p... |
72 73 |
unsigned long accuracy; int phase; |
9fba738a5 clk: add duty cyc... |
74 |
struct clk_duty duty; |
b09d6d991 clk: remove clk-p... |
75 76 |
struct hlist_head children; struct hlist_node child_node; |
1c8e60044 clk: Add rate con... |
77 |
struct hlist_head clks; |
b09d6d991 clk: remove clk-p... |
78 79 80 |
unsigned int notifier_count; #ifdef CONFIG_DEBUG_FS struct dentry *dentry; |
8c9a8a8f7 clk: Move debug_n... |
81 |
struct hlist_node debug_node; |
b09d6d991 clk: remove clk-p... |
82 83 84 |
#endif struct kref ref; }; |
/* Instantiate the clk tracepoints exactly once, in this translation unit. */
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

/*
 * struct clk - per-consumer handle to a clk_core.
 * Each handle carries its own min/max rate request; the effective range of
 * the underlying clock is the intersection of all handles' requests
 * (see clk_core_get_boundaries()).
 */
struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;		/* this consumer's requested floor */
	unsigned long max_rate;		/* this consumer's requested ceiling */
	unsigned int exclusive_count;	/* nested clk_rate_exclusive_get() calls */
	struct hlist_node clks_node;	/* entry in core->clks */
};
9a34b4539 clk: Add support ... |
/***           runtime pm          ***/

/*
 * clk_pm_runtime_get - runtime-resume the clk provider's device
 * @core: clk whose provider device should be resumed
 *
 * Returns 0 on success or when runtime PM is not enabled for this clk,
 * and a negative error code on failure.
 */
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() increments the usage count even on
		 * failure, so drop it again here to keep the count balanced
		 * with the clk_pm_runtime_put() callers will skip.
		 */
		pm_runtime_put_noidle(core->dev);
		return ret;
	}

	return 0;
}

/*
 * clk_pm_runtime_put - drop the reference taken by clk_pm_runtime_get()
 * @core: clk whose provider device should be suspended
 *
 * No-op when runtime PM is not enabled for this clk.
 */
static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}
/***           locking             ***/

/*
 * clk_prepare_lock - take the global prepare mutex, reentrantly.
 *
 * The same task may re-enter the framework from within a clk op, so
 * ownership plus a nesting count emulate a recursive mutex on top of
 * prepare_lock.
 */
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		/* Contended: recurse if we already own it, else block. */
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	/* Only the outermost unlock actually releases the mutex. */
	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

/*
 * clk_enable_lock - take the global enable spinlock, reentrantly.
 *
 * Returns the saved IRQ flags to pass back to clk_enable_unlock().
 */
static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		/* Inner unlock of a recursive acquisition: keep the lock. */
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
e55a839a7 clk: add clock pr... |
183 184 185 186 |
static bool clk_core_rate_is_protected(struct clk_core *core) { return core->protect_count; } |
/* Report whether @core is prepared, preferring the hardware's own view. */
static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	/* Runtime-resume the controller before touching the hardware. */
	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}
/* Report whether @core is enabled; safe to call from atomic context. */
static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller before
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}
6b44c854b clk: Fix build wa... |
240 |
|
4dff95dc9 clk: Remove forwa... |
241 |
/*** helper functions ***/ |
1af599df6 clk: human-readab... |
242 |
|
b76281cb9 clk: Make clk inp... |
243 |
const char *__clk_get_name(const struct clk *clk) |
1af599df6 clk: human-readab... |
244 |
{ |
4dff95dc9 clk: Remove forwa... |
245 |
return !clk ? NULL : clk->core->name; |
1af599df6 clk: human-readab... |
246 |
} |
4dff95dc9 clk: Remove forwa... |
247 |
EXPORT_SYMBOL_GPL(__clk_get_name); |
1af599df6 clk: human-readab... |
248 |
|
e7df6f6e2 clk: Constify clk... |
249 |
const char *clk_hw_get_name(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
250 251 252 253 |
{ return hw->core->name; } EXPORT_SYMBOL_GPL(clk_hw_get_name); |
4dff95dc9 clk: Remove forwa... |
254 255 256 257 258 |
struct clk_hw *__clk_get_hw(struct clk *clk) { return !clk ? NULL : clk->core->hw; } EXPORT_SYMBOL_GPL(__clk_get_hw); |
1af599df6 clk: human-readab... |
259 |
|
e7df6f6e2 clk: Constify clk... |
260 |
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
261 262 263 264 |
{ return hw->core->num_parents; } EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); |
e7df6f6e2 clk: Constify clk... |
265 |
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
266 267 268 269 |
{ return hw->core->parent ? hw->core->parent->hw : NULL; } EXPORT_SYMBOL_GPL(clk_hw_get_parent); |
4dff95dc9 clk: Remove forwa... |
270 271 |
static struct clk_core *__clk_lookup_subtree(const char *name, struct clk_core *core) |
bddca8944 clk: JSON debugfs... |
272 |
{ |
035a61c31 clk: Make clk API... |
273 |
struct clk_core *child; |
4dff95dc9 clk: Remove forwa... |
274 |
struct clk_core *ret; |
bddca8944 clk: JSON debugfs... |
275 |
|
4dff95dc9 clk: Remove forwa... |
276 277 |
if (!strcmp(core->name, name)) return core; |
bddca8944 clk: JSON debugfs... |
278 |
|
4dff95dc9 clk: Remove forwa... |
279 280 281 282 |
hlist_for_each_entry(child, &core->children, child_node) { ret = __clk_lookup_subtree(name, child); if (ret) return ret; |
bddca8944 clk: JSON debugfs... |
283 |
} |
4dff95dc9 clk: Remove forwa... |
284 |
return NULL; |
bddca8944 clk: JSON debugfs... |
285 |
} |
4dff95dc9 clk: Remove forwa... |
286 |
static struct clk_core *clk_core_lookup(const char *name) |
bddca8944 clk: JSON debugfs... |
287 |
{ |
4dff95dc9 clk: Remove forwa... |
288 289 |
struct clk_core *root_clk; struct clk_core *ret; |
bddca8944 clk: JSON debugfs... |
290 |
|
4dff95dc9 clk: Remove forwa... |
291 292 |
if (!name) return NULL; |
bddca8944 clk: JSON debugfs... |
293 |
|
4dff95dc9 clk: Remove forwa... |
294 295 296 297 298 |
/* search the 'proper' clk tree first */ hlist_for_each_entry(root_clk, &clk_root_list, child_node) { ret = __clk_lookup_subtree(name, root_clk); if (ret) return ret; |
bddca8944 clk: JSON debugfs... |
299 |
} |
4dff95dc9 clk: Remove forwa... |
300 301 302 303 304 305 |
/* if not found, then search the orphan tree */ hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { ret = __clk_lookup_subtree(name, root_clk); if (ret) return ret; } |
bddca8944 clk: JSON debugfs... |
306 |
|
4dff95dc9 clk: Remove forwa... |
307 |
return NULL; |
bddca8944 clk: JSON debugfs... |
308 |
} |
/*
 * Forward declarations of the DT clkspec helpers defined later in this
 * file; stubbed out to -ENOENT when the kernel is built without CONFIG_OF.
 */
#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name,
			    struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif
/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *      parent: clock-controller@f00abcd {
 *              reg = <0xf00abcd 0xabcd>;
 *              #clock-cells = <0>;
 *      };
 *
 *      clock-controller@c001 {
 *              reg = <0xc001 0xf00d>;
 *              clocks = <&parent>;
 *              clock-names = "xtal";
 *              #clock-cells = <1>;
 *      };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	/* Try the firmware (DT) description of this clk's parents first. */
	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

/*
 * Resolve parent @index of @core into a clk_core and cache it, trying in
 * order: a direct clk_hw reference, the DT/clkdev lookup (clk_core_get()),
 * and finally the legacy global-name lookup.
 */
static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}
4dff95dc9 clk: Remove forwa... |
415 416 |
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, u8 index) |
bddca8944 clk: JSON debugfs... |
417 |
{ |
fc0c209c1 clk: Allow parent... |
418 |
if (!core || index >= core->num_parents || !core->parents) |
4dff95dc9 clk: Remove forwa... |
419 |
return NULL; |
88cfbef2a clk: simplify clk... |
420 |
|
fc0c209c1 clk: Allow parent... |
421 422 |
if (!core->parents[index].core) clk_core_fill_parent_index(core, index); |
88cfbef2a clk: simplify clk... |
423 |
|
fc0c209c1 clk: Allow parent... |
424 |
return core->parents[index].core; |
bddca8944 clk: JSON debugfs... |
425 |
} |
e7df6f6e2 clk: Constify clk... |
426 427 |
struct clk_hw * clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) |
1a9c069cb clk: Add clk_hw_*... |
428 429 430 431 432 433 434 435 |
{ struct clk_core *parent; parent = clk_core_get_parent_by_index(hw->core, index); return !parent ? NULL : parent->hw; } EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); |
4dff95dc9 clk: Remove forwa... |
436 437 438 439 |
unsigned int __clk_get_enable_count(struct clk *clk) { return !clk ? 0 : clk->core->enable_count; } |
b2476490e clk: introduce th... |
440 |
|
4dff95dc9 clk: Remove forwa... |
441 442 |
static unsigned long clk_core_get_rate_nolock(struct clk_core *core) { |
73d4f945f clk: Document and... |
443 444 |
if (!core) return 0; |
c646cbf10 clk: support hard... |
445 |
|
73d4f945f clk: Document and... |
446 447 |
if (!core->num_parents || core->parent) return core->rate; |
b2476490e clk: introduce th... |
448 |
|
73d4f945f clk: Document and... |
449 450 451 452 453 454 |
/* * Clk must have a parent because num_parents > 0 but the parent isn't * known yet. Best to return 0 as the rate of this clk until we can * properly recalc the rate based on the parent's rate. */ return 0; |
b2476490e clk: introduce th... |
455 |
} |
e7df6f6e2 clk: Constify clk... |
456 |
unsigned long clk_hw_get_rate(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
457 458 459 460 |
{ return clk_core_get_rate_nolock(hw->core); } EXPORT_SYMBOL_GPL(clk_hw_get_rate); |
4dff95dc9 clk: Remove forwa... |
461 462 463 464 |
static unsigned long __clk_get_accuracy(struct clk_core *core) { if (!core) return 0; |
b2476490e clk: introduce th... |
465 |
|
4dff95dc9 clk: Remove forwa... |
466 |
return core->accuracy; |
b2476490e clk: introduce th... |
467 |
} |
4dff95dc9 clk: Remove forwa... |
468 |
unsigned long __clk_get_flags(struct clk *clk) |
fcb0ee6a3 clk: Implement cl... |
469 |
{ |
4dff95dc9 clk: Remove forwa... |
470 |
return !clk ? 0 : clk->core->flags; |
fcb0ee6a3 clk: Implement cl... |
471 |
} |
4dff95dc9 clk: Remove forwa... |
472 |
EXPORT_SYMBOL_GPL(__clk_get_flags); |
fcb0ee6a3 clk: Implement cl... |
473 |
|
e7df6f6e2 clk: Constify clk... |
474 |
unsigned long clk_hw_get_flags(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
475 476 477 478 |
{ return hw->core->flags; } EXPORT_SYMBOL_GPL(clk_hw_get_flags); |
e7df6f6e2 clk: Constify clk... |
479 |
bool clk_hw_is_prepared(const struct clk_hw *hw) |
1a9c069cb clk: Add clk_hw_*... |
480 481 482 |
{ return clk_core_is_prepared(hw->core); } |
12aa377bf clk: export some ... |
483 |
EXPORT_SYMBOL_GPL(clk_hw_is_prepared); |
1a9c069cb clk: Add clk_hw_*... |
484 |
|
e55a839a7 clk: add clock pr... |
485 486 487 488 |
bool clk_hw_rate_is_protected(const struct clk_hw *hw) { return clk_core_rate_is_protected(hw->core); } |
12aa377bf clk: export some ... |
489 |
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); |
e55a839a7 clk: add clock pr... |
490 |
|
be68bf883 clk: Add clk_hw_i... |
491 492 493 494 |
bool clk_hw_is_enabled(const struct clk_hw *hw) { return clk_core_is_enabled(hw->core); } |
12aa377bf clk: export some ... |
495 |
EXPORT_SYMBOL_GPL(clk_hw_is_enabled); |
be68bf883 clk: Add clk_hw_i... |
496 |
|
4dff95dc9 clk: Remove forwa... |
497 |
bool __clk_is_enabled(struct clk *clk) |
b2476490e clk: introduce th... |
498 |
{ |
4dff95dc9 clk: Remove forwa... |
499 500 |
if (!clk) return false; |
b2476490e clk: introduce th... |
501 |
|
4dff95dc9 clk: Remove forwa... |
502 503 504 |
return clk_core_is_enabled(clk->core); } EXPORT_SYMBOL_GPL(__clk_is_enabled); |
b2476490e clk: introduce th... |
505 |
|
4dff95dc9 clk: Remove forwa... |
506 507 508 509 510 |
static bool mux_is_better_rate(unsigned long rate, unsigned long now, unsigned long best, unsigned long flags) { if (flags & CLK_MUX_ROUND_CLOSEST) return abs(now - rate) < abs(best - rate); |
1af599df6 clk: human-readab... |
511 |
|
4dff95dc9 clk: Remove forwa... |
512 513 |
return now <= rate && now > best; } |
/*
 * clk_mux_determine_rate_flags - determine_rate helper for mux-like clks
 * @hw:    the clk whose parents are being considered
 * @req:   rate request; best parent/rate are written back into it
 * @flags: CLK_MUX_* policy flags forwarded to mux_is_better_rate()
 *
 * Picks the parent that best satisfies req->rate, optionally asking each
 * parent to adjust its own rate when CLK_SET_RATE_PARENT is set.
 * Returns 0 on success or a negative errno.
 */
int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			/* Ask the parent what rate it could deliver. */
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;
out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
4dff95dc9 clk: Remove forwa... |
576 577 |
struct clk *__clk_lookup(const char *name) |
fcb0ee6a3 clk: Implement cl... |
578 |
{ |
4dff95dc9 clk: Remove forwa... |
579 580 581 |
struct clk_core *core = clk_core_lookup(name); return !core ? NULL : core->hw->clk; |
fcb0ee6a3 clk: Implement cl... |
582 |
} |
b2476490e clk: introduce th... |
583 |
|
4dff95dc9 clk: Remove forwa... |
584 585 586 |
static void clk_core_get_boundaries(struct clk_core *core, unsigned long *min_rate, unsigned long *max_rate) |
1c155b3df clk: Unprepare th... |
587 |
{ |
4dff95dc9 clk: Remove forwa... |
588 |
struct clk *clk_user; |
1c155b3df clk: Unprepare th... |
589 |
|
9f7767226 clk: Assert prepa... |
590 |
lockdep_assert_held(&prepare_lock); |
9783c0d98 clk: Allow provid... |
591 592 |
*min_rate = core->min_rate; *max_rate = core->max_rate; |
496eadf82 clk: Use lockdep ... |
593 |
|
4dff95dc9 clk: Remove forwa... |
594 595 |
hlist_for_each_entry(clk_user, &core->clks, clks_node) *min_rate = max(*min_rate, clk_user->min_rate); |
1c155b3df clk: Unprepare th... |
596 |
|
4dff95dc9 clk: Remove forwa... |
597 598 599 |
hlist_for_each_entry(clk_user, &core->clks, clks_node) *max_rate = min(*max_rate, clk_user->max_rate); } |
1c155b3df clk: Unprepare th... |
600 |
|
9783c0d98 clk: Allow provid... |
601 602 603 604 605 606 607 |
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate) { hw->core->min_rate = min_rate; hw->core->max_rate = max_rate; } EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); |
/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, a negative errno on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
1c155b3df clk: Unprepare th... |
625 |
|
0817b62cc clk: change clk_o... |
626 627 |
int __clk_mux_determine_rate_closest(struct clk_hw *hw, struct clk_rate_request *req) |
b2476490e clk: introduce th... |
628 |
{ |
0817b62cc clk: change clk_o... |
629 |
return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); |
4dff95dc9 clk: Remove forwa... |
630 631 |
} EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); |
/***        clk api        ***/

/*
 * Drop one level of rate protection from @core; when the last level is
 * dropped, the parent chain is unprotected as well.
 */
static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
	    "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

/*
 * Forcibly remove all rate protection from @core (and hence its parent
 * chain), returning the previous protect_count so it can be reinstated
 * later with clk_core_rate_restore_protect().  Returns -EINVAL for a
 * NULL clk and 0 when nothing was protected.
 */
static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}
/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
/*
 * Add one level of rate protection to @core.  The first level also
 * protects the whole parent chain, mirroring clk_core_rate_unprotect().
 */
static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

/*
 * Reinstate a protect_count previously saved by
 * clk_core_rate_nuke_protect().  A @count of 0 means nothing was
 * protected and is a no-op.
 */
static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}
/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same consumer,
 * the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
/*
 * Drop one prepare reference from @core; when the last reference goes,
 * call the hardware .unprepare op, release the runtime PM reference taken
 * at prepare time, and recurse up to unprepare the parent.
 */
static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
	    "%s already unprepared\n", core->name))
		return;

	/* A critical clk must always keep at least one prepare reference. */
	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Unpreparing critical %s\n", core->name))
		return;

	/* CLK_SET_RATE_GATE clks hold rate protection while prepared. */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}
/* Unprepare @core with prepare_lock held across the operation. */
static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}
4dff95dc9 clk: Remove forwa... |
802 803 804 805 806 807 808 809 810 811 812 813 |
/** * clk_unprepare - undo preparation of a clock source * @clk: the clk being unprepared * * clk_unprepare may sleep, which differentiates it from clk_disable. In a * simple case, clk_unprepare can be used instead of clk_disable to gate a clk * if the operation may sleep. One example is a clk which is accessed over * I2c. In the complex case a clk gate operation may require a fast and a slow * part. It is this reason that clk_unprepare and clk_disable are not mutually * exclusive. In fact clk_disable must be called before clk_unprepare. */ void clk_unprepare(struct clk *clk) |
1e435256d clk: add clk_igno... |
814 |
{ |
4dff95dc9 clk: Remove forwa... |
815 816 |
if (IS_ERR_OR_NULL(clk)) return; |
a6adc30ba clk: introduce cl... |
817 |
clk_core_unprepare_lock(clk->core); |
1e435256d clk: add clk_igno... |
818 |
} |
4dff95dc9 clk: Remove forwa... |
819 |
EXPORT_SYMBOL_GPL(clk_unprepare); |
1e435256d clk: add clk_igno... |
820 |
|
/*
 * Take one prepare reference on @core. The first reference grabs a runtime
 * PM reference, prepares the whole parent chain first and then calls the
 * clock's own .prepare op; failures are unwound in reverse order via the
 * goto labels. Caller must hold prepare_lock.
 * Returns 0 on success, -EERROR otherwise.
 */
static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		/* parents must be prepared before their children */
		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}
b2476490e clk: introduce th... |
866 |
|
/* Prepare @core with prepare_lock held; returns 0 or a negative errno. */
static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}
4dff95dc9 clk: Remove forwa... |
877 878 879 880 881 882 883 884 885 886 887 888 889 |
/** * clk_prepare - prepare a clock source * @clk: the clk being prepared * * clk_prepare may sleep, which differentiates it from clk_enable. In a simple * case, clk_prepare can be used instead of clk_enable to ungate a clk if the * operation may sleep. One example is a clk which is accessed over I2c. In * the complex case a clk ungate operation may require a fast and a slow part. * It is this reason that clk_prepare and clk_enable are not mutually * exclusive. In fact clk_prepare must be called before clk_enable. * Returns 0 on success, -EERROR otherwise. */ int clk_prepare(struct clk *clk) |
b2476490e clk: introduce th... |
890 |
{ |
4dff95dc9 clk: Remove forwa... |
891 892 |
if (!clk) return 0; |
b2476490e clk: introduce th... |
893 |
|
a6adc30ba clk: introduce cl... |
894 |
return clk_core_prepare_lock(clk->core); |
b2476490e clk: introduce th... |
895 |
} |
4dff95dc9 clk: Remove forwa... |
896 |
EXPORT_SYMBOL_GPL(clk_prepare); |
b2476490e clk: introduce th... |
897 |
|
/*
 * Drop one enable reference on @core. When the count reaches zero the
 * clock's .disable op is called and the operation recurses up the parent
 * chain. Caller must hold enable_lock; this path must not sleep.
 */
static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	/* refuse to drop the last enable reference of a critical clock */
	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
	    "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}
7ef3dcc81 clk: abstract par... |
924 |
|
/* Disable @core with the IRQ-safe enable_lock held across the call. */
static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}
4dff95dc9 clk: Remove forwa... |
933 934 935 936 937 938 939 940 941 942 943 944 945 |
/** * clk_disable - gate a clock * @clk: the clk being gated * * clk_disable must not sleep, which differentiates it from clk_unprepare. In * a simple case, clk_disable can be used instead of clk_unprepare to gate a * clk if the operation is fast and will never sleep. One example is a * SoC-internal clk which is controlled via simple register writes. In the * complex case a clk gate operation may require a fast and a slow part. It is * this reason that clk_unprepare and clk_disable are not mutually exclusive. * In fact clk_disable must be called before clk_unprepare. */ void clk_disable(struct clk *clk) |
b2476490e clk: introduce th... |
946 |
{ |
4dff95dc9 clk: Remove forwa... |
947 948 |
if (IS_ERR_OR_NULL(clk)) return; |
a6adc30ba clk: introduce cl... |
949 |
clk_core_disable_lock(clk->core); |
b2476490e clk: introduce th... |
950 |
} |
4dff95dc9 clk: Remove forwa... |
951 |
EXPORT_SYMBOL_GPL(clk_disable); |
b2476490e clk: introduce th... |
952 |
|
/*
 * Take one enable reference on @core. The first reference enables the whole
 * parent chain before calling the clock's own .enable op. Caller must hold
 * enable_lock; this path must not sleep.
 * Returns 0 on success, -ESHUTDOWN if the clock is not prepared, or the
 * .enable op's error (after rolling back the parent enable).
 */
static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
	    "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		/* parents must be enabled before their children */
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			/* undo the parent enable on failure */
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}
b2476490e clk: introduce th... |
988 |
|
/* Enable @core with the IRQ-safe enable_lock held; returns 0 or an errno. */
static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}
435365485 clk: clk: Add clk... |
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 |
/** * clk_gate_restore_context - restore context for poweroff * @hw: the clk_hw pointer of clock whose state is to be restored * * The clock gate restore context function enables or disables * the gate clocks based on the enable_count. This is done in cases * where the clock context is lost and based on the enable_count * the clock either needs to be enabled/disabled. This * helps restore the state of gate clocks. */ void clk_gate_restore_context(struct clk_hw *hw) { |
9be766274 clk: Clean up sus... |
1012 1013 1014 1015 |
struct clk_core *core = hw->core; if (core->enable_count) core->ops->enable(hw); |
435365485 clk: clk: Add clk... |
1016 |
else |
9be766274 clk: Clean up sus... |
1017 |
core->ops->disable(hw); |
435365485 clk: clk: Add clk... |
1018 1019 |
} EXPORT_SYMBOL_GPL(clk_gate_restore_context); |
/*
 * Save the register context of @core's subtree: children first (depth
 * first), then @core itself via its .save_context op if it has one.
 * Returns a negative errno on the first failure, otherwise the last
 * op's return value (0 in practice).
 */
static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}
/*
 * Restore the register context of @core's subtree: @core itself first via
 * its .restore_context op, then each child — the mirror order of
 * clk_core_save_context().
 */
static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 *
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

/*
 * Prepare then enable @core, taking each lock in turn; a failed enable
 * rolls back the prepare so the reference counts stay balanced.
 */
static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}
/* Disable then unprepare @core, taking each lock in turn. */
static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}
b2476490e clk: introduce th... |
1130 |
|
/*
 * Unprepare every clock in @core's subtree that nobody has a prepare
 * reference on (children first). Clocks flagged CLK_IGNORE_UNUSED are left
 * alone. Caller must hold prepare_lock.
 */
static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	/* best effort: skip the clock if it cannot be runtime-resumed */
	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

/*
 * Disable every clock in @core's subtree that nobody has an enable
 * reference on (children first). CLK_OPS_PARENT_ENABLE clocks need their
 * parent running for register access, so it is temporarily prepare-enabled
 * around the check. Caller must hold prepare_lock.
 */
static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

/* set by the "clk_ignore_unused" kernel command line parameter */
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

/*
 * Late initcall: gate (then unprepare) every clock that no consumer has
 * claimed. Disables run before unprepares so that a clock is never
 * unprepared while still enabled.
 */
static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);
/*
 * Round the rate in @req to the closest one @core can provide, preferring
 * the .determine_rate op over .round_rate. A rate-protected clock cannot
 * change rate, so the request collapses to the current rate. Caller must
 * hold prepare_lock.
 * Returns 0 on success, -EINVAL if the clock cannot round at all, or the
 * provider op's error.
 */
static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}
0f6cc2b8e clk: rework calls... |
1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 |
static void clk_core_init_rate_req(struct clk_core * const core, struct clk_rate_request *req) { struct clk_core *parent; if (WARN_ON(!core || !req)) return; parent = core->parent; if (parent) { req->best_parent_hw = parent->hw; req->best_parent_rate = parent->rate; } else { req->best_parent_hw = NULL; req->best_parent_rate = 0; |
0817b62cc clk: change clk_o... |
1287 |
} |
0f6cc2b8e clk: rework calls... |
1288 |
} |
0817b62cc clk: change clk_o... |
1289 |
|
/* A clock can round a rate iff it implements .determine_rate or .round_rate. */
static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

/*
 * Round the rate in @req for @core: ask the clock itself when it can round,
 * otherwise delegate up the tree if CLK_SET_RATE_PARENT is set, else fall
 * back to the cached rate. Caller must hold prepare_lock.
 */
static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}
4dff95dc9 clk: Remove forwa... |
1314 1315 1316 |
/** * __clk_determine_rate - get the closest rate actually supported by a clock * @hw: determine the rate of this clock |
2d5b520cf clk: correct comm... |
1317 |
* @req: target rate request |
4dff95dc9 clk: Remove forwa... |
1318 |
* |
6e5ab41b1 clk: Update some ... |
1319 |
* Useful for clk_ops such as .set_rate and .determine_rate. |
4dff95dc9 clk: Remove forwa... |
1320 |
*/ |
0817b62cc clk: change clk_o... |
1321 |
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) |
035a61c31 clk: Make clk API... |
1322 |
{ |
0817b62cc clk: change clk_o... |
1323 1324 |
if (!hw) { req->rate = 0; |
4dff95dc9 clk: Remove forwa... |
1325 |
return 0; |
0817b62cc clk: change clk_o... |
1326 |
} |
035a61c31 clk: Make clk API... |
1327 |
|
0817b62cc clk: change clk_o... |
1328 |
return clk_core_round_rate_nolock(hw->core, req); |
035a61c31 clk: Make clk API... |
1329 |
} |
4dff95dc9 clk: Remove forwa... |
1330 |
EXPORT_SYMBOL_GPL(__clk_determine_rate); |
035a61c31 clk: Make clk API... |
1331 |
|
1a9c069cb clk: Add clk_hw_*... |
1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 |
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) { int ret; struct clk_rate_request req; clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); req.rate = rate; ret = clk_core_round_rate_nolock(hw->core, &req); if (ret) return 0; return req.rate; } EXPORT_SYMBOL_GPL(clk_hw_round_rate); |
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned. If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	/*
	 * Temporarily lift this consumer's own rate protection so the
	 * rounding can explore rates; it is re-applied before unlocking.
	 */
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
b2476490e clk: introduce th... |
1382 |
|
4dff95dc9 clk: Remove forwa... |
1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 |
/** * __clk_notify - call clk notifier chain * @core: clk that is changing rate * @msg: clk notifier type (see include/linux/clk.h) * @old_rate: old clk rate * @new_rate: new clk rate * * Triggers a notifier call chain on the clk rate-change notification * for 'clk'. Passes a pointer to the struct clk and the previous * and current rates to the notifier callback. Intended to be called by * internal clock code only. Returns NOTIFY_DONE from the last driver * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if * a driver returns that. */ static int __clk_notify(struct clk_core *core, unsigned long msg, unsigned long old_rate, unsigned long new_rate) |
b2476490e clk: introduce th... |
1399 |
{ |
4dff95dc9 clk: Remove forwa... |
1400 1401 1402 |
struct clk_notifier *cn; struct clk_notifier_data cnd; int ret = NOTIFY_DONE; |
b2476490e clk: introduce th... |
1403 |
|
4dff95dc9 clk: Remove forwa... |
1404 1405 |
cnd.old_rate = old_rate; cnd.new_rate = new_rate; |
b2476490e clk: introduce th... |
1406 |
|
4dff95dc9 clk: Remove forwa... |
1407 1408 1409 1410 1411 |
list_for_each_entry(cn, &clk_notifier_list, node) { if (cn->clk->core == core) { cnd.clk = cn->clk; ret = srcu_notifier_call_chain(&cn->notifier_head, msg, &cnd); |
17c34c566 clk: aggregate re... |
1412 1413 |
if (ret & NOTIFY_STOP_MASK) return ret; |
4dff95dc9 clk: Remove forwa... |
1414 |
} |
b2476490e clk: introduce th... |
1415 |
} |
4dff95dc9 clk: Remove forwa... |
1416 |
return ret; |
b2476490e clk: introduce th... |
1417 |
} |
4dff95dc9 clk: Remove forwa... |
1418 1419 1420 1421 1422 1423 |
/** * __clk_recalc_accuracies * @core: first clk in the subtree * * Walks the subtree of clks starting with clk and recalculates accuracies as * it goes. Note that if a clk does not implement the .recalc_accuracy |
6e5ab41b1 clk: Update some ... |
1424 |
* callback then it is assumed that the clock will take on the accuracy of its |
4dff95dc9 clk: Remove forwa... |
1425 |
* parent. |
4dff95dc9 clk: Remove forwa... |
1426 1427 |
*/ static void __clk_recalc_accuracies(struct clk_core *core) |
b2476490e clk: introduce th... |
1428 |
{ |
4dff95dc9 clk: Remove forwa... |
1429 1430 |
unsigned long parent_accuracy = 0; struct clk_core *child; |
b2476490e clk: introduce th... |
1431 |
|
4dff95dc9 clk: Remove forwa... |
1432 |
lockdep_assert_held(&prepare_lock); |
b2476490e clk: introduce th... |
1433 |
|
4dff95dc9 clk: Remove forwa... |
1434 1435 |
if (core->parent) parent_accuracy = core->parent->accuracy; |
b2476490e clk: introduce th... |
1436 |
|
4dff95dc9 clk: Remove forwa... |
1437 1438 1439 1440 1441 |
if (core->ops->recalc_accuracy) core->accuracy = core->ops->recalc_accuracy(core->hw, parent_accuracy); else core->accuracy = parent_accuracy; |
b2476490e clk: introduce th... |
1442 |
|
4dff95dc9 clk: Remove forwa... |
1443 1444 |
hlist_for_each_entry(child, &core->children, child_node) __clk_recalc_accuracies(child); |
b2476490e clk: introduce th... |
1445 |
} |
4dff95dc9 clk: Remove forwa... |
1446 |
static long clk_core_get_accuracy(struct clk_core *core) |
e366fdd72 clk: clk-mux: imp... |
1447 |
{ |
4dff95dc9 clk: Remove forwa... |
1448 |
unsigned long accuracy; |
15a02c1f6 clk: Add __clk_mu... |
1449 |
|
4dff95dc9 clk: Remove forwa... |
1450 1451 1452 |
clk_prepare_lock(); if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) __clk_recalc_accuracies(core); |
15a02c1f6 clk: Add __clk_mu... |
1453 |
|
4dff95dc9 clk: Remove forwa... |
1454 1455 |
accuracy = __clk_get_accuracy(core); clk_prepare_unlock(); |
e366fdd72 clk: clk-mux: imp... |
1456 |
|
4dff95dc9 clk: Remove forwa... |
1457 |
return accuracy; |
e366fdd72 clk: clk-mux: imp... |
1458 |
} |
15a02c1f6 clk: Add __clk_mu... |
1459 |
|
4dff95dc9 clk: Remove forwa... |
1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 |
/** * clk_get_accuracy - return the accuracy of clk * @clk: the clk whose accuracy is being returned * * Simply returns the cached accuracy of the clk, unless * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be * issued. * If clk is NULL then returns 0. */ long clk_get_accuracy(struct clk *clk) |
035a61c31 clk: Make clk API... |
1470 |
{ |
4dff95dc9 clk: Remove forwa... |
1471 1472 |
if (!clk) return 0; |
035a61c31 clk: Make clk API... |
1473 |
|
4dff95dc9 clk: Remove forwa... |
1474 |
return clk_core_get_accuracy(clk->core); |
035a61c31 clk: Make clk API... |
1475 |
} |
4dff95dc9 clk: Remove forwa... |
1476 |
EXPORT_SYMBOL_GPL(clk_get_accuracy); |
035a61c31 clk: Make clk API... |
1477 |
|
4dff95dc9 clk: Remove forwa... |
1478 1479 |
static unsigned long clk_recalc(struct clk_core *core, unsigned long parent_rate) |
1c8e60044 clk: Add rate con... |
1480 |
{ |
9a34b4539 clk: Add support ... |
1481 1482 1483 1484 1485 1486 1487 |
unsigned long rate = parent_rate; if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { rate = core->ops->recalc_rate(core->hw, parent_rate); clk_pm_runtime_put(core); } return rate; |
1c8e60044 clk: Add rate con... |
1488 |
} |
/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}
b2476490e clk: introduce th... |
1526 |
|
/*
 * Return @core's rate under prepare_lock, recalculating the subtree first
 * when the clock opts out of caching via CLK_GET_RATE_NOCACHE.
 */
static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);
b2476490e clk: introduce th... |
1558 |
|
/*
 * clk_fetch_parent_index - find @parent's position in @core's parent table
 *
 * Tries progressively more expensive ways to match @parent against each
 * entry of core->parents[]: cached core pointer, clk_hw pointer, a fresh
 * lookup via clk_core_get(), and finally the globally unique name.  On a
 * match the resolved core pointer is cached for next time.
 *
 * Returns the matching index, or -EINVAL if @parent is NULL or is not a
 * possible parent of @core.
 */
static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	/* cache the match so the next lookup hits the fast path above */
	core->parents[i].core = parent;
	return i;
}
e6500344e clk: track the or... |
1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 |
/* * Update the orphan status of @core and all its children. */ static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) { struct clk_core *child; core->orphan = is_orphan; hlist_for_each_entry(child, &core->children, child_node) clk_core_update_orphan_status(child, is_orphan); } |
4dff95dc9 clk: Remove forwa... |
1610 |
static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) |
b2476490e clk: introduce th... |
1611 |
{ |
e6500344e clk: track the or... |
1612 |
bool was_orphan = core->orphan; |
4dff95dc9 clk: Remove forwa... |
1613 |
hlist_del(&core->child_node); |
035a61c31 clk: Make clk API... |
1614 |
|
4dff95dc9 clk: Remove forwa... |
1615 |
if (new_parent) { |
e6500344e clk: track the or... |
1616 |
bool becomes_orphan = new_parent->orphan; |
4dff95dc9 clk: Remove forwa... |
1617 1618 1619 |
/* avoid duplicate POST_RATE_CHANGE notifications */ if (new_parent->new_child == core) new_parent->new_child = NULL; |
b2476490e clk: introduce th... |
1620 |
|
4dff95dc9 clk: Remove forwa... |
1621 |
hlist_add_head(&core->child_node, &new_parent->children); |
e6500344e clk: track the or... |
1622 1623 1624 |
if (was_orphan != becomes_orphan) clk_core_update_orphan_status(core, becomes_orphan); |
4dff95dc9 clk: Remove forwa... |
1625 1626 |
} else { hlist_add_head(&core->child_node, &clk_orphan_list); |
e6500344e clk: track the or... |
1627 1628 |
if (!was_orphan) clk_core_update_orphan_status(core, true); |
4dff95dc9 clk: Remove forwa... |
1629 |
} |
dfc202ead clk: Add tracepoi... |
1630 |
|
4dff95dc9 clk: Remove forwa... |
1631 |
core->parent = new_parent; |
035a61c31 clk: Make clk API... |
1632 |
} |
/*
 * __clk_set_parent_before - prep work done before switching @core's parent
 *
 * Migrates prepare/enable state towards @parent and reparents @core in the
 * tree under the enable lock.  Returns the previous parent so the caller
 * can complete (or unwind) the switch with __clk_set_parent_after().
 */
static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}
b2476490e clk: introduce th... |
1678 |
|
/*
 * __clk_set_parent_after - finish (or unwind) a parent switch
 *
 * Drops the extra prepare/enable references taken by
 * __clk_set_parent_before().  On the error path the caller passes @parent
 * and @old_parent swapped, so the same balancing logic performs the
 * rollback.
 */
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}
b2476490e clk: introduce th... |
1698 |
|
/*
 * __clk_set_parent - switch @core to @parent at the hardware level
 *
 * Performs the prepare/enable migration, invokes the provider's
 * .set_parent op with @p_index and, on failure, reparents back to the old
 * parent.  Returns 0 on success or the error from the .set_parent callback.
 */
static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		/* hardware refused: restore the old topology */
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);

		/* note the swapped arguments: this is the rollback path */
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	/* a veto anywhere in the subtree stops the speculation */
	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}
b2476490e clk: introduce th... |
1773 |
|
4dff95dc9 clk: Remove forwa... |
1774 1775 |
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, struct clk_core *new_parent, u8 p_index) |
b2476490e clk: introduce th... |
1776 |
{ |
4dff95dc9 clk: Remove forwa... |
1777 |
struct clk_core *child; |
b2476490e clk: introduce th... |
1778 |
|
4dff95dc9 clk: Remove forwa... |
1779 1780 1781 1782 1783 1784 1785 |
core->new_rate = new_rate; core->new_parent = new_parent; core->new_parent_index = p_index; /* include clk in new parent's PRE_RATE_CHANGE notifications */ core->new_child = NULL; if (new_parent && new_parent != core->parent) new_parent->new_child = core; |
496eadf82 clk: Use lockdep ... |
1786 |
|
4dff95dc9 clk: Remove forwa... |
1787 1788 1789 1790 1791 |
hlist_for_each_entry(child, &core->children, child_node) { child->new_rate = clk_recalc(child, new_rate); clk_calc_subtree(child, child->new_rate, NULL, 0); } } |
b2476490e clk: introduce th... |
1792 |
|
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		/* the provider may have chosen a different parent and rate */
		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	/* keep climbing if the parent's rate has to change as well */
	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}
b2476490e clk: introduce th... |
1880 |
|
4dff95dc9 clk: Remove forwa... |
1881 1882 1883 1884 |
/* * Notify about rate changes in a subtree. Always walk down the whole tree * so that in case of an error we can walk down the whole tree again and * abort the change. |
b2476490e clk: introduce th... |
1885 |
*/ |
4dff95dc9 clk: Remove forwa... |
1886 1887 |
static struct clk_core *clk_propagate_rate_change(struct clk_core *core, unsigned long event) |
b2476490e clk: introduce th... |
1888 |
{ |
4dff95dc9 clk: Remove forwa... |
1889 |
struct clk_core *child, *tmp_clk, *fail_clk = NULL; |
b2476490e clk: introduce th... |
1890 |
int ret = NOTIFY_DONE; |
4dff95dc9 clk: Remove forwa... |
1891 1892 |
if (core->rate == core->new_rate) return NULL; |
b2476490e clk: introduce th... |
1893 |
|
4dff95dc9 clk: Remove forwa... |
1894 1895 1896 1897 |
if (core->notifier_count) { ret = __clk_notify(core, event, core->rate, core->new_rate); if (ret & NOTIFY_STOP_MASK) fail_clk = core; |
b2476490e clk: introduce th... |
1898 |
} |
4dff95dc9 clk: Remove forwa... |
1899 1900 1901 1902 1903 1904 1905 1906 |
hlist_for_each_entry(child, &core->children, child_node) { /* Skip children who will be reparented to another clock */ if (child->new_parent && child->new_parent != core) continue; tmp_clk = clk_propagate_rate_change(child, event); if (tmp_clk) fail_clk = tmp_clk; } |
5279fc402 clk: add clk accu... |
1907 |
|
4dff95dc9 clk: Remove forwa... |
1908 1909 1910 1911 1912 1913 |
/* handle the new child who might not be in core->children yet */ if (core->new_child) { tmp_clk = clk_propagate_rate_change(core->new_child, event); if (tmp_clk) fail_clk = tmp_clk; } |
5279fc402 clk: add clk accu... |
1914 |
|
4dff95dc9 clk: Remove forwa... |
1915 |
return fail_clk; |
5279fc402 clk: add clk accu... |
1916 |
} |
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	/* prefer the pending parent when a reparent was computed */
	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	/* bail out silently if the device cannot be runtime-resumed */
	if (clk_pm_runtime_get(core))
		return;

	/* some hardware needs the clock running while it is reprogrammed */
	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			/* the combined op also covers the .set_rate below */
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	/* read back what the hardware actually settled on */
	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}
/*
 * clk_core_req_round_rate_nolock - round @req_rate ignoring rate protection
 *
 * Temporarily drops the rate protection so the provider reports what rate
 * it would pick if the rate could be freely set, then restores the
 * protection count.  Returns the rounded rate, or 0 on error or when @core
 * is NULL.
 */
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						     unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;	/*
				 * NOTE(review): a negative cnt is implicitly
				 * converted to a huge unsigned long here;
				 * callers appear to only compare the result
				 * against the current rate — confirm this is
				 * intended.
				 */

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}
/*
 * clk_core_set_rate_nolock - set @core's rate with the prepare lock held
 *
 * Rounds the request, computes the new rates for the affected subtree,
 * sends PRE_RATE_CHANGE notifications (aborting on veto) and then applies
 * the change.  Returns 0 on success or a negative errno.
 */
static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n",
				__func__, fail_clk->name);
		/* tell the whole subtree the change is off */
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	/* remember what the consumer asked for, not what it got */
	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}
035a61c31 clk: Make clk API... |
2079 2080 |
/** |
4dff95dc9 clk: Remove forwa... |
2081 2082 2083 |
* clk_set_rate - specify a new rate for clk * @clk: the clk whose rate is being changed * @rate: the new rate for clk |
035a61c31 clk: Make clk API... |
2084 |
* |
4dff95dc9 clk: Remove forwa... |
2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 |
* In the simplest case clk_set_rate will only adjust the rate of clk. * * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to * propagate up to clk's parent; whether or not this happens depends on the * outcome of clk's .round_rate implementation. If *parent_rate is unchanged * after calling .round_rate then upstream parent propagation is ignored. If * *parent_rate comes back with a new rate for clk's parent then we propagate * up to clk's parent and set its rate. Upward propagation will continue * until either a clk does not support the CLK_SET_RATE_PARENT flag or * .round_rate stops requesting changes to clk's parent_rate. * * Rate changes are accomplished via tree traversal that also recalculates the * rates for the clocks and fires off POST_RATE_CHANGE notifiers. * * Returns 0 on success, -EERROR otherwise. |
035a61c31 clk: Make clk API... |
2100 |
*/ |
4dff95dc9 clk: Remove forwa... |
2101 |
int clk_set_rate(struct clk *clk, unsigned long rate) |
035a61c31 clk: Make clk API... |
2102 |
{ |
4dff95dc9 clk: Remove forwa... |
2103 |
int ret; |
035a61c31 clk: Make clk API... |
2104 2105 |
if (!clk) return 0; |
4dff95dc9 clk: Remove forwa... |
2106 2107 |
/* prevent racing with updates to the clock topology */ clk_prepare_lock(); |
da0f0b2c3 clk: Correct look... |
2108 |
|
55e9b8b7b clk: add clk_rate... |
2109 2110 |
if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); |
4dff95dc9 clk: Remove forwa... |
2111 |
ret = clk_core_set_rate_nolock(clk->core, rate); |
da0f0b2c3 clk: Correct look... |
2112 |
|
55e9b8b7b clk: add clk_rate... |
2113 2114 |
if (clk->exclusive_count) clk_core_rate_protect(clk->core); |
4dff95dc9 clk: Remove forwa... |
2115 |
clk_prepare_unlock(); |
4935b22c4 clk: move some pa... |
2116 |
|
4dff95dc9 clk: Remove forwa... |
2117 |
return ret; |
4935b22c4 clk: move some pa... |
2118 |
} |
4dff95dc9 clk: Remove forwa... |
2119 |
EXPORT_SYMBOL_GPL(clk_set_rate); |
4935b22c4 clk: move some pa... |
2120 |
|
4dff95dc9 clk: Remove forwa... |
2121 |
/** |
65e2218d1 clk: Grammar miss... |
2122 |
* clk_set_rate_exclusive - specify a new rate and get exclusive control |
55e9b8b7b clk: add clk_rate... |
2123 2124 2125 2126 2127 2128 2129 |
* @clk: the clk whose rate is being changed * @rate: the new rate for clk * * This is a combination of clk_set_rate() and clk_rate_exclusive_get() * within a critical section * * This can be used initially to ensure that at least 1 consumer is |
65e2218d1 clk: Grammar miss... |
2130 |
* satisfied when several consumers are competing for exclusivity over the |
55e9b8b7b clk: add clk_rate... |
2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 |
* same clock provider. * * The exclusivity is not applied if setting the rate failed. * * Calls to clk_rate_exclusive_get() should be balanced with calls to * clk_rate_exclusive_put(). * * Returns 0 on success, -EERROR otherwise. */ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) { int ret; if (!clk) return 0; /* prevent racing with updates to the clock topology */ clk_prepare_lock(); /* * The temporary protection removal is not here, on purpose * This function is meant to be used instead of clk_rate_protect, * so before the consumer code path protect the clock provider */ ret = clk_core_set_rate_nolock(clk->core, rate); if (!ret) { clk_core_rate_protect(clk->core); clk->exclusive_count++; } clk_prepare_unlock(); return ret; } EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); /** |
4dff95dc9 clk: Remove forwa... |
2169 2170 2171 2172 2173 2174 2175 2176 |
* clk_set_rate_range - set a rate range for a clock source * @clk: clock source * @min: desired minimum clock rate in Hz, inclusive * @max: desired maximum clock rate in Hz, inclusive * * Returns success (0) or negative errno. */ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) |
4935b22c4 clk: move some pa... |
2177 |
{ |
4dff95dc9 clk: Remove forwa... |
2178 |
int ret = 0; |
6562fbcf3 clk: fix set_rate... |
2179 |
unsigned long old_min, old_max, rate; |
4935b22c4 clk: move some pa... |
2180 |
|
4dff95dc9 clk: Remove forwa... |
2181 2182 |
if (!clk) return 0; |
903efc553 clk: fix new_pare... |
2183 |
|
4dff95dc9 clk: Remove forwa... |
2184 2185 2186 2187 2188 2189 |
if (min > max) { pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu] ", __func__, clk->core->name, clk->dev_id, clk->con_id, min, max); return -EINVAL; |
903efc553 clk: fix new_pare... |
2190 |
} |
4935b22c4 clk: move some pa... |
2191 |
|
4dff95dc9 clk: Remove forwa... |
2192 |
clk_prepare_lock(); |
4935b22c4 clk: move some pa... |
2193 |
|
55e9b8b7b clk: add clk_rate... |
2194 2195 |
if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); |
6562fbcf3 clk: fix set_rate... |
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 |
/* Save the current values in case we need to rollback the change */ old_min = clk->min_rate; old_max = clk->max_rate; clk->min_rate = min; clk->max_rate = max; rate = clk_core_get_rate_nolock(clk->core); if (rate < min || rate > max) { /* * FIXME: * We are in bit of trouble here, current rate is outside the * the requested range. We are going try to request appropriate * range boundary but there is a catch. It may fail for the * usual reason (clock broken, clock protected, etc) but also * because: * - round_rate() was not favorable and fell on the wrong * side of the boundary * - the determine_rate() callback does not really check for * this corner case when determining the rate */ if (rate < min) rate = min; else rate = max; ret = clk_core_set_rate_nolock(clk->core, rate); if (ret) { /* rollback the changes */ clk->min_rate = old_min; clk->max_rate = old_max; } |
4935b22c4 clk: move some pa... |
2228 |
} |
55e9b8b7b clk: add clk_rate... |
2229 2230 |
if (clk->exclusive_count) clk_core_rate_protect(clk->core); |
4dff95dc9 clk: Remove forwa... |
2231 |
clk_prepare_unlock(); |
4935b22c4 clk: move some pa... |
2232 |
|
4dff95dc9 clk: Remove forwa... |
2233 |
return ret; |
3fa2252b7 clk: Add set_rate... |
2234 |
} |
4dff95dc9 clk: Remove forwa... |
2235 |
EXPORT_SYMBOL_GPL(clk_set_rate_range); |
3fa2252b7 clk: Add set_rate... |
2236 |
|
4dff95dc9 clk: Remove forwa... |
2237 2238 2239 2240 2241 2242 2243 2244 |
/** * clk_set_min_rate - set a minimum clock rate for a clock source * @clk: clock source * @rate: desired minimum clock rate in Hz, inclusive * * Returns success (0) or negative errno. */ int clk_set_min_rate(struct clk *clk, unsigned long rate) |
3fa2252b7 clk: Add set_rate... |
2245 |
{ |
4dff95dc9 clk: Remove forwa... |
2246 2247 2248 2249 |
if (!clk) return 0; return clk_set_rate_range(clk, rate, clk->max_rate); |
3fa2252b7 clk: Add set_rate... |
2250 |
} |
4dff95dc9 clk: Remove forwa... |
2251 |
EXPORT_SYMBOL_GPL(clk_set_min_rate); |
3fa2252b7 clk: Add set_rate... |
2252 |
|
4dff95dc9 clk: Remove forwa... |
2253 2254 2255 2256 2257 2258 2259 2260 |
/** * clk_set_max_rate - set a maximum clock rate for a clock source * @clk: clock source * @rate: desired maximum clock rate in Hz, inclusive * * Returns success (0) or negative errno. */ int clk_set_max_rate(struct clk *clk, unsigned long rate) |
3fa2252b7 clk: Add set_rate... |
2261 |
{ |
4dff95dc9 clk: Remove forwa... |
2262 2263 |
if (!clk) return 0; |
4935b22c4 clk: move some pa... |
2264 |
|
4dff95dc9 clk: Remove forwa... |
2265 |
return clk_set_rate_range(clk, clk->min_rate, rate); |
4935b22c4 clk: move some pa... |
2266 |
} |
4dff95dc9 clk: Remove forwa... |
2267 |
EXPORT_SYMBOL_GPL(clk_set_max_rate); |
4935b22c4 clk: move some pa... |
2268 |
|
a093bde2b clk: Provide opti... |
2269 |
/** |
4dff95dc9 clk: Remove forwa... |
2270 2271 |
* clk_get_parent - return the parent of a clk * @clk: the clk whose parent gets returned |
b2476490e clk: introduce th... |
2272 |
* |
4dff95dc9 clk: Remove forwa... |
2273 |
* Simply returns clk->parent. Returns NULL if clk is NULL. |
b2476490e clk: introduce th... |
2274 |
*/ |
4dff95dc9 clk: Remove forwa... |
2275 |
struct clk *clk_get_parent(struct clk *clk) |
b2476490e clk: introduce th... |
2276 |
{ |
4dff95dc9 clk: Remove forwa... |
2277 |
struct clk *parent; |
b2476490e clk: introduce th... |
2278 |
|
fc4a05d4b clk: Remove unuse... |
2279 2280 |
if (!clk) return NULL; |
4dff95dc9 clk: Remove forwa... |
2281 |
clk_prepare_lock(); |
fc4a05d4b clk: Remove unuse... |
2282 2283 |
/* TODO: Create a per-user clk and change callers to call clk_put */ parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; |
4dff95dc9 clk: Remove forwa... |
2284 |
clk_prepare_unlock(); |
496eadf82 clk: Use lockdep ... |
2285 |
|
4dff95dc9 clk: Remove forwa... |
2286 2287 2288 |
return parent; } EXPORT_SYMBOL_GPL(clk_get_parent); |
b2476490e clk: introduce th... |
2289 |
|
4dff95dc9 clk: Remove forwa... |
2290 2291 |
static struct clk_core *__clk_init_parent(struct clk_core *core) { |
5146e0b05 clk: simplify __c... |
2292 |
u8 index = 0; |
4dff95dc9 clk: Remove forwa... |
2293 |
|
2430a94d1 clk: fix __clk_in... |
2294 |
if (core->num_parents > 1 && core->ops->get_parent) |
5146e0b05 clk: simplify __c... |
2295 |
index = core->ops->get_parent(core->hw); |
b2476490e clk: introduce th... |
2296 |
|
5146e0b05 clk: simplify __c... |
2297 |
return clk_core_get_parent_by_index(core, index); |
b2476490e clk: introduce th... |
2298 |
} |
/*
 * Move @core under @new_parent and propagate the consequences: the tree
 * topology is updated first, then cached accuracies and rates are
 * recalculated down the subtree (rates with POST_RATE_CHANGE notifications).
 */
static void clk_core_reparent(struct clk_core *core,
				  struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}
42c86547f clk: Expose clk_h... |
2306 2307 2308 2309 2310 2311 2312 |
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) { if (!hw) return; clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); } |
4dff95dc9 clk: Remove forwa... |
2313 2314 2315 2316 2317 2318 2319 2320 2321 |
/** * clk_has_parent - check if a clock is a possible parent for another * @clk: clock source * @parent: parent clock source * * This function can be used in drivers that need to check that a clock can be * the parent of another without actually changing the parent. * * Returns true if @parent is a possible parent for @clk, false otherwise. |
b2476490e clk: introduce th... |
2322 |
*/ |
4dff95dc9 clk: Remove forwa... |
2323 |
bool clk_has_parent(struct clk *clk, struct clk *parent) |
b2476490e clk: introduce th... |
2324 |
{ |
4dff95dc9 clk: Remove forwa... |
2325 |
struct clk_core *core, *parent_core; |
fc0c209c1 clk: Allow parent... |
2326 |
int i; |
b2476490e clk: introduce th... |
2327 |
|
4dff95dc9 clk: Remove forwa... |
2328 2329 2330 |
/* NULL clocks should be nops, so return success if either is NULL. */ if (!clk || !parent) return true; |
7452b2191 clk: core: clk_ca... |
2331 |
|
4dff95dc9 clk: Remove forwa... |
2332 2333 |
core = clk->core; parent_core = parent->core; |
71472c0c0 clk: add support ... |
2334 |
|
4dff95dc9 clk: Remove forwa... |
2335 2336 2337 |
/* Optimize for the case where the parent is already the parent. */ if (core->parent == parent_core) return true; |
1c8e60044 clk: Add rate con... |
2338 |
|
fc0c209c1 clk: Allow parent... |
2339 2340 2341 2342 2343 |
for (i = 0; i < core->num_parents; i++) if (!strcmp(core->parents[i].name, parent_core->name)) return true; return false; |
4dff95dc9 clk: Remove forwa... |
2344 2345 |
} EXPORT_SYMBOL_GPL(clk_has_parent); |
03bc10ab5 clk: check ->dete... |
2346 |
|
91baa9ffe clk: take the pre... |
2347 2348 |
static int clk_core_set_parent_nolock(struct clk_core *core, struct clk_core *parent) |
4dff95dc9 clk: Remove forwa... |
2349 2350 2351 2352 |
{ int ret = 0; int p_index = 0; unsigned long p_rate = 0; |
91baa9ffe clk: take the pre... |
2353 |
lockdep_assert_held(&prepare_lock); |
4dff95dc9 clk: Remove forwa... |
2354 2355 |
if (!core) return 0; |
07a9d89f7 MLK-21052-08 clk:... |
2356 2357 |
if ((core->parent == parent) && !(core->flags & CLK_SET_PARENT_NOCACHE)) |
91baa9ffe clk: take the pre... |
2358 |
return 0; |
4dff95dc9 clk: Remove forwa... |
2359 |
|
ef13e55c2 clk: Remove extra... |
2360 |
/* verify ops for multi-parent clks */ |
91baa9ffe clk: take the pre... |
2361 2362 |
if (core->num_parents > 1 && !core->ops->set_parent) return -EPERM; |
7452b2191 clk: core: clk_ca... |
2363 |
|
4dff95dc9 clk: Remove forwa... |
2364 |
/* check that we are allowed to re-parent if the clock is in use */ |
91baa9ffe clk: take the pre... |
2365 2366 |
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) return -EBUSY; |
b2476490e clk: introduce th... |
2367 |
|
e55a839a7 clk: add clock pr... |
2368 2369 |
if (clk_core_rate_is_protected(core)) return -EBUSY; |
b2476490e clk: introduce th... |
2370 |
|
71472c0c0 clk: add support ... |
2371 |
/* try finding the new parent index */ |
4dff95dc9 clk: Remove forwa... |
2372 |
if (parent) { |
d6968fca7 clk: s/clk/core/ ... |
2373 |
p_index = clk_fetch_parent_index(core, parent); |
f1c8b2edf clk: Add error ha... |
2374 |
if (p_index < 0) { |
71472c0c0 clk: add support ... |
2375 2376 |
pr_debug("%s: clk %s can not be parent of clk %s ", |
4dff95dc9 clk: Remove forwa... |
2377 |
__func__, parent->name, core->name); |
91baa9ffe clk: take the pre... |
2378 |
return p_index; |
71472c0c0 clk: add support ... |
2379 |
} |
e8f0e68ec clk: slightly opt... |
2380 |
p_rate = parent->rate; |
b2476490e clk: introduce th... |
2381 |
} |
9a34b4539 clk: Add support ... |
2382 2383 |
ret = clk_pm_runtime_get(core); if (ret) |
91baa9ffe clk: take the pre... |
2384 |
return ret; |
9a34b4539 clk: Add support ... |
2385 |
|
4dff95dc9 clk: Remove forwa... |
2386 2387 |
/* propagate PRE_RATE_CHANGE notifications */ ret = __clk_speculate_rates(core, p_rate); |
b2476490e clk: introduce th... |
2388 |
|
4dff95dc9 clk: Remove forwa... |
2389 2390 |
/* abort if a driver objects */ if (ret & NOTIFY_STOP_MASK) |
9a34b4539 clk: Add support ... |
2391 |
goto runtime_put; |
b2476490e clk: introduce th... |
2392 |
|
4dff95dc9 clk: Remove forwa... |
2393 2394 |
/* do the re-parent */ ret = __clk_set_parent(core, parent, p_index); |
b2476490e clk: introduce th... |
2395 |
|
4dff95dc9 clk: Remove forwa... |
2396 2397 2398 2399 2400 2401 |
/* propagate rate an accuracy recalculation accordingly */ if (ret) { __clk_recalc_rates(core, ABORT_RATE_CHANGE); } else { __clk_recalc_rates(core, POST_RATE_CHANGE); __clk_recalc_accuracies(core); |
b2476490e clk: introduce th... |
2402 |
} |
9a34b4539 clk: Add support ... |
2403 2404 |
runtime_put: clk_pm_runtime_put(core); |
71472c0c0 clk: add support ... |
2405 |
|
4dff95dc9 clk: Remove forwa... |
2406 2407 |
return ret; } |
b2476490e clk: introduce th... |
2408 |
|
3567894b6 clk: core: introd... |
2409 2410 2411 2412 2413 |
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent) { return clk_core_set_parent_nolock(hw->core, parent->core); } EXPORT_SYMBOL_GPL(clk_hw_set_parent); |
4dff95dc9 clk: Remove forwa... |
2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 |
/** * clk_set_parent - switch the parent of a mux clk * @clk: the mux clk whose input we are switching * @parent: the new input to clk * * Re-parent clk to use parent as its new input source. If clk is in * prepared state, the clk will get enabled for the duration of this call. If * that's not acceptable for a specific clk (Eg: the consumer can't handle * that, the reparenting is glitchy in hardware, etc), use the * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. * * After successfully changing clk's parent clk_set_parent will update the * clk topology, sysfs topology and propagate rate recalculation via * __clk_recalc_rates. * * Returns 0 on success, -EERROR otherwise. */ int clk_set_parent(struct clk *clk, struct clk *parent) { |
91baa9ffe clk: take the pre... |
2433 |
int ret; |
4dff95dc9 clk: Remove forwa... |
2434 2435 |
if (!clk) return 0; |
91baa9ffe clk: take the pre... |
2436 |
clk_prepare_lock(); |
55e9b8b7b clk: add clk_rate... |
2437 2438 2439 |
if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); |
91baa9ffe clk: take the pre... |
2440 2441 |
ret = clk_core_set_parent_nolock(clk->core, parent ? parent->core : NULL); |
55e9b8b7b clk: add clk_rate... |
2442 2443 2444 |
if (clk->exclusive_count) clk_core_rate_protect(clk->core); |
91baa9ffe clk: take the pre... |
2445 2446 2447 |
clk_prepare_unlock(); return ret; |
b2476490e clk: introduce th... |
2448 |
} |
4dff95dc9 clk: Remove forwa... |
2449 |
EXPORT_SYMBOL_GPL(clk_set_parent); |
b2476490e clk: introduce th... |
2450 |
|
/*
 * Apply a phase shift of @degrees to @core with the prepare lock held.
 *
 * Returns 0 on success and updates the cached core->phase; -EBUSY when the
 * clock's rate is protected; -EINVAL when the provider has no ->set_phase
 * op (the default, since ret starts at -EINVAL and is only overwritten by
 * the callback's result).
 */
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* trace entry/exit even when the op is missing or fails */
	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		/* only cache the new phase when the hardware accepted it */
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}
4dff95dc9 clk: Remove forwa... |
2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 |
/** * clk_set_phase - adjust the phase shift of a clock signal * @clk: clock signal source * @degrees: number of degrees the signal is shifted * * Shifts the phase of a clock signal by the specified * degrees. Returns 0 on success, -EERROR otherwise. * * This function makes no distinction about the input or reference * signal that we adjust the clock signal phase against. For example * phase locked-loop clock signal generators we may shift phase with * respect to feedback clock signal input, but for other cases the * clock phase may be shifted with respect to some other, unspecified * signal. * * Additionally the concept of phase shift does not propagate through * the clock tree hierarchy, which sets it apart from clock rates and * clock accuracy. A parent clock phase attribute does not have an * impact on the phase attribute of a child clock. |
b2476490e clk: introduce th... |
2491 |
*/ |
4dff95dc9 clk: Remove forwa... |
2492 |
int clk_set_phase(struct clk *clk, int degrees) |
b2476490e clk: introduce th... |
2493 |
{ |
9e4d04ade clk: add clk_core... |
2494 |
int ret; |
b2476490e clk: introduce th... |
2495 |
|
4dff95dc9 clk: Remove forwa... |
2496 2497 |
if (!clk) return 0; |
b2476490e clk: introduce th... |
2498 |
|
4dff95dc9 clk: Remove forwa... |
2499 2500 2501 2502 |
/* sanity check degrees */ degrees %= 360; if (degrees < 0) degrees += 360; |
bf47b4fd8 clk: Check parent... |
2503 |
|
4dff95dc9 clk: Remove forwa... |
2504 |
clk_prepare_lock(); |
3fa2252b7 clk: Add set_rate... |
2505 |
|
55e9b8b7b clk: add clk_rate... |
2506 2507 |
if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); |
3fa2252b7 clk: Add set_rate... |
2508 |
|
9e4d04ade clk: add clk_core... |
2509 |
ret = clk_core_set_phase_nolock(clk->core, degrees); |
3fa2252b7 clk: Add set_rate... |
2510 |
|
55e9b8b7b clk: add clk_rate... |
2511 2512 |
if (clk->exclusive_count) clk_core_rate_protect(clk->core); |
b2476490e clk: introduce th... |
2513 |
|
4dff95dc9 clk: Remove forwa... |
2514 |
clk_prepare_unlock(); |
dfc202ead clk: Add tracepoi... |
2515 |
|
4dff95dc9 clk: Remove forwa... |
2516 2517 2518 |
return ret; } EXPORT_SYMBOL_GPL(clk_set_phase); |
b2476490e clk: introduce th... |
2519 |
|
/*
 * Return @core's phase in degrees, refreshing the cached value from the
 * provider's ->get_phase() callback when one exists. Takes the prepare
 * lock itself, so callers must not already hold it.
 */
static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	/* Always try to update cached phase if possible */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}
4dff95dc9 clk: Remove forwa... |
2533 2534 2535 2536 2537 2538 2539 2540 |
/** * clk_get_phase - return the phase shift of a clock signal * @clk: clock signal source * * Returns the phase shift of a clock node in degrees, otherwise returns * -EERROR. */ int clk_get_phase(struct clk *clk) |
1c8e60044 clk: Add rate con... |
2541 |
{ |
4dff95dc9 clk: Remove forwa... |
2542 |
if (!clk) |
1c8e60044 clk: Add rate con... |
2543 |
return 0; |
4dff95dc9 clk: Remove forwa... |
2544 2545 2546 |
return clk_core_get_phase(clk->core); } EXPORT_SYMBOL_GPL(clk_get_phase); |
1c8e60044 clk: Add rate con... |
2547 |
|
/* Reset @core's cached duty cycle to the default of 50% (1/2). */
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

/*
 * Refresh @core's cached duty cycle from its provider. Falls back to the
 * parent when there is no ->get_duty_cycle op, and resets to 50% when the
 * provider errors out or reports an invalid ratio (den == 0 or num > den).
 */
static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

/*
 * Inherit the duty cycle from the parent when CLK_DUTY_CYCLE_PARENT is set;
 * otherwise reset the cache to the 50% default.
 */
static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

/*
 * Apply @duty to @core with the prepare lock held. Delegates to the parent
 * when the provider has no ->set_duty_cycle op; refuses rate-protected
 * clocks with -EBUSY. The cache is only updated on success.
 */
static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

/*
 * Forward a duty-cycle request to the parent when the flags allow it
 * (CLK_DUTY_CYCLE_PARENT or CLK_SET_RATE_PARENT); a silent nop otherwise.
 */
static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation
 *
 * Returns (0) on success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
	int ret;
	struct clk_duty duty;

	if (!clk)
		return 0;

	/* sanity check the ratio */
	if (den == 0 || num > den)
		return -EINVAL;

	duty.num = num;
	duty.den = den;

	clk_prepare_lock();

	/* temporarily drop this consumer's rate protection, if any */
	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);

/*
 * Return @core's duty cycle as an integer ratio scaled by @scale, after
 * refreshing the cache; a negative errno on refresh failure.
 */
static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
					  unsigned int scale)
{
	struct clk_duty *duty = &core->duty;
	int ret;

	clk_prepare_lock();

	ret = clk_core_update_duty_cycle_nolock(core);
	if (!ret)
		ret = mult_frac(scale, duty->num, duty->den);

	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio of a clock node multiplied by the provided
 * scaling factor, or negative errno on error.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
{
	if (!clk)
		return 0;

	return clk_core_get_scaled_duty_cycle(clk->core, scale);
}
EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
4dff95dc9 clk: Remove forwa... |
2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 |
/** * clk_is_match - check if two clk's point to the same hardware clock * @p: clk compared against q * @q: clk compared against p * * Returns true if the two struct clk pointers both point to the same hardware * clock node. Put differently, returns true if struct clk *p and struct clk *q * share the same struct clk_core object. * * Returns false otherwise. Note that two NULL clks are treated as matching. */ bool clk_is_match(const struct clk *p, const struct clk *q) { /* trivial case: identical struct clk's or both NULL */ if (p == q) return true; |
1c8e60044 clk: Add rate con... |
2729 |
|
3fe003f94 clk: Spelling s/d... |
2730 |
/* true if clk->core pointers match. Avoid dereferencing garbage */ |
4dff95dc9 clk: Remove forwa... |
2731 2732 2733 |
if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) if (p->core == q->core) return true; |
1c8e60044 clk: Add rate con... |
2734 |
|
4dff95dc9 clk: Remove forwa... |
2735 2736 2737 |
return false; } EXPORT_SYMBOL_GPL(clk_is_match); |
1c8e60044 clk: Add rate con... |
2738 |
|
4dff95dc9 clk: Remove forwa... |
2739 |
/*** debugfs support ***/ |
1c8e60044 clk: Add rate con... |
2740 |
|
4dff95dc9 clk: Remove forwa... |
2741 2742 |
#ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> |
1c8e60044 clk: Add rate con... |
2743 |
|
4dff95dc9 clk: Remove forwa... |
2744 2745 2746 2747 |
static struct dentry *rootdir; static int inited = 0; static DEFINE_MUTEX(clk_debug_lock); static HLIST_HEAD(clk_debug_list); |
1c8e60044 clk: Add rate con... |
2748 |
|
4dff95dc9 clk: Remove forwa... |
2749 2750 2751 2752 2753 2754 2755 |
static struct hlist_head *orphan_list[] = { &clk_orphan_list, NULL, }; static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) |
b2476490e clk: introduce th... |
2756 |
{ |
9fba738a5 clk: add duty cyc... |
2757 2758 |
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d ", |
4dff95dc9 clk: Remove forwa... |
2759 2760 |
level * 3 + 1, "", 30 - level * 3, c->name, |
e55a839a7 clk: add clock pr... |
2761 2762 |
c->enable_count, c->prepare_count, c->protect_count, clk_core_get_rate(c), clk_core_get_accuracy(c), |
9fba738a5 clk: add duty cyc... |
2763 2764 |
clk_core_get_phase(c), clk_core_get_scaled_duty_cycle(c, 100000)); |
4dff95dc9 clk: Remove forwa... |
2765 |
} |
89ac8d7ae clk: handle NULL ... |
2766 |
|
4dff95dc9 clk: Remove forwa... |
/*
 * Recursively print @c and every descendant, one clk_summary row each,
 * increasing the indentation level by one per generation.
 */
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}
b2476490e clk: introduce th... |
2777 |
|
4dff95dc9 clk: Remove forwa... |
2778 |
static int clk_summary_show(struct seq_file *s, void *data) |
1c8e60044 clk: Add rate con... |
2779 |
{ |
4dff95dc9 clk: Remove forwa... |
2780 2781 |
struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; |
1c8e60044 clk: Add rate con... |
2782 |
|
9fba738a5 clk: add duty cyc... |
2783 2784 2785 2786 2787 2788 |
seq_puts(s, " enable prepare protect duty "); seq_puts(s, " clock count count count rate accuracy phase cycle "); seq_puts(s, "--------------------------------------------------------------------------------------------- "); |
b2476490e clk: introduce th... |
2789 |
|
1c8e60044 clk: Add rate con... |
2790 |
clk_prepare_lock(); |
4dff95dc9 clk: Remove forwa... |
2791 2792 2793 |
for (; *lists; lists++) hlist_for_each_entry(c, *lists, child_node) clk_summary_show_subtree(s, c, 0); |
b2476490e clk: introduce th... |
2794 |
|
eab89f690 clk: abstract loc... |
2795 |
clk_prepare_unlock(); |
b2476490e clk: introduce th... |
2796 |
|
4dff95dc9 clk: Remove forwa... |
2797 |
return 0; |
b2476490e clk: introduce th... |
2798 |
} |
fec0ef3f5 clk: Re-use DEFIN... |
2799 |
DEFINE_SHOW_ATTRIBUTE(clk_summary); |
b2476490e clk: introduce th... |
2800 |
|
/*
 * Emit one clk as the body of a JSON object. Note the object is left open:
 * clk_dump_subtree() appends the children and the closing brace.
 */
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	unsigned long min_rate, max_rate;

	clk_core_get_boundaries(c, &min_rate, &max_rate);

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"min_rate\": %lu,", min_rate);
	seq_printf(s, "\"max_rate\": %lu,", max_rate);
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
	/* last element: no trailing comma */
	seq_printf(s, "\"duty_cycle\": %u",
		   clk_core_get_scaled_duty_cycle(c, 100000));
}
b2476490e clk: introduce th... |
2819 |
|
/*
 * Emit @c (opened by clk_dump_one()) plus all of its children as nested,
 * comma-separated JSON objects, and close @c's object with '}'.
 */
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_putc(s, ',');
		clk_dump_subtree(s, child, level + 1);
	}

	seq_putc(s, '}');
}
fec0ef3f5 clk: Re-use DEFIN... |
2832 |
static int clk_dump_show(struct seq_file *s, void *data) |
4e88f3de8 clk: Introduce cl... |
2833 |
{ |
4dff95dc9 clk: Remove forwa... |
2834 2835 2836 |
struct clk_core *c; bool first_node = true; struct hlist_head **lists = (struct hlist_head **)s->private; |
4e88f3de8 clk: Introduce cl... |
2837 |
|
4d3275867 clk: Replace four... |
2838 |
seq_putc(s, '{'); |
4dff95dc9 clk: Remove forwa... |
2839 |
clk_prepare_lock(); |
035a61c31 clk: Make clk API... |
2840 |
|
4dff95dc9 clk: Remove forwa... |
2841 2842 2843 |
for (; *lists; lists++) { hlist_for_each_entry(c, *lists, child_node) { if (!first_node) |
4d3275867 clk: Replace four... |
2844 |
seq_putc(s, ','); |
4dff95dc9 clk: Remove forwa... |
2845 2846 2847 2848 |
first_node = false; clk_dump_subtree(s, c, 0); } } |
4e88f3de8 clk: Introduce cl... |
2849 |
|
4dff95dc9 clk: Remove forwa... |
2850 |
clk_prepare_unlock(); |
4e88f3de8 clk: Introduce cl... |
2851 |
|
70e9f4dde clk: add newline ... |
2852 2853 |
seq_puts(s, "} "); |
4dff95dc9 clk: Remove forwa... |
2854 |
return 0; |
4e88f3de8 clk: Introduce cl... |
2855 |
} |
fec0ef3f5 clk: Re-use DEFIN... |
2856 |
DEFINE_SHOW_ATTRIBUTE(clk_dump); |
89ac8d7ae clk: handle NULL ... |
2857 |
|
a6059ab98 clk: Show symboli... |
2858 2859 2860 2861 |
static const struct { unsigned long flag; const char *name; } clk_flags[] = { |
40dd71c75 clk: Really show ... |
2862 |
#define ENTRY(f) { f, #f } |
a6059ab98 clk: Show symboli... |
2863 2864 2865 2866 |
ENTRY(CLK_SET_RATE_GATE), ENTRY(CLK_SET_PARENT_GATE), ENTRY(CLK_SET_RATE_PARENT), ENTRY(CLK_IGNORE_UNUSED), |
a6059ab98 clk: Show symboli... |
2867 2868 2869 2870 2871 2872 2873 |
ENTRY(CLK_GET_RATE_NOCACHE), ENTRY(CLK_SET_RATE_NO_REPARENT), ENTRY(CLK_GET_ACCURACY_NOCACHE), ENTRY(CLK_RECALC_NEW_RATES), ENTRY(CLK_SET_RATE_UNGATE), ENTRY(CLK_IS_CRITICAL), ENTRY(CLK_OPS_PARENT_ENABLE), |
9fba738a5 clk: add duty cyc... |
2874 |
ENTRY(CLK_DUTY_CYCLE_PARENT), |
a6059ab98 clk: Show symboli... |
2875 2876 |
#undef ENTRY }; |
fec0ef3f5 clk: Re-use DEFIN... |
2877 |
static int clk_flags_show(struct seq_file *s, void *data) |
a6059ab98 clk: Show symboli... |
2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 |
{ struct clk_core *core = s->private; unsigned long flags = core->flags; unsigned int i; for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { if (flags & clk_flags[i].flag) { seq_printf(s, "%s ", clk_flags[i].name); flags &= ~clk_flags[i].flag; } } if (flags) { /* Unknown flags */ seq_printf(s, "0x%lx ", flags); } return 0; } |
fec0ef3f5 clk: Re-use DEFIN... |
2898 |
DEFINE_SHOW_ATTRIBUTE(clk_flags); |
a6059ab98 clk: Show symboli... |
2899 |
|
/*
 * Print the best available name for @core's candidate parent @i, followed
 * by @terminator (space between entries, newline after the last).
 */
static void possible_parent_show(struct seq_file *s, struct clk_core *core,
				 unsigned int i, char terminator)
{
	struct clk_core *parent;

	/*
	 * Go through the following options to fetch a parent's name.
	 *
	 * 1. Fetch the registered parent clock and use its name
	 * 2. Use the global (fallback) name if specified
	 * 3. Use the local fw_name if provided
	 * 4. Fetch parent clock's clock-output-name if DT index was set
	 *
	 * This may still fail in some cases, such as when the parent is
	 * specified directly via a struct clk_hw pointer, but it isn't
	 * registered (yet).
	 */
	parent = clk_core_get_parent_by_index(core, i);
	if (parent)
		seq_puts(s, parent->name);
	else if (core->parents[i].name)
		seq_puts(s, core->parents[i].name);
	else if (core->parents[i].fw_name)
		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
	else if (core->parents[i].index >= 0)
		seq_puts(s,
			 of_clk_get_parent_name(core->of_node,
						core->parents[i].index));
	else
		seq_puts(s, "(missing)");

	seq_putc(s, terminator);
}
fec0ef3f5 clk: Re-use DEFIN... |
2933 |
static int possible_parents_show(struct seq_file *s, void *data) |
92031575c clk: add clk_poss... |
2934 2935 2936 2937 2938 |
{ struct clk_core *core = s->private; int i; for (i = 0; i < core->num_parents - 1; i++) |
11f6c2307 clk: Simplify deb... |
2939 |
possible_parent_show(s, core, i, ' '); |
92031575c clk: add clk_poss... |
2940 |
|
11f6c2307 clk: Simplify deb... |
2941 2942 |
possible_parent_show(s, core, i, ' '); |
92031575c clk: add clk_poss... |
2943 2944 2945 |
return 0; } |
fec0ef3f5 clk: Re-use DEFIN... |
2946 |
DEFINE_SHOW_ATTRIBUTE(possible_parents); |
92031575c clk: add clk_poss... |
2947 |
|
e5e89247a clk: Add clk_pare... |
2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 |
/* Print the name of @core's currently selected parent, if any. */
static int current_parent_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	struct clk_core *parent = core->parent;

	if (parent)
		seq_printf(s, "%s\n", parent->name);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_parent);
9fba738a5 clk: add duty cyc... |
2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 |
/* Print @core's duty cycle as "numerator/denominator". */
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;

	seq_printf(s, "%u/%u\n", core->duty.num, core->duty.den);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
1bd37a467 clk: Add clk_min/... |
2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 |
/* Print the aggregate lower rate boundary imposed on @core. */
static int clk_min_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long low, high;

	/* boundaries are derived from the consumer list; needs prepare_lock */
	clk_prepare_lock();
	clk_core_get_boundaries(core, &low, &high);
	clk_prepare_unlock();

	seq_printf(s, "%lu\n", low);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_min_rate);

/* Print the aggregate upper rate boundary imposed on @core. */
static int clk_max_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long low, high;

	/* boundaries are derived from the consumer list; needs prepare_lock */
	clk_prepare_lock();
	clk_core_get_boundaries(core, &low, &high);
	clk_prepare_unlock();

	seq_printf(s, "%lu\n", high);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
8a26bbbb9 clk: no need to c... |
2999 |
/*
 * Create the per-clock debugfs directory under @pdentry and populate it with
 * the standard set of read-only attribute files.  Called at registration time
 * once debugfs is up, or from clk_debug_init() for early-registered clocks.
 */
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *root;

	if (!core || !pdentry)
		return;

	root = debugfs_create_dir(core->name, pdentry);
	core->dentry = root;

	debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
	debugfs_create_file("clk_duty_cycle", 0444, root, core,
			    &clk_duty_cycle_fops);

	/* clk_parent only makes sense when there is at least one parent */
	if (core->num_parents > 0)
		debugfs_create_file("clk_parent", 0444, root, core,
				    &current_parent_fops);

	/* possible parents are only interesting for muxes (> 1 parent) */
	if (core->num_parents > 1)
		debugfs_create_file("clk_possible_parents", 0444, root, core,
				    &possible_parents_fops);

	/* let the provider add driver-specific debugfs entries */
	if (core->ops->debug_init)
		core->ops->debug_init(core->hw, core->dentry);
}
035a61c31 clk: Make clk API... |
3032 3033 |
/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized. Otherwise it bails out early since the debugfs clk directory
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static void clk_debug_register(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	/* always track the clk so clk_debug_init() can pick it up later */
	hlist_add_head(&core->debug_node, &clk_debug_list);
	if (inited)
		clk_debug_create_one(core, rootdir);
	mutex_unlock(&clk_debug_lock);
}
b2476490e clk: introduce th... |
3049 |
|
4dff95dc9 clk: Remove forwa... |
3050 |
/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	/* clear the stale dentry so nobody removes it a second time */
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}
e59c5371f clk: introduce cl... |
3066 |
|
4dff95dc9 clk: Remove forwa... |
3067 |
/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time, all other clks
 * added dynamically will be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;

	rootdir = debugfs_create_dir("clk", NULL);

	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
			    &clk_summary_fops);
	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
			    &clk_dump_fops);
	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
			    &clk_summary_fops);
	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
			    &clk_dump_fops);

	/* create per-clock dirs for everything registered before this point */
	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	/* from now on clk_debug_register() creates entries immediately */
	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
8a26bbbb9 clk: no need to c... |
3102 |
/* !CONFIG_DEBUG_FS: all debugfs bookkeeping compiles away to no-ops */
static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif
3d3801eff clk: introduce cl... |
3111 |
|
b34dd7eb3 clk: walk orphan ... |
3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 |
/*
 * Walk the orphan list and adopt any clock whose parent has since been
 * registered.  Caller must hold the prepare lock.
 */
static void clk_core_reparent_orphans_nolock(void)
{
	struct clk_core *orphan;
	struct hlist_node *tmp2;

	/*
	 * walk the list of orphan clocks and reparent any that newly finds a
	 * parent.
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		struct clk_core *parent = __clk_init_parent(orphan);

		/*
		 * We need to use __clk_set_parent_before() and _after() to
		 * properly migrate any prepare/enable count of the orphan
		 * clock. This is important for CLK_IS_CRITICAL clocks, which
		 * are enabled during init but might not have a parent yet.
		 */
		if (parent) {
			/* update the clk tree topology */
			__clk_set_parent_before(orphan, parent);
			__clk_set_parent_after(orphan, parent, NULL);
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
	}
}
3d3801eff clk: introduce cl... |
3139 |
/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core:	clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_core_init(struct clk_core *core)
{
	int ret;
	unsigned long rate;

	if (!core)
		return -EINVAL;

	clk_prepare_lock();

	/* power up the provider's device before touching its hardware */
	ret = clk_pm_runtime_get(core);
	if (ret)
		goto unlock;

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multi parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 *
	 * If it exist, this callback should called before any other callback of
	 * the clock
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
	 * list.  If clk doesn't have any parents then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent) {
		hlist_add_head(&core->child_node,
				&core->parent->children);
		/* a child of an orphan is itself unreachable from a root */
		core->orphan = core->parent->orphan;
	} else if (!core->num_parents) {
		hlist_add_head(&core->child_node, &clk_root_list);
		core->orphan = false;
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		core->orphan = true;
	}

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's duty cycle.
	 */
	clk_core_update_duty_cycle_nolock(core);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
	 * don't get accidentally disabled when walking the orphan tree and
	 * reparenting clocks
	 */
	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		ret = clk_core_prepare(core);
		if (ret)
			goto out;

		flags = clk_enable_lock();
		ret = clk_core_enable(core);
		clk_enable_unlock(flags);
		if (ret) {
			/* undo the prepare so counts stay balanced */
			clk_core_unprepare(core);
			goto out;
		}
	}

	clk_core_reparent_orphans_nolock();


	kref_init(&core->ref);
out:
	clk_pm_runtime_put(core);
unlock:
	clk_prepare_unlock();

	/* debugfs registration takes its own lock; done outside prepare_lock */
	if (!ret)
		clk_debug_register(core);

	return ret;
}
1df4046a9 clk: Combine __cl... |
3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 |
/**
 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
 * @core: clk to add consumer to
 * @clk: consumer to link to a clk
 */
static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
{
	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &core->clks);
	clk_prepare_unlock();
}

/**
 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
 * @clk: consumer to unlink
 */
static void clk_core_unlink_consumer(struct clk *clk)
{
	/* unlike the link path, callers are expected to already hold the lock */
	lockdep_assert_held(&prepare_lock);
	hlist_del(&clk->clks_node);
}

/**
 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
 * @core: clk to allocate a consumer for
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * Returns: clk consumer left unlinked from the consumer list
 */
static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = core;
	clk->dev_id = dev_id;
	/* con_id may not outlive us; keep our own (possibly .rodata) copy */
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
	clk->max_rate = ULONG_MAX;

	return clk;
}
035a61c31 clk: Make clk API... |
3358 |
|
1df4046a9 clk: Combine __cl... |
3359 3360 3361 3362 3363 3364 3365 3366 |
/**
 * free_clk - Free a clk consumer
 * @clk: clk consumer to free
 *
 * Note, this assumes the clk has been unlinked from the clk_core consumer
 * list.
 */
static void free_clk(struct clk *clk)
{
	/* con_id was duplicated with kstrdup_const() in alloc_clk() */
	kfree_const(clk->con_id);
	kfree(clk);
}
0197b3ea0 clk: Use a separa... |
3371 |
|
293ba3b4a clk: Fix double f... |
3372 |
/**
 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
 * a clk_hw
 * @dev: clk consumer device
 * @hw: clk_hw associated with the clk being consumed
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * This is the main function used to create a clk pointer for use by clk
 * consumers. It connects a consumer to the clk_core and clk_hw structures
 * used by the framework and clk provider respectively.
 */
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
			      const char *dev_id, const char *con_id)
{
	struct clk *clk;
	struct clk_core *core;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	core = hw->core;
	clk = alloc_clk(core, dev_id, con_id);
	if (IS_ERR(clk))
		return clk;
	clk->dev = dev;

	/* pin the provider module while a consumer holds a reference */
	if (!try_module_get(core->owner)) {
		free_clk(clk);
		return ERR_PTR(-ENOENT);
	}

	kref_get(&core->ref);
	clk_core_link_consumer(core, clk);

	return clk;
}
fc0c209c1 clk: Allow parent... |
3410 |
/*
 * Duplicate @src into *@dst_p with kstrdup_const().  A NULL @src is only an
 * error when @must_exist is set; otherwise it is silently accepted and
 * *@dst_p is left untouched.  Returns 0, -EINVAL or -ENOMEM.
 */
static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
{
	const char *copy;

	if (!src)
		return must_exist ? -EINVAL : 0;

	*dst_p = copy = kstrdup_const(src, GFP_KERNEL);

	return copy ? 0 : -ENOMEM;
}
0214f33c4 clk: Overwrite cl... |
3426 3427 |
/*
 * Build core->parents, the cached table of possible parents, from the
 * registration-time init data.  Each entry stores a name/fw_name/hw/index
 * tuple that is resolved to a clk_core pointer on demand later.
 * Returns 0 on success or a negative errno; on failure all partial
 * allocations are unwound.
 */
static int clk_core_populate_parent_map(struct clk_core *core,
					const struct clk_init_data *init)
{
	u8 num_parents = init->num_parents;
	const char * const *parent_names = init->parent_names;
	const struct clk_hw **parent_hws = init->parent_hws;
	const struct clk_parent_data *parent_data = init->parent_data;
	int i, ret = 0;
	struct clk_parent_map *parents, *parent;

	if (!num_parents)
		return 0;

	/*
	 * Avoid unnecessary string look-ups of clk_core's possible parents by
	 * having a cache of names/clk_hw pointers to clk_core pointers.
	 */
	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	core->parents = parents;
	if (!parents)
		return -ENOMEM;

	/* Copy everything over because it might be __initdata */
	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
		parent->index = -1;
		if (parent_names) {
			/* throw a WARN if any entries are NULL */
			WARN(!parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);
			ret = clk_cpy_name(&parent->name, parent_names[i],
					   true);
		} else if (parent_data) {
			parent->hw = parent_data[i].hw;
			parent->index = parent_data[i].index;
			ret = clk_cpy_name(&parent->fw_name,
					   parent_data[i].fw_name, false);
			if (!ret)
				ret = clk_cpy_name(&parent->name,
						   parent_data[i].name,
						   false);
		} else if (parent_hws) {
			parent->hw = parent_hws[i];
		} else {
			ret = -EINVAL;
			WARN(1, "Must specify parents if num_parents > 0\n");
		}

		if (ret) {
			/* free the strings copied so far, then the array */
			do {
				kfree_const(parents[i].name);
				kfree_const(parents[i].fw_name);
			} while (--i >= 0);
			kfree(parents);

			return ret;
		}
	}

	return 0;
}

/* Release the strings and array built by clk_core_populate_parent_map(). */
static void clk_core_free_parent_map(struct clk_core *core)
{
	int i = core->num_parents;

	if (!core->num_parents)
		return;

	while (--i >= 0) {
		kfree_const(core->parents[i].name);
		kfree_const(core->parents[i].fw_name);
	}

	kfree(core->parents);
}
89a5ddcc7 clk: Add of_clk_h... |
3504 3505 |
/*
 * Allocate a struct clk_core, copy the (possibly __initdata) init data out
 * of @hw->init and register the clock with the framework.  On success the
 * provider-side struct clk (hw->clk) is returned; on failure everything
 * allocated here is unwound via the goto chain and an ERR_PTR is returned.
 */
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
	int ret;
	struct clk_core *core;
	const struct clk_init_data *init = hw->init;

	/*
	 * The init data is not supposed to be used outside of registration path.
	 * Set it to NULL so that provider drivers can't use it either and so that
	 * we catch use of hw->init early on in the core.
	 */
	hw->init = NULL;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (WARN_ON(!init->ops)) {
		ret = -EINVAL;
		goto fail_ops;
	}
	core->ops = init->ops;

	/* only touch runtime PM if the provider's device actually uses it */
	if (dev && pm_runtime_enabled(dev))
		core->rpm_enabled = true;
	core->dev = dev;
	core->of_node = np;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = init->flags;
	core->num_parents = init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	ret = clk_core_populate_parent_map(core, init);
	if (ret)
		goto fail_parents;

	INIT_HLIST_HEAD(&core->clks);

	/*
	 * Don't call clk_hw_create_clk() here because that would pin the
	 * provider module to itself and prevent it from ever being removed.
	 */
	hw->clk = alloc_clk(core, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_create_clk;
	}

	clk_core_link_consumer(hw->core, hw->clk);

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	/* __clk_core_init() failed: unlink and free the consumer handle */
	clk_prepare_lock();
	clk_core_unlink_consumer(hw->clk);
	clk_prepare_unlock();

	free_clk(hw->clk);
	hw->clk = NULL;

fail_create_clk:
	clk_core_free_parent_map(core);
fail_parents:
fail_ops:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
fceaa7d80 clk: Prepare for ... |
3586 3587 |
/**
 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
 * @dev: Device to get device node of
 *
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if dev doesn't have a device node, or NULL if neither
 * @dev or @dev->parent have a device node.
 */
static struct device_node *dev_or_parent_of_node(struct device *dev)
{
	struct device_node *np;

	if (!dev)
		return NULL;

	np = dev_of_node(dev);
	if (!np)
		np = dev_of_node(dev->parent);

	return np;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the *deprecated* interface for populating the clock tree with
 * new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);
4143804c4 clk: Add {devm_}c... |
3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 |
/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
			       hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
89a5ddcc7 clk: Add of_clk_h... |
3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 |
/*
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock tree
 * with new clock nodes when a struct device is not available, but a struct
 * device_node is. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
 */
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
6e5ab41b1 clk: Update some ... |
3659 |
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);

	/* the kref is always dropped under the prepare lock */
	lockdep_assert_held(&prepare_lock);

	clk_core_free_parent_map(core);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	/* disabling a clock after its provider is gone is a consumer bug */
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};
bdcf1dc25 clk: Evict unregi... |
3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 |
static void clk_core_evict_parent_cache_subtree(struct clk_core *root, struct clk_core *target) { int i; struct clk_core *child; for (i = 0; i < root->num_parents; i++) if (root->parents[i].core == target) root->parents[i].core = NULL; hlist_for_each_entry(child, &root->children, child_node) clk_core_evict_parent_cache_subtree(child, target); } /* Remove this clk from all parent caches */ static void clk_core_evict_parent_cache(struct clk_core *core) { struct hlist_head **lists; struct clk_core *root; lockdep_assert_held(&prepare_lock); for (lists = all_lists; *lists; lists++) hlist_for_each_entry(root, *lists, child_node) clk_core_evict_parent_cache_subtree(root, core); } |
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	/* Tear down debugfs entries before the core goes away. */
	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	/* Already unregistered once (nodrv ops installed)? Bail out. */
	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	/* No other clock may keep a stale cached pointer to this core. */
	clk_core_evict_parent_cache(clk->core);

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
					__func__, clk->core->name);

	/* Drop the registration reference; last clk_put() frees the core. */
	kref_put(&clk->core->ref, __clk_release);
	free_clk(clk);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
4143804c4 clk: Add {devm_}c... |
3783 3784 3785 3786 3787 3788 3789 3790 3791 |
/** * clk_hw_unregister - unregister a currently registered clk_hw * @hw: hardware-specific clock data to unregister */ void clk_hw_unregister(struct clk_hw *hw) { clk_unregister(hw->clk); } EXPORT_SYMBOL_GPL(clk_hw_unregister); |
/* devres destructor: unregister the struct clk stored in the payload. */
static void devm_clk_release(struct device *dev, void *res)
{
	struct clk **clkp = res;

	clk_unregister(*clkp);
}
/* devres destructor: unregister the struct clk_hw stored in the payload. */
static void devm_clk_hw_release(struct device *dev, void *res)
{
	struct clk_hw **hwp = res;

	clk_hw_unregister(*hwp);
}
46c8773a5 clk: Add devm_clk... |
3800 3801 3802 3803 3804 |
/** * devm_clk_register - resource managed clk_register() * @dev: device that is registering this clock * @hw: link to hardware-specific clock data * |
9fe9b7ab4 clk: Document dep... |
3805 3806 3807 3808 |
* Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. * * Clocks returned from this function are automatically clk_unregister()ed on * driver detach. See clk_register() for more information. |
46c8773a5 clk: Add devm_clk... |
3809 3810 3811 3812 |
*/ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) { struct clk *clk; |
293ba3b4a clk: Fix double f... |
3813 |
struct clk **clkp; |
46c8773a5 clk: Add devm_clk... |
3814 |
|
293ba3b4a clk: Fix double f... |
3815 3816 |
clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); if (!clkp) |
46c8773a5 clk: Add devm_clk... |
3817 |
return ERR_PTR(-ENOMEM); |
293ba3b4a clk: Fix double f... |
3818 3819 3820 3821 |
clk = clk_register(dev, hw); if (!IS_ERR(clk)) { *clkp = clk; devres_add(dev, clkp); |
46c8773a5 clk: Add devm_clk... |
3822 |
} else { |
293ba3b4a clk: Fix double f... |
3823 |
devres_free(clkp); |
46c8773a5 clk: Add devm_clk... |
3824 3825 3826 3827 3828 |
} return clk; } EXPORT_SYMBOL_GPL(devm_clk_register); |
4143804c4 clk: Add {devm_}c... |
3829 3830 3831 3832 3833 |
/** * devm_clk_hw_register - resource managed clk_hw_register() * @dev: device that is registering this clock * @hw: link to hardware-specific clock data * |
c47265ad6 clk: fix comment ... |
3834 |
* Managed clk_hw_register(). Clocks registered by this function are |
4143804c4 clk: Add {devm_}c... |
3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 |
* automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() * for more information. */ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) { struct clk_hw **hwp; int ret; hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); if (!hwp) return -ENOMEM; ret = clk_hw_register(dev, hw); if (!ret) { *hwp = hw; devres_add(dev, hwp); } else { devres_free(hwp); } return ret; } EXPORT_SYMBOL_GPL(devm_clk_hw_register); |
/* devres match callback: true when the payload holds the clk in @data. */
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *clk = res;

	if (WARN_ON(!clk))
		return 0;

	return clk == data;
}
/* devres match callback: true when the payload holds the clk_hw in @data. */
static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *found = res;

	if (WARN_ON(!found))
		return 0;

	return found == data;
}
46c8773a5 clk: Add devm_clk... |
3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 |
/** * devm_clk_unregister - resource managed clk_unregister() * @clk: clock to unregister * * Deallocate a clock allocated with devm_clk_register(). Normally * this function will not need to be called and the resource management * code will ensure that the resource is freed. */ void devm_clk_unregister(struct device *dev, struct clk *clk) { WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); } EXPORT_SYMBOL_GPL(devm_clk_unregister); |
4143804c4 clk: Add {devm_}c... |
3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 |
/** * devm_clk_hw_unregister - resource managed clk_hw_unregister() * @dev: device that is unregistering the hardware-specific clock data * @hw: link to hardware-specific clock data * * Unregister a clk_hw registered with devm_clk_hw_register(). Normally * this function will not need to be called and the resource management * code will ensure that the resource is freed. */ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) { WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, hw)); } EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); |
/*
 * clkdev helpers
 */

/* Release one consumer handle; drops its rate constraints and core ref. */
void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	/* Detach this consumer so its min/max no longer constrain the core. */
	hlist_del(&clk->clks_node);

	/*
	 * If this consumer's min/max window was the one clamping the rate,
	 * re-evaluate the rate now that the constraint is gone.
	 */
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	/* Save owner first: kref_put() may free the core it lives in. */
	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}
b2476490e clk: introduce th... |
3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 |
/*** clk rate change notifiers ***/ /** * clk_notifier_register - add a clk rate change notifier * @clk: struct clk * to watch * @nb: struct notifier_block * with callback info * * Request notification when clk's rate changes. This uses an SRCU * notifier because we want it to block and notifier unregistrations are * uncommon. The callbacks associated with the notifier must not * re-enter into the clk framework by calling any top-level clk APIs; * this will cause a nested prepare_lock mutex. * |
198bb5949 clk: fix a typo i... |
3947 3948 3949 |
* In all notification cases (pre, post and abort rate change) the original * clock rate is passed to the callback via struct clk_notifier_data.old_rate * and the new frequency is passed via struct clk_notifier_data.new_rate. |
b2476490e clk: introduce th... |
3950 |
* |
b2476490e clk: introduce th... |
3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 |
* clk_notifier_register() must be called from non-atomic context. * Returns -EINVAL if called with null arguments, -ENOMEM upon * allocation failure; otherwise, passes along the return value of * srcu_notifier_chain_register(). */ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) { struct clk_notifier *cn; int ret = -ENOMEM; if (!clk || !nb) return -EINVAL; |
eab89f690 clk: abstract loc... |
3963 |
clk_prepare_lock(); |
b2476490e clk: introduce th... |
3964 3965 3966 3967 3968 3969 3970 3971 |
/* search the list of notifiers for this clk */ list_for_each_entry(cn, &clk_notifier_list, node) if (cn->clk == clk) break; /* if clk wasn't in the notifier list, allocate new clk_notifier */ if (cn->clk != clk) { |
1808a3201 clk: Improve a si... |
3972 |
cn = kzalloc(sizeof(*cn), GFP_KERNEL); |
b2476490e clk: introduce th... |
3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 |
if (!cn) goto out; cn->clk = clk; srcu_init_notifier_head(&cn->notifier_head); list_add(&cn->node, &clk_notifier_list); } ret = srcu_notifier_chain_register(&cn->notifier_head, nb); |
035a61c31 clk: Make clk API... |
3983 |
clk->core->notifier_count++; |
b2476490e clk: introduce th... |
3984 3985 |
out: |
eab89f690 clk: abstract loc... |
3986 |
clk_prepare_unlock(); |
b2476490e clk: introduce th... |
3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 |
return ret; } EXPORT_SYMBOL_GPL(clk_notifier_register); /** * clk_notifier_unregister - remove a clk rate change notifier * @clk: struct clk * * @nb: struct notifier_block * with callback info * * Request no further notification for changes to 'clk' and frees memory * allocated in clk_notifier_register. * * Returns -EINVAL if called with null arguments; otherwise, passes * along the return value of srcu_notifier_chain_unregister(). */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) { struct clk_notifier *cn = NULL; int ret = -EINVAL; if (!clk || !nb) return -EINVAL; |
eab89f690 clk: abstract loc... |
4010 |
clk_prepare_lock(); |
b2476490e clk: introduce th... |
4011 4012 4013 4014 4015 4016 4017 |
list_for_each_entry(cn, &clk_notifier_list, node) if (cn->clk == clk) break; if (cn->clk == clk) { ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); |
035a61c31 clk: Make clk API... |
4018 |
clk->core->notifier_count--; |
b2476490e clk: introduce th... |
4019 4020 4021 4022 |
/* XXX the notifier code should handle this better */ if (!cn->notifier_head.head) { srcu_cleanup_notifier_head(&cn->notifier_head); |
72b5322f1 clk: remove notif... |
4023 |
list_del(&cn->node); |
b2476490e clk: introduce th... |
4024 4025 4026 4027 4028 4029 |
kfree(cn); } } else { ret = -ENOENT; } |
eab89f690 clk: abstract loc... |
4030 |
clk_prepare_unlock(); |
b2476490e clk: introduce th... |
4031 4032 4033 4034 |
return ret; } EXPORT_SYMBOL_GPL(clk_notifier_unregister); |
#ifdef CONFIG_OF
/* Retry parent lookup for all orphan clocks, under the prepare lock. */
static void clk_core_reparent_orphans(void)
{
	clk_prepare_lock();
	clk_core_reparent_orphans_nolock();
	clk_prepare_unlock();
}
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
 *       struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};
/* Start of the CLK_OF_DECLARE() table, laid out by the linker script. */
extern struct of_device_id __clk_of_table;
/* Zeroed terminator entry placed at the end of that table. */
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

/* Registered DT clock providers; guarded by of_clk_mutex. */
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
/* Provider callback for single-clock providers: @data is the struct clk. */
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
/* Provider callback for single-clock providers: @data is the struct clk_hw. */
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
494bfec99 clk: add of_clk_s... |
4075 4076 4077 4078 4079 4080 |
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) { struct clk_onecell_data *clk_data = data; unsigned int idx = clkspec->args[0]; if (idx >= clk_data->clk_num) { |
7e96353c3 clk: Use %u to fo... |
4081 4082 |
pr_err("%s: invalid clock index %u ", __func__, idx); |
494bfec99 clk: add of_clk_s... |
4083 4084 4085 4086 4087 4088 |
return ERR_PTR(-EINVAL); } return clk_data->clks[idx]; } EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); |
0861e5b8c clk: Add clk_hw O... |
4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 |
struct clk_hw * of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) { struct clk_hw_onecell_data *hw_data = data; unsigned int idx = clkspec->args[0]; if (idx >= hw_data->num) { pr_err("%s: invalid index %u ", __func__, idx); return ERR_PTR(-EINVAL); } return hw_data->hws[idx]; } EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); |
766e6a4ec clk: add DT clock... |
4104 4105 4106 4107 4108 |
/** * of_clk_add_provider() - Register a clock provider for a node * @np: Device node pointer associated with clock provider * @clk_src_get: callback for decoding clock * @data: context pointer for @clk_src_get callback. |
9fe9b7ab4 clk: Document dep... |
4109 4110 |
* * This function is *deprecated*. Use of_clk_add_hw_provider() instead. |
766e6a4ec clk: add DT clock... |
4111 4112 4113 4114 4115 4116 4117 |
*/ int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, void *data), void *data) { struct of_clk_provider *cp; |
86be408bf clk: Support for ... |
4118 |
int ret; |
766e6a4ec clk: add DT clock... |
4119 |
|
1808a3201 clk: Improve a si... |
4120 |
cp = kzalloc(sizeof(*cp), GFP_KERNEL); |
766e6a4ec clk: add DT clock... |
4121 4122 4123 4124 4125 4126 |
if (!cp) return -ENOMEM; cp->node = of_node_get(np); cp->data = data; cp->get = clk_src_get; |
d6782c263 clk: Provide not ... |
4127 |
mutex_lock(&of_clk_mutex); |
766e6a4ec clk: add DT clock... |
4128 |
list_add(&cp->link, &of_clk_providers); |
d6782c263 clk: Provide not ... |
4129 |
mutex_unlock(&of_clk_mutex); |
166739312 clk: Convert to u... |
4130 4131 |
pr_debug("Added clock from %pOF ", np); |
766e6a4ec clk: add DT clock... |
4132 |
|
b34dd7eb3 clk: walk orphan ... |
4133 |
clk_core_reparent_orphans(); |
86be408bf clk: Support for ... |
4134 4135 4136 4137 4138 |
ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); return ret; |
766e6a4ec clk: add DT clock... |
4139 4140 4141 4142 |
} EXPORT_SYMBOL_GPL(of_clk_add_provider); /** |
0861e5b8c clk: Add clk_hw O... |
4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 |
* of_clk_add_hw_provider() - Register a clock provider for a node * @np: Device node pointer associated with clock provider * @get: callback for decoding clk_hw * @data: context pointer for @get callback. */ int of_clk_add_hw_provider(struct device_node *np, struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data), void *data) { struct of_clk_provider *cp; int ret; cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) return -ENOMEM; cp->node = of_node_get(np); cp->data = data; cp->get_hw = get; mutex_lock(&of_clk_mutex); list_add(&cp->link, &of_clk_providers); mutex_unlock(&of_clk_mutex); |
166739312 clk: Convert to u... |
4167 4168 |
pr_debug("Added clk_hw provider from %pOF ", np); |
0861e5b8c clk: Add clk_hw O... |
4169 |
|
b34dd7eb3 clk: walk orphan ... |
4170 |
clk_core_reparent_orphans(); |
0861e5b8c clk: Add clk_hw O... |
4171 4172 4173 4174 4175 4176 4177 |
ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); return ret; } EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); |
/* devres destructor: remove the provider registered for the stored node. */
static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	struct device_node **np = res;

	of_clk_del_provider(*np);
}
05502bf9e clk: of-provider:... |
4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 |
/* * We allow a child device to use its parent device as the clock provider node * for cases like MFD sub-devices where the child device driver wants to use * devm_*() APIs but not list the device in DT as a sub-node. */ static struct device_node *get_clk_provider_node(struct device *dev) { struct device_node *np, *parent_np; np = dev->of_node; parent_np = dev->parent ? dev->parent->of_node : NULL; if (!of_find_property(np, "#clock-cells", NULL)) if (of_find_property(parent_np, "#clock-cells", NULL)) np = parent_np; return np; } |
e45838b52 clk: Add kerneldo... |
4200 4201 4202 4203 4204 4205 |
/** * devm_of_clk_add_hw_provider() - Managed clk provider node registration * @dev: Device acting as the clock provider (used for DT node and lifetime) * @get: callback for decoding clk_hw * @data: context pointer for @get callback * |
05502bf9e clk: of-provider:... |
4206 4207 4208 4209 4210 |
* Registers clock provider for given device's node. If the device has no DT * node or if the device node lacks of clock provider information (#clock-cells) * then the parent device's node is scanned for this information. If parent node * has the #clock-cells then it is used in registration. Provider is * automatically released at device exit. |
e45838b52 clk: Add kerneldo... |
4211 4212 4213 |
* * Return: 0 on success or an errno on failure. */ |
aa795c41d clk: Add devm_of_... |
4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 |
int devm_of_clk_add_hw_provider(struct device *dev, struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data), void *data) { struct device_node **ptr, *np; int ret; ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; |
05502bf9e clk: of-provider:... |
4226 |
np = get_clk_provider_node(dev); |
aa795c41d clk: Add devm_of_... |
4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 |
ret = of_clk_add_hw_provider(np, get, data); if (!ret) { *ptr = np; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); |
0861e5b8c clk: Add clk_hw O... |
4238 |
/** |
766e6a4ec clk: add DT clock... |
4239 4240 4241 4242 4243 4244 |
* of_clk_del_provider() - Remove a previously registered clock provider * @np: Device node pointer associated with clock provider */ void of_clk_del_provider(struct device_node *np) { struct of_clk_provider *cp; |
d6782c263 clk: Provide not ... |
4245 |
mutex_lock(&of_clk_mutex); |
766e6a4ec clk: add DT clock... |
4246 4247 4248 4249 4250 4251 4252 4253 |
list_for_each_entry(cp, &of_clk_providers, link) { if (cp->node == np) { list_del(&cp->link); of_node_put(cp->node); kfree(cp); break; } } |
d6782c263 clk: Provide not ... |
4254 |
mutex_unlock(&of_clk_mutex); |
766e6a4ec clk: add DT clock... |
4255 4256 |
} EXPORT_SYMBOL_GPL(of_clk_del_provider); |
/* devres match callback: true when the stored node equals @data. */
static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}
e45838b52 clk: Add kerneldo... |
4266 4267 4268 4269 |
/** * devm_of_clk_del_provider() - Remove clock provider registered using devm * @dev: Device to whose lifetime the clock provider was bound */ |
aa795c41d clk: Add devm_of_... |
4270 4271 4272 |
void devm_of_clk_del_provider(struct device *dev) { int ret; |
05502bf9e clk: of-provider:... |
4273 |
struct device_node *np = get_clk_provider_node(dev); |
aa795c41d clk: Add devm_of_... |
4274 4275 |
ret = devres_release(dev, devm_of_clk_release_provider, |
05502bf9e clk: of-provider:... |
4276 |
devm_clk_provider_match, np); |
aa795c41d clk: Add devm_of_... |
4277 4278 4279 4280 |
WARN_ON(ret); } EXPORT_SYMBOL(devm_of_clk_del_provider); |
/**
 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
 * @np: device node to parse clock specifier from
 * @index: index of phandle to parse clock out of. If index < 0, @name is used
 * @name: clock name to find and parse. If name is NULL, the index is used
 * @out_args: Result of parsing the clock specifier
 *
 * Parses a device node's "clocks" and "clock-names" properties to find the
 * phandle and cells for the index or name that is desired. The resulting clock
 * specifier is placed into @out_args, or an errno is returned when there's a
 * parsing error. The @index argument is ignored if @name is non-NULL.
 *
 * Example:
 *
 * phandle1: clock-controller@1 {
 *	#clock-cells = <2>;
 * }
 *
 * phandle2: clock-controller@2 {
 *	#clock-cells = <1>;
 * }
 *
 * clock-consumer@3 {
 *	clocks = <&phandle1 1 2 &phandle2 3>;
 *	clock-names = "name1", "name2";
 * }
 *
 * To get a device_node for `clock-controller@2' node you may call this
 * function a few different ways:
 *
 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
 *
 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
 * the "clock-names" property of @np.
 */
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args)
{
	int ret = -ENOENT;

	/* Walk up the tree of devices looking for a clock property that matches */
	while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property.  If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
		if (name)
			index = of_property_match_string(np, "clock-names", name);
		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, out_args);
		if (!ret)
			break;
		/* Name matched but the phandle failed to parse: stop here. */
		if (name && index >= 0)
			break;

		/*
		 * No matching clock found on this node.  If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "clock-ranges", NULL))
			break;
		/* A by-index lookup restarts at the parent's first clock. */
		index = 0;
	}

	return ret;
}
0861e5b8c clk: Add clk_hw O... |
4354 4355 4356 4357 4358 |
static struct clk_hw * __of_clk_get_hw_from_provider(struct of_clk_provider *provider, struct of_phandle_args *clkspec) { struct clk *clk; |
0861e5b8c clk: Add clk_hw O... |
4359 |
|
74002fcde clk: Simplify __o... |
4360 4361 |
if (provider->get_hw) return provider->get_hw(clkspec, provider->data); |
0861e5b8c clk: Add clk_hw O... |
4362 |
|
74002fcde clk: Simplify __o... |
4363 4364 4365 4366 |
clk = provider->get(clkspec, provider->data); if (IS_ERR(clk)) return ERR_CAST(clk); return __clk_get_hw(clk); |
0861e5b8c clk: Add clk_hw O... |
4367 |
} |
cf13f2896 clk: Move of_clk_... |
4368 4369 |
static struct clk_hw * of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) |
766e6a4ec clk: add DT clock... |
4370 4371 |
{ struct of_clk_provider *provider; |
1df4046a9 clk: Combine __cl... |
4372 |
struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); |
766e6a4ec clk: add DT clock... |
4373 |
|
306c342f9 clk: Replace of_c... |
4374 4375 |
if (!clkspec) return ERR_PTR(-EINVAL); |
306c342f9 clk: Replace of_c... |
4376 |
mutex_lock(&of_clk_mutex); |
766e6a4ec clk: add DT clock... |
4377 |
list_for_each_entry(provider, &of_clk_providers, link) { |
f155d15b6 clk: Return error... |
4378 |
if (provider->node == clkspec->np) { |
0861e5b8c clk: Add clk_hw O... |
4379 |
hw = __of_clk_get_hw_from_provider(provider, clkspec); |
1df4046a9 clk: Combine __cl... |
4380 4381 |
if (!IS_ERR(hw)) break; |
73e0e496a clkdev: Always al... |
4382 |
} |
766e6a4ec clk: add DT clock... |
4383 |
} |
306c342f9 clk: Replace of_c... |
4384 |
mutex_unlock(&of_clk_mutex); |
d6782c263 clk: Provide not ... |
4385 |
|
4472287a3 clk: Introduce of... |
4386 |
return hw; |
d6782c263 clk: Provide not ... |
4387 |
} |
306c342f9 clk: Replace of_c... |
4388 4389 4390 4391 4392 4393 4394 4395 |
/** * of_clk_get_from_provider() - Lookup a clock from a clock provider * @clkspec: pointer to a clock specifier data structure * * This function looks up a struct clk from the registered list of clock * providers, an input is a clock specifier data structure as returned * from the of_parse_phandle_with_args() function call. */ |
d6782c263 clk: Provide not ... |
4396 4397 |
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) { |
4472287a3 clk: Introduce of... |
4398 |
struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); |
efa850487 clk: Inform the c... |
4399 |
return clk_hw_create_clk(NULL, hw, NULL, __func__); |
766e6a4ec clk: add DT clock... |
4400 |
} |
fb4dd2220 clk: Make of_clk_... |
4401 |
EXPORT_SYMBOL_GPL(of_clk_get_from_provider); |
766e6a4ec clk: add DT clock... |
4402 |
|
cf13f2896 clk: Move of_clk_... |
4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 |
struct clk_hw *of_clk_get_hw(struct device_node *np, int index, const char *con_id) { int ret; struct clk_hw *hw; struct of_phandle_args clkspec; ret = of_parse_clkspec(np, index, con_id, &clkspec); if (ret) return ERR_PTR(ret); hw = of_clk_get_hw_from_clkspec(&clkspec); of_node_put(clkspec.np); return hw; } static struct clk *__of_clk_get(struct device_node *np, int index, const char *dev_id, const char *con_id) { struct clk_hw *hw = of_clk_get_hw(np, index, con_id); return clk_hw_create_clk(NULL, hw, dev_id, con_id); } struct clk *of_clk_get(struct device_node *np, int index) { return __of_clk_get(np, index, np->full_name, NULL); } EXPORT_SYMBOL(of_clk_get); /** * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node * @np: pointer to clock consumer node * @name: name of consumer's clock input, or NULL for the first clock reference * * This function parses the clocks and clock-names properties, * and uses them to look up the struct clk from the registered list of clock * providers. */ struct clk *of_clk_get_by_name(struct device_node *np, const char *name) { if (!np) return ERR_PTR(-ENOENT); |
65cf20ad4 clk: fixup defaul... |
4448 |
return __of_clk_get(np, 0, np->full_name, name); |
cf13f2896 clk: Move of_clk_... |
4449 4450 |
} EXPORT_SYMBOL(of_clk_get_by_name); |
/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int num = of_count_phandle_with_args(np, "clocks", "#clock-cells");

	/* Parse errors (negative) count as "no parents". */
	return num > 0 ? num : 0;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
766e6a4ec clk: add DT clock... |
4468 4469 4470 |
const char *of_clk_get_parent_name(struct device_node *np, int index) { struct of_phandle_args clkspec; |
7a0fc1a3d clk: add clock-in... |
4471 |
struct property *prop; |
766e6a4ec clk: add DT clock... |
4472 |
const char *clk_name; |
7a0fc1a3d clk: add clock-in... |
4473 4474 |
const __be32 *vp; u32 pv; |
766e6a4ec clk: add DT clock... |
4475 |
int rc; |
7a0fc1a3d clk: add clock-in... |
4476 |
int count; |
0a4807c2f clk: Make of_clk_... |
4477 |
struct clk *clk; |
766e6a4ec clk: add DT clock... |
4478 |
|
766e6a4ec clk: add DT clock... |
4479 4480 4481 4482 |
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, &clkspec); if (rc) return NULL; |
7a0fc1a3d clk: add clock-in... |
4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 |
index = clkspec.args_count ? clkspec.args[0] : 0; count = 0; /* if there is an indices property, use it to transfer the index * specified into an array offset for the clock-output-names property. */ of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { if (index == pv) { index = count; break; } count++; } |
8da411cc1 clk: let of_clk_g... |
4496 4497 4498 |
/* We went off the end of 'clock-indices' without finding it */ if (prop && !vp) return NULL; |
7a0fc1a3d clk: add clock-in... |
4499 |
|
766e6a4ec clk: add DT clock... |
4500 |
if (of_property_read_string_index(clkspec.np, "clock-output-names", |
7a0fc1a3d clk: add clock-in... |
4501 |
index, |
0a4807c2f clk: Make of_clk_... |
4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 |
&clk_name) < 0) { /* * Best effort to get the name if the clock has been * registered with the framework. If the clock isn't * registered, we return the node name as the name of * the clock as long as #clock-cells = 0. */ clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { if (clkspec.args_count == 0) clk_name = clkspec.np->name; else clk_name = NULL; } else { clk_name = __clk_get_name(clk); clk_put(clk); } } |
766e6a4ec clk: add DT clock... |
4520 4521 4522 4523 4524 |
of_node_put(clkspec.np); return clk_name; } EXPORT_SYMBOL_GPL(of_clk_get_parent_name); |
/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that hold the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int idx;

	/* Stop at the first unresolvable parent; the NULL is stored too. */
	for (idx = 0; idx < size; idx++) {
		parents[idx] = of_clk_get_parent_name(np, idx);
		if (!parents[idx])
			break;
	}

	return idx;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
/*
 * Bookkeeping record used by of_clk_init(): one entry per matching,
 * available clock-provider DT node, held on a local list until the
 * provider's parent clocks are ready and its init callback can run.
 */
struct clock_provider {
	/* provider init function, taken from the of_device_id match data */
	void (*clk_init_cb)(struct device_node *);
	/* provider's DT node; holds a reference taken via of_node_get() */
	struct device_node *np;
	/* entry in of_clk_init()'s clk_provider_list */
	struct list_head node;
};
1771b10d6 clk: respect the ... |
4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 |
/* * This function looks for a parent clock. If there is one, then it * checks that the provider for this parent clock was initialized, in * this case the parent clock will be ready. */ static int parent_ready(struct device_node *np) { int i = 0; while (true) { struct clk *clk = of_clk_get(np, i); /* this parent is ready we can check the next one */ if (!IS_ERR(clk)) { clk_put(clk); i++; continue; } /* at least one parent is not ready, we exit now */ if (PTR_ERR(clk) == -EPROBE_DEFER) return 0; /* * Here we make assumption that the device tree is * written correctly. So an error means that there is * no more parent. As we didn't exit yet, then the * previous parent are ready. If there is no clock * parent, no need to wait for them, then we can * consider their absence as being ready */ return 1; } } |
/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the one-clock-per-node style that are outdated.
 * Those bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
					  int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	/*
	 * Scan every "clock-critical" entry; the loop deliberately does not
	 * break on a match, the flag is simply OR-ed in (idempotent).
	 */
	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions. It also does it by trying
 * to follow the dependencies.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	/* NULL means "use the built-in CLK_OF_DECLARE table". */
	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			/*
			 * Allocation failure: unwind everything queued so
			 * far (dropping the node references taken below)
			 * and give up on clock init entirely.
			 */
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			/* Release the iterator's reference on @np too. */
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		/* Reference dropped when the provider is initialized below. */
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	/*
	 * Repeatedly sweep the list, initializing only providers whose
	 * parent clocks are ready, so dependencies are followed without
	 * an explicit topological sort.
	 */
	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif