Commit 220069945b298d3998c6598b081c466dca259929

Authored by Stephen Boyd
Committed by Daniel Lezcano
1 parent 60faddf6eb

clocksource: arch_timer: Add support for memory mapped timers

Add support for the memory-mapped timers by filling in the
read/write functions and adding some parsing code. Note that we
only register one clocksource, preferring the cp15-based
clocksource over the mmio one.

To keep things simple we register one global clockevent. This
covers the case of UP and SMP systems with only mmio hardware and
systems where the memory-mapped timers are used as the broadcast
timer in low-power modes.

The DT binding allows for per-CPU memory-mapped timers in case we
want to support that in the future, but the code isn't added
here. We also don't do much for hypervisor support, although it
should be possible to support it by searching for at least two
frames where one frame has the virtual capability and then
updating KVM timers to support it.

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <Marc.Zyngier@arm.com>
Cc: Rob Herring <robherring2@gmail.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>

Showing 2 changed files with 350 additions and 56 deletions (side-by-side diff)

drivers/clocksource/arm_arch_timer.c
... ... @@ -16,13 +16,39 @@
16 16 #include <linux/clockchips.h>
17 17 #include <linux/interrupt.h>
18 18 #include <linux/of_irq.h>
  19 +#include <linux/of_address.h>
19 20 #include <linux/io.h>
  21 +#include <linux/slab.h>
20 22  
21 23 #include <asm/arch_timer.h>
22 24 #include <asm/virt.h>
23 25  
24 26 #include <clocksource/arm_arch_timer.h>
25 27  
  28 +#define CNTTIDR 0x08
  29 +#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
  30 +
  31 +#define CNTVCT_LO 0x08
  32 +#define CNTVCT_HI 0x0c
  33 +#define CNTFRQ 0x10
  34 +#define CNTP_TVAL 0x28
  35 +#define CNTP_CTL 0x2c
  36 +#define CNTV_TVAL 0x38
  37 +#define CNTV_CTL 0x3c
  38 +
  39 +#define ARCH_CP15_TIMER BIT(0)
  40 +#define ARCH_MEM_TIMER BIT(1)
  41 +static unsigned arch_timers_present __initdata;
  42 +
  43 +static void __iomem *arch_counter_base;
  44 +
  45 +struct arch_timer {
  46 + void __iomem *base;
  47 + struct clock_event_device evt;
  48 +};
  49 +
  50 +#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
  51 +
26 52 static u32 arch_timer_rate;
27 53  
28 54 enum ppi_nr {
... ... @@ -38,6 +64,7 @@
38 64 static struct clock_event_device __percpu *arch_timer_evt;
39 65  
40 66 static bool arch_timer_use_virtual = true;
  67 +static bool arch_timer_mem_use_virtual;
41 68  
42 69 /*
43 70 * Architected system timer support.
44 71  
... ... @@ -47,14 +74,62 @@
47 74 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
48 75 struct clock_event_device *clk)
49 76 {
50   - arch_timer_reg_write_cp15(access, reg, val);
  77 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
  78 + struct arch_timer *timer = to_arch_timer(clk);
  79 + switch (reg) {
  80 + case ARCH_TIMER_REG_CTRL:
  81 + writel_relaxed(val, timer->base + CNTP_CTL);
  82 + break;
  83 + case ARCH_TIMER_REG_TVAL:
  84 + writel_relaxed(val, timer->base + CNTP_TVAL);
  85 + break;
  86 + }
  87 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
  88 + struct arch_timer *timer = to_arch_timer(clk);
  89 + switch (reg) {
  90 + case ARCH_TIMER_REG_CTRL:
  91 + writel_relaxed(val, timer->base + CNTV_CTL);
  92 + break;
  93 + case ARCH_TIMER_REG_TVAL:
  94 + writel_relaxed(val, timer->base + CNTV_TVAL);
  95 + break;
  96 + }
  97 + } else {
  98 + arch_timer_reg_write_cp15(access, reg, val);
  99 + }
51 100 }
52 101  
53 102 static __always_inline
54 103 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
55 104 struct clock_event_device *clk)
56 105 {
57   - return arch_timer_reg_read_cp15(access, reg);
  106 + u32 val;
  107 +
  108 + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
  109 + struct arch_timer *timer = to_arch_timer(clk);
  110 + switch (reg) {
  111 + case ARCH_TIMER_REG_CTRL:
  112 + val = readl_relaxed(timer->base + CNTP_CTL);
  113 + break;
  114 + case ARCH_TIMER_REG_TVAL:
  115 + val = readl_relaxed(timer->base + CNTP_TVAL);
  116 + break;
  117 + }
  118 + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
  119 + struct arch_timer *timer = to_arch_timer(clk);
  120 + switch (reg) {
  121 + case ARCH_TIMER_REG_CTRL:
  122 + val = readl_relaxed(timer->base + CNTV_CTL);
  123 + break;
  124 + case ARCH_TIMER_REG_TVAL:
  125 + val = readl_relaxed(timer->base + CNTV_TVAL);
  126 + break;
  127 + }
  128 + } else {
  129 + val = arch_timer_reg_read_cp15(access, reg);
  130 + }
  131 +
  132 + return val;
58 133 }
59 134  
60 135 static __always_inline irqreturn_t timer_handler(const int access,
... ... @@ -86,6 +161,20 @@
86 161 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
87 162 }
88 163  
  164 +static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
  165 +{
  166 + struct clock_event_device *evt = dev_id;
  167 +
  168 + return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
  169 +}
  170 +
  171 +static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
  172 +{
  173 + struct clock_event_device *evt = dev_id;
  174 +
  175 + return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
  176 +}
  177 +
89 178 static __always_inline void timer_set_mode(const int access, int mode,
90 179 struct clock_event_device *clk)
91 180 {
... ... @@ -114,6 +203,18 @@
114 203 timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
115 204 }
116 205  
  206 +static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
  207 + struct clock_event_device *clk)
  208 +{
  209 + timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
  210 +}
  211 +
  212 +static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
  213 + struct clock_event_device *clk)
  214 +{
  215 + timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
  216 +}
  217 +
117 218 static __always_inline void set_next_event(const int access, unsigned long evt,
118 219 struct clock_event_device *clk)
119 220 {
120 221  
121 222  
122 223  
123 224  
124 225  
... ... @@ -139,28 +240,63 @@
139 240 return 0;
140 241 }
141 242  
142   -static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
  243 +static int arch_timer_set_next_event_virt_mem(unsigned long evt,
  244 + struct clock_event_device *clk)
143 245 {
144   - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
145   - clk->name = "arch_sys_timer";
146   - clk->rating = 450;
147   - if (arch_timer_use_virtual) {
148   - clk->irq = arch_timer_ppi[VIRT_PPI];
149   - clk->set_mode = arch_timer_set_mode_virt;
150   - clk->set_next_event = arch_timer_set_next_event_virt;
  246 + set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
  247 + return 0;
  248 +}
  249 +
  250 +static int arch_timer_set_next_event_phys_mem(unsigned long evt,
  251 + struct clock_event_device *clk)
  252 +{
  253 + set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
  254 + return 0;
  255 +}
  256 +
  257 +static void __cpuinit __arch_timer_setup(unsigned type,
  258 + struct clock_event_device *clk)
  259 +{
  260 + clk->features = CLOCK_EVT_FEAT_ONESHOT;
  261 +
  262 + if (type == ARCH_CP15_TIMER) {
  263 + clk->features |= CLOCK_EVT_FEAT_C3STOP;
  264 + clk->name = "arch_sys_timer";
  265 + clk->rating = 450;
  266 + clk->cpumask = cpumask_of(smp_processor_id());
  267 + if (arch_timer_use_virtual) {
  268 + clk->irq = arch_timer_ppi[VIRT_PPI];
  269 + clk->set_mode = arch_timer_set_mode_virt;
  270 + clk->set_next_event = arch_timer_set_next_event_virt;
  271 + } else {
  272 + clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
  273 + clk->set_mode = arch_timer_set_mode_phys;
  274 + clk->set_next_event = arch_timer_set_next_event_phys;
  275 + }
151 276 } else {
152   - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
153   - clk->set_mode = arch_timer_set_mode_phys;
154   - clk->set_next_event = arch_timer_set_next_event_phys;
  277 + clk->name = "arch_mem_timer";
  278 + clk->rating = 400;
  279 + clk->cpumask = cpu_all_mask;
  280 + if (arch_timer_mem_use_virtual) {
  281 + clk->set_mode = arch_timer_set_mode_virt_mem;
  282 + clk->set_next_event =
  283 + arch_timer_set_next_event_virt_mem;
  284 + } else {
  285 + clk->set_mode = arch_timer_set_mode_phys_mem;
  286 + clk->set_next_event =
  287 + arch_timer_set_next_event_phys_mem;
  288 + }
155 289 }
156 290  
157   - clk->cpumask = cpumask_of(smp_processor_id());
158   -
159 291 clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
160 292  
161   - clockevents_config_and_register(clk, arch_timer_rate,
162   - 0xf, 0x7fffffff);
  293 + clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
  294 +}
163 295  
  296 +static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
  297 +{
  298 + __arch_timer_setup(ARCH_CP15_TIMER, clk);
  299 +
164 300 if (arch_timer_use_virtual)
165 301 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
166 302 else {
167 303  
168 304  
169 305  
170 306  
... ... @@ -174,27 +310,41 @@
174 310 return 0;
175 311 }
176 312  
177   -static int arch_timer_available(void)
  313 +static void
  314 +arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
178 315 {
179   - u32 freq;
  316 + /* Who has more than one independent system counter? */
  317 + if (arch_timer_rate)
  318 + return;
180 319  
181   - if (arch_timer_rate == 0) {
182   - freq = arch_timer_get_cntfrq();
183   -
184   - /* Check the timer frequency. */
185   - if (freq == 0) {
186   - pr_warn("Architected timer frequency not available\n");
187   - return -EINVAL;
188   - }
189   -
190   - arch_timer_rate = freq;
  320 + /* Try to determine the frequency from the device tree or CNTFRQ */
  321 + if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
  322 + if (cntbase)
  323 + arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
  324 + else
  325 + arch_timer_rate = arch_timer_get_cntfrq();
191 326 }
192 327  
193   - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
  328 + /* Check the timer frequency. */
  329 + if (arch_timer_rate == 0)
  330 + pr_warn("Architected timer frequency not available\n");
  331 +}
  332 +
  333 +static void arch_timer_banner(unsigned type)
  334 +{
  335 + pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
  336 + type & ARCH_CP15_TIMER ? "cp15" : "",
  337 + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
  338 + type & ARCH_MEM_TIMER ? "mmio" : "",
194 339 (unsigned long)arch_timer_rate / 1000000,
195 340 (unsigned long)(arch_timer_rate / 10000) % 100,
196   - arch_timer_use_virtual ? "virt" : "phys");
197   - return 0;
  341 + type & ARCH_CP15_TIMER ?
  342 + arch_timer_use_virtual ? "virt" : "phys" :
  343 + "",
  344 + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
  345 + type & ARCH_MEM_TIMER ?
  346 + arch_timer_mem_use_virtual ? "virt" : "phys" :
  347 + "");
198 348 }
199 349  
200 350 u32 arch_timer_get_rate(void)
201 351  
202 352  
203 353  
204 354  
... ... @@ -202,19 +352,35 @@
202 352 return arch_timer_rate;
203 353 }
204 354  
205   -u64 arch_timer_read_counter(void)
  355 +static u64 arch_counter_get_cntvct_mem(void)
206 356 {
207   - return arch_counter_get_cntvct();
  357 + u32 vct_lo, vct_hi, tmp_hi;
  358 +
  359 + do {
  360 + vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
  361 + vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
  362 + tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
  363 + } while (vct_hi != tmp_hi);
  364 +
  365 + return ((u64) vct_hi << 32) | vct_lo;
208 366 }
209 367  
  368 +/*
  369 + * Default to cp15 based access because arm64 uses this function for
  370 + * sched_clock() before DT is probed and the cp15 method is guaranteed
  371 + * to exist on arm64. arm doesn't use this before DT is probed so even
  372 + * if we don't have the cp15 accessors we won't have a problem.
  373 + */
  374 +u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
  375 +
210 376 static cycle_t arch_counter_read(struct clocksource *cs)
211 377 {
212   - return arch_counter_get_cntvct();
  378 + return arch_timer_read_counter();
213 379 }
214 380  
215 381 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
216 382 {
217   - return arch_counter_get_cntvct();
  383 + return arch_timer_read_counter();
218 384 }
219 385  
220 386 static struct clocksource clocksource_counter = {
... ... @@ -237,6 +403,23 @@
237 403 return &timecounter;
238 404 }
239 405  
  406 +static void __init arch_counter_register(unsigned type)
  407 +{
  408 + u64 start_count;
  409 +
  410 + /* Register the CP15 based counter if we have one */
  411 + if (type & ARCH_CP15_TIMER)
  412 + arch_timer_read_counter = arch_counter_get_cntvct;
  413 + else
  414 + arch_timer_read_counter = arch_counter_get_cntvct_mem;
  415 +
  416 + start_count = arch_timer_read_counter();
  417 + clocksource_register_hz(&clocksource_counter, arch_timer_rate);
  418 + cyclecounter.mult = clocksource_counter.mult;
  419 + cyclecounter.shift = clocksource_counter.shift;
  420 + timecounter_init(&timecounter, &cyclecounter, start_count);
  421 +}
  422 +
240 423 static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
241 424 {
242 425 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
243 426  
... ... @@ -281,22 +464,12 @@
281 464 int err;
282 465 int ppi;
283 466  
284   - err = arch_timer_available();
285   - if (err)
286   - goto out;
287   -
288 467 arch_timer_evt = alloc_percpu(struct clock_event_device);
289 468 if (!arch_timer_evt) {
290 469 err = -ENOMEM;
291 470 goto out;
292 471 }
293 472  
294   - clocksource_register_hz(&clocksource_counter, arch_timer_rate);
295   - cyclecounter.mult = clocksource_counter.mult;
296   - cyclecounter.shift = clocksource_counter.shift;
297   - timecounter_init(&timecounter, &cyclecounter,
298   - arch_counter_get_cntvct());
299   -
300 473 if (arch_timer_use_virtual) {
301 474 ppi = arch_timer_ppi[VIRT_PPI];
302 475 err = request_percpu_irq(ppi, arch_timer_handler_virt,
303 476  
304 477  
305 478  
306 479  
307 480  
... ... @@ -347,25 +520,78 @@
347 520 return err;
348 521 }
349 522  
  523 +static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
  524 +{
  525 + int ret;
  526 + irq_handler_t func;
  527 + struct arch_timer *t;
  528 +
  529 + t = kzalloc(sizeof(*t), GFP_KERNEL);
  530 + if (!t)
  531 + return -ENOMEM;
  532 +
  533 + t->base = base;
  534 + t->evt.irq = irq;
  535 + __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
  536 +
  537 + if (arch_timer_mem_use_virtual)
  538 + func = arch_timer_handler_virt_mem;
  539 + else
  540 + func = arch_timer_handler_phys_mem;
  541 +
  542 + ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
  543 + if (ret) {
  544 + pr_err("arch_timer: Failed to request mem timer irq\n");
  545 + kfree(t);
  546 + }
  547 +
  548 + return ret;
  549 +}
  550 +
  551 +static const struct of_device_id arch_timer_of_match[] __initconst = {
  552 + { .compatible = "arm,armv7-timer", },
  553 + { .compatible = "arm,armv8-timer", },
  554 + {},
  555 +};
  556 +
  557 +static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
  558 + { .compatible = "arm,armv7-timer-mem", },
  559 + {},
  560 +};
  561 +
  562 +static void __init arch_timer_common_init(void)
  563 +{
  564 + unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
  565 +
  566 + /* Wait until both nodes are probed if we have two timers */
  567 + if ((arch_timers_present & mask) != mask) {
  568 + if (of_find_matching_node(NULL, arch_timer_mem_of_match) &&
  569 + !(arch_timers_present & ARCH_MEM_TIMER))
  570 + return;
  571 + if (of_find_matching_node(NULL, arch_timer_of_match) &&
  572 + !(arch_timers_present & ARCH_CP15_TIMER))
  573 + return;
  574 + }
  575 +
  576 + arch_timer_banner(arch_timers_present);
  577 + arch_counter_register(arch_timers_present);
  578 + arch_timer_arch_init();
  579 +}
  580 +
350 581 static void __init arch_timer_init(struct device_node *np)
351 582 {
352   - u32 freq;
353 583 int i;
354 584  
355   - if (arch_timer_get_rate()) {
  585 + if (arch_timers_present & ARCH_CP15_TIMER) {
356 586 pr_warn("arch_timer: multiple nodes in dt, skipping\n");
357 587 return;
358 588 }
359 589  
360   - /* Try to determine the frequency from the device tree or CNTFRQ */
361   - if (!of_property_read_u32(np, "clock-frequency", &freq))
362   - arch_timer_rate = freq;
363   -
  590 + arch_timers_present |= ARCH_CP15_TIMER;
364 591 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
365 592 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
  593 + arch_timer_detect_rate(NULL, np);
366 594  
367   - of_node_put(np);
368   -
369 595 /*
370 596 * If HYP mode is available, we know that the physical timer
371 597 * has been configured to be accessible from PL1. Use it, so
372 598  
... ... @@ -385,8 +611,74 @@
385 611 }
386 612  
387 613 arch_timer_register();
388   - arch_timer_arch_init();
  614 + arch_timer_common_init();
389 615 }
390 616 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
391 617 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
  618 +
  619 +static void __init arch_timer_mem_init(struct device_node *np)
  620 +{
  621 + struct device_node *frame, *best_frame = NULL;
  622 + void __iomem *cntctlbase, *base;
  623 + unsigned int irq;
  624 + u32 cnttidr;
  625 +
  626 + arch_timers_present |= ARCH_MEM_TIMER;
  627 + cntctlbase = of_iomap(np, 0);
  628 + if (!cntctlbase) {
  629 + pr_err("arch_timer: Can't find CNTCTLBase\n");
  630 + return;
  631 + }
  632 +
  633 + cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
  634 + iounmap(cntctlbase);
  635 +
  636 + /*
  637 + * Try to find a virtual capable frame. Otherwise fall back to a
  638 + * physical capable frame.
  639 + */
  640 + for_each_available_child_of_node(np, frame) {
  641 + int n;
  642 +
  643 + if (of_property_read_u32(frame, "frame-number", &n)) {
  644 + pr_err("arch_timer: Missing frame-number\n");
  645 + of_node_put(best_frame);
  646 + of_node_put(frame);
  647 + return;
  648 + }
  649 +
  650 + if (cnttidr & CNTTIDR_VIRT(n)) {
  651 + of_node_put(best_frame);
  652 + best_frame = frame;
  653 + arch_timer_mem_use_virtual = true;
  654 + break;
  655 + }
  656 + of_node_put(best_frame);
  657 + best_frame = of_node_get(frame);
  658 + }
  659 +
  660 + base = arch_counter_base = of_iomap(best_frame, 0);
  661 + if (!base) {
  662 + pr_err("arch_timer: Can't map frame's registers\n");
  663 + of_node_put(best_frame);
  664 + return;
  665 + }
  666 +
  667 + if (arch_timer_mem_use_virtual)
  668 + irq = irq_of_parse_and_map(best_frame, 1);
  669 + else
  670 + irq = irq_of_parse_and_map(best_frame, 0);
  671 + of_node_put(best_frame);
  672 + if (!irq) {
  673 + pr_err("arch_timer: Frame missing %s irq",
  674 + arch_timer_mem_use_virtual ? "virt" : "phys");
  675 + return;
  676 + }
  677 +
  678 + arch_timer_detect_rate(base, np);
  679 + arch_timer_mem_register(base, irq);
  680 + arch_timer_common_init();
  681 +}
  682 +CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
  683 + arch_timer_mem_init);
include/clocksource/arm_arch_timer.h
... ... @@ -30,11 +30,13 @@
30 30  
31 31 #define ARCH_TIMER_PHYS_ACCESS 0
32 32 #define ARCH_TIMER_VIRT_ACCESS 1
  33 +#define ARCH_TIMER_MEM_PHYS_ACCESS 2
  34 +#define ARCH_TIMER_MEM_VIRT_ACCESS 3
33 35  
34 36 #ifdef CONFIG_ARM_ARCH_TIMER
35 37  
36 38 extern u32 arch_timer_get_rate(void);
37   -extern u64 arch_timer_read_counter(void);
  39 +extern u64 (*arch_timer_read_counter)(void);
38 40 extern struct timecounter *arch_timer_get_timecounter(void);
39 41  
40 42 #else