/*
 * linux/arch/arm/kernel/arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>

static u32 arch_timer_rate;

enum ppi_nr {
        PHYS_SECURE_PPI,
        PHYS_NONSECURE_PPI,
        VIRT_PPI,
        HYP_PPI,
        MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

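/*
 * Prefer the virtual timer; arch_timer_of_register() below flips this to
 * false when the DT provides no virtual timer interrupt.
 */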
static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT         (1 << 2)

#define ARCH_TIMER_REG_CTRL             0
#define ARCH_TIMER_REG_TVAL             1

#define ARCH_TIMER_PHYS_ACCESS          0
#define ARCH_TIMER_VIRT_ACCESS          1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
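/*
 * CP15 encodings used below: c14/c2 is the physical timer (CNTP_CTL,
 * CNTP_TVAL) and c14/c3 is the virtual timer (CNTV_CTL, CNTV_TVAL).
 */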
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
                        break;
                }
        }

        isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
        u32 val = 0;

        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
                        break;
                }
        }

        return val;
}

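/*
 * CNTFRQ holds the counter frequency programmed by firmware; CNTPCT and
 * CNTVCT are the 64-bit physical and virtual counters, read as a pair
 * with mrrc.
 */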
static inline u32 arch_timer_get_cntfrq(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
        return val;
}

static inline u64 arch_counter_get_cntpct(void)
{
        u64 cval;
        asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
        return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
        u64 cval;
        asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
        return cval;
}

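/*
 * The timer output remains asserted until it is masked or reprogrammed,
 * so mask it here and let set_next_event() clear the mask when the next
 * event is programmed.
 */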
static irqreturn_t inline timer_handler(const int access,
                                        struct clock_event_device *evt)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
        unsigned long ctrl;
        switch (mode) {
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                break;
        default:
                break;
        }
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

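/*
 * TVAL is a signed 32-bit downcounter: write the delta in timer ticks,
 * then enable the timer with the interrupt unmasked.
 */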
static inline void set_next_event(const int access, unsigned long evt)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
        return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
        return 0;
}

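/*
 * Per-CPU clockevent setup, called for each CPU through the local timer
 * infrastructure (or once for the global fallback clockevent).
 */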
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
        clk->name = "arch_sys_timer";
        clk->rating = 450;
        if (arch_timer_use_virtual) {
                clk->irq = arch_timer_ppi[VIRT_PPI];
                clk->set_mode = arch_timer_set_mode_virt;
                clk->set_next_event = arch_timer_set_next_event_virt;
        } else {
                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
                clk->set_mode = arch_timer_set_mode_phys;
                clk->set_next_event = arch_timer_set_next_event_phys;
        }

        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

        clockevents_config_and_register(clk, arch_timer_rate,
                                        0xf, 0x7fffffff);

        *__this_cpu_ptr(arch_timer_evt) = clk;

        if (arch_timer_use_virtual)
                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
        else {
                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
        }

        return 0;
}

static int arch_timer_available(void)
{
        u32 freq;

        if (arch_timer_rate == 0) {
                freq = arch_timer_get_cntfrq();

                /* Check the timer frequency. */
                if (freq == 0) {
                        pr_warn("Architected timer frequency not available\n");
                        return -EINVAL;
                }

                arch_timer_rate = freq;
        }

        pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
                     (unsigned long)arch_timer_rate / 1000000,
                     (unsigned long)(arch_timer_rate / 10000) % 100,
                     arch_timer_use_virtual ? "virt" : "phys");
        return 0;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
        return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

static u32 arch_timer_read_counter32(void)
{
        return arch_timer_read_counter();
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
        return arch_timer_read_counter();
}

static unsigned long arch_timer_read_current_timer(void)
{
        return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
        return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
        .name   = "arch_sys_counter",
        .rating = 400,
        .read   = arch_counter_read,
        .mask   = CLOCKSOURCE_MASK(56),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
        .read   = arch_counter_read_cc,
        .mask   = CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
        return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
                 clk->irq, smp_processor_id());

        if (arch_timer_use_virtual)
                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
        else {
                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
        }

        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static struct local_timer_ops arch_timer_ops __cpuinitdata = {
        .setup  = arch_timer_setup,
        .stop   = arch_timer_stop,
};

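/*
 * Fallback clockevent, set up on CPU0 only when registration as a local
 * timer fails (see arch_timer_register() below).
 */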
static struct clock_event_device arch_timer_global_evt;

static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        err = arch_timer_available();
        if (err)
                goto out;

        arch_timer_evt = alloc_percpu(struct clock_event_device *);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&timecounter, &cyclecounter,
                         arch_counter_get_cntpct());

        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
        } else {
                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
        }

        if (err) {
                pr_err("arch_timer: can't register interrupt %d (%d)\n",
                       ppi, err);
                goto out_free;
        }

        err = local_timer_register(&arch_timer_ops);
        if (err) {
                /*
                 * We couldn't register as a local timer (could be
                 * because we're on a UP platform, or because some
                 * other local timer is already present...). Try as a
                 * global timer instead.
                 */
                arch_timer_global_evt.cpumask = cpumask_of(0);
                err = arch_timer_setup(&arch_timer_global_evt);
        }
        if (err)
                goto out_free_irq;

        /* Use the architected timer for the delay loop. */
        arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
        arch_delay_timer.freq = arch_timer_rate;
        register_current_timer_delay(&arch_delay_timer);
        return 0;

out_free_irq:
        if (arch_timer_use_virtual)
                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
        else {
                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                arch_timer_evt);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
                                        arch_timer_evt);
        }

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
        { .compatible   = "arm,armv7-timer",    },
        {},
};

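/*
 * Illustrative DT node this driver binds to (example values only; the
 * interrupt specifiers follow the enum ppi_nr order used in the parsing
 * loop below: secure physical, non-secure physical, virtual, hypervisor):
 *
 *      timer {
 *              compatible = "arm,armv7-timer";
 *              interrupts = <1 13 0xf08>, <1 14 0xf08>,
 *                           <1 11 0xf08>, <1 10 0xf08>;
 *              clock-frequency = <24000000>;
 *      };
 */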
int __init arch_timer_of_register(void)
{
        struct device_node *np;
        u32 freq;
        int i;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                pr_err("arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        /* Try to determine the frequency from the device tree or CNTFRQ */
        if (!of_property_read_u32(np, "clock-frequency", &freq))
                arch_timer_rate = freq;

        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

        of_node_put(np);

        /*
         * If no interrupt provided for virtual timer, we'll have to
         * stick to the physical timer. It'd better be accessible...
         */
        if (!arch_timer_ppi[VIRT_PPI]) {
                arch_timer_use_virtual = false;

                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
                        return -EINVAL;
                }
        }

        if (arch_timer_use_virtual)
                arch_timer_read_counter = arch_counter_get_cntvct;
        else
                arch_timer_read_counter = arch_counter_get_cntpct;

        return arch_timer_register();
}

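/*
 * sched_clock only gets the low 32 bits of the counter here (via
 * arch_timer_read_counter32); the sched_clock core extends that to a
 * 64-bit nanosecond count and handles the wrap.
 */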
int __init arch_timer_sched_clock_init(void)
{
        int err;

        err = arch_timer_available();
        if (err)
                return err;

        setup_sched_clock(arch_timer_read_counter32,
                          32, arch_timer_rate);
        return 0;
}