Vineet Gupta | ac4c244 | 2013-01-18 15:12:16 +0530 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com) |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License version 2 as |
| 6 | * published by the Free Software Foundation. |
| 7 | * |
| 8 | */ |
| 9 | |
| 10 | #include <linux/interrupt.h> |
Vineet Gupta | c93d8b8 | 2013-04-11 14:47:36 +0530 | [diff] [blame] | 11 | #include <linux/irqchip.h> |
Vineet Gupta | 03a6d28 | 2013-01-18 15:12:26 +0530 | [diff] [blame] | 12 | #include <asm/mach_desc.h> |
Vineet Gupta | 286130e | 2015-10-14 14:38:02 +0530 | [diff] [blame] | 13 | #include <asm/smp.h> |
Vineet Gupta | bacdf48 | 2013-01-18 15:12:18 +0530 | [diff] [blame] | 14 | |
| 15 | /* |
Vineet Gupta | bacdf48 | 2013-01-18 15:12:18 +0530 | [diff] [blame] | 16 | * Late Interrupt system init called from start_kernel for Boot CPU only |
| 17 | * |
| 18 | * Since slab must already be initialized, platforms can start doing any |
| 19 | * needed request_irq( )s |
| 20 | */ |
| 21 | void __init init_IRQ(void) |
| 22 | { |
Vineet Gupta | 4c82f28 | 2015-10-13 08:48:54 +0530 | [diff] [blame] | 23 | /* |
| 24 | * process the entire interrupt tree in one go |
| 25 | * Any external intc will be setup provided DT chains them |
| 26 | * properly |
| 27 | */ |
Vineet Gupta | c93d8b8 | 2013-04-11 14:47:36 +0530 | [diff] [blame] | 28 | irqchip_init(); |
| 29 | |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 30 | #ifdef CONFIG_SMP |
Vineet Gupta | 286130e | 2015-10-14 14:38:02 +0530 | [diff] [blame] | 31 | /* a SMP H/w block could do IPI IRQ request here */ |
Noam Camus | b474a02 | 2015-12-16 03:10:27 +0200 | [diff] [blame^] | 32 | if (plat_smp_ops.init_per_cpu) |
| 33 | plat_smp_ops.init_per_cpu(smp_processor_id()); |
Vineet Gupta | 286130e | 2015-10-14 14:38:02 +0530 | [diff] [blame] | 34 | |
Vineet Gupta | 8721a7f | 2015-10-13 15:26:00 +0530 | [diff] [blame] | 35 | if (machine_desc->init_cpu_smp) |
| 36 | machine_desc->init_cpu_smp(smp_processor_id()); |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 37 | #endif |
Vineet Gupta | bacdf48 | 2013-01-18 15:12:18 +0530 | [diff] [blame] | 38 | } |
| 39 | |
/*
 * Common "C" entry point for every ARC ISR, reached from the low level
 * vector handler
 * @irq: vector number, as read from the ICAUSE reg of the on-chip intc
 */
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *saved_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(saved_regs);
}
| 53 | |
Vineet Gupta | c512c6b | 2015-12-11 19:31:23 +0530 | [diff] [blame] | 54 | /* |
| 55 | * API called for requesting percpu interrupts - called by each CPU |
| 56 | * - For boot CPU, actually request the IRQ with genirq core + enables |
| 57 | * - For subsequent callers only enable called locally |
| 58 | * |
| 59 | * Relies on being called by boot cpu first (i.e. request called ahead) of |
| 60 | * any enable as expected by genirq. Hence Suitable only for TIMER, IPI |
| 61 | * which are guaranteed to be setup on boot core first. |
| 62 | * Late probed peripherals such as perf can't use this as there no guarantee |
| 63 | * of being called on boot CPU first. |
| 64 | */ |
| 65 | |
Vineet Gupta | 2b75c0f | 2014-05-07 15:25:10 +0530 | [diff] [blame] | 66 | void arc_request_percpu_irq(int irq, int cpu, |
| 67 | irqreturn_t (*isr)(int irq, void *dev), |
| 68 | const char *irq_nm, |
| 69 | void *percpu_dev) |
| 70 | { |
| 71 | /* Boot cpu calls request, all call enable */ |
| 72 | if (!cpu) { |
| 73 | int rc; |
| 74 | |
Vineet Gupta | 5bf704c | 2015-12-11 16:16:11 +0530 | [diff] [blame] | 75 | #ifdef CONFIG_ISA_ARCOMPACT |
Vineet Gupta | 2b75c0f | 2014-05-07 15:25:10 +0530 | [diff] [blame] | 76 | /* |
Vineet Gupta | 5bf704c | 2015-12-11 16:16:11 +0530 | [diff] [blame] | 77 | * A subsequent request_percpu_irq() fails if percpu_devid is |
| 78 | * not set. That in turns sets NOAUTOEN, meaning each core needs |
| 79 | * to call enable_percpu_irq() |
| 80 | * |
| 81 | * For ARCv2, this is done in irq map function since we know |
| 82 | * which irqs are strictly per cpu |
Vineet Gupta | 2b75c0f | 2014-05-07 15:25:10 +0530 | [diff] [blame] | 83 | */ |
| 84 | irq_set_percpu_devid(irq); |
Vineet Gupta | 5bf704c | 2015-12-11 16:16:11 +0530 | [diff] [blame] | 85 | #endif |
Vineet Gupta | 2b75c0f | 2014-05-07 15:25:10 +0530 | [diff] [blame] | 86 | |
| 87 | rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); |
| 88 | if (rc) |
| 89 | panic("Percpu IRQ request failed for %d\n", irq); |
| 90 | } |
| 91 | |
| 92 | enable_percpu_irq(irq, 0); |
| 93 | } |