/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the halt mask so the GFRC halts whenever any online core in the SMP
 * cluster is halted. Only works on ARC HS v3.0+; on earlier versions it has
 * no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
	 * GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

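	/* Read-modify-write: preserve halt bits already set for other cores */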
	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update the GFRC halt mask as a new CPU has come online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending this
	 * one. Linux cross-core calling works fine with concurrent IPIs
	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one().
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In the rare case that multiple concurrent IPIs sent to the same
	 * target get coalesced by MCIP into one asserted IRQ, @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;

	if (mp.dbg) {
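		/*
		 * Debug halt set-up; apparently this selects all cores for
		 * cross-core halting so that halting one halts them all
		 * (the hardwired 0xf mask assumes at most 4 cores).
		 */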
		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
	}
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (Round Robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
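	/*
	 * The union below mirrors the CMD_IDU_SET_MODE data word:
	 * distribution mode in bits [1:0], trigger level in bit 4.
	 */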
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* Error out if @cpumask contains no online CPU */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

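	/* ffs() == fls() iff exactly one destination bit is set */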
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in the IDU must be set manually
	 * since in some cases the kernel will not call irq_set_affinity() by
	 * itself:
	 *   1. When the kernel is not configured with SMP support.
	 *   2. When the kernel is configured with SMP support but the upper
	 *      interrupt controllers do not support setting the affinity and
	 *      so cannot propagate it to the IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_enable		= idu_irq_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
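	/* Common IRQ 0 of the IDU cascades into core intc line FIRST_EXT_IRQ */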
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
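	/* Allow the IRQ's affinity to be changed from process context */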
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onecell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]:	Statically assigned, always private-per-core (timers, WDT, IPI)
 * [24, 23+C]:	If C > 0, "C" common IRQs
 * [24+C, N]:	Not statically assigned, private-per-core
 */

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map parent uplink IRQs (towards core intc): 24, 25, ...
		 * This mapping was already created earlier, but redoing it
		 * here returns the parent virq, which is needed to install
		 * the IDU cascade handler as the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

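	/* Enable the IDU only once all common IRQs are masked and cascaded */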
	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);