/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
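
/*
 * Illustrative sketch (not part of this header's API surface): reading back
 * the message that was last written for an interrupt. The irq variable is
 * hypothetical.
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	pr_debug("MSI addr %#x:%#x data %#x\n",
 *		 msg.address_hi, msg.address_lo, msg.data);
 */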

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:	TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]      TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev) \
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev) \
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
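
/*
 * Illustrative sketch: walking the MSI descriptors of a device with the
 * helpers above. @dev and the loop body are hypothetical.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_msi_entry(desc, dev)
 *		pr_debug("virq %u uses %u vectors\n", desc->irq,
 *			 desc->nvec_used);
 */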

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif
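
/*
 * Illustrative sketch: an IOMMU driver can stash an opaque per-device handle
 * in a descriptor and retrieve it later when composing or translating the
 * message. @my_cookie is hypothetical.
 *
 *	msi_desc_set_iommu_cookie(desc, my_cookie);
 *	...
 *	cookie = msi_desc_get_iommu_cookie(desc);
 */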

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */
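
/*
 * Illustrative sketch: the PCI flavoured helpers simply operate on the
 * struct device embedded in the pci_dev. @pdev is hypothetical.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_pci_msi_entry(desc, pdev)
 *		pr_debug("%s: entry %u -> virq %u\n",
 *			 pci_name(msi_desc_to_pci_dev(desc)),
 *			 desc->msi_attrib.entry_nr, desc->irq);
 */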

struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
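
/*
 * Illustrative sketch: core/bus code allocates a descriptor, links it into
 * the device list and frees it again on teardown. Error handling is elided
 * and everything not declared in this header is hypothetical.
 *
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, 1, NULL);
 *	if (!desc)
 *		return -ENOMEM;
 *	list_add_tail(&desc->list, dev_to_msi_list(dev));
 *	...
 *	list_del(&desc->list);
 *	free_msi_entry(desc);
 */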

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
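
/*
 * Illustrative sketch: pci_msi_mask_irq() and pci_msi_unmask_irq() are meant
 * to be plugged into an irq_chip as the mask/unmask callbacks of a PCI/MSI
 * aware interrupt chip. The chip name is hypothetical.
 *
 *	static struct irq_chip my_pci_msi_chip = {
 *		.name		= "my-PCI-MSI",
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *	};
 */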

/* Conversion helpers. Should be removed after merging */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}

/*
 * The arch hooks to set up msi irqs. These functions are implemented as
 * weak symbols so that they /can/ be overridden by architecture specific
 * code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
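
/*
 * Illustrative sketch (assumptions: an architecture allocating one virq per
 * descriptor; my_alloc_virq() and my_compose_msi_msg() are hypothetical and
 * error handling is elided): a minimal override of the weak
 * arch_setup_msi_irq() hook binds a virq to the descriptor, composes the
 * message and writes it out.
 *
 *	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 *	{
 *		int virq = my_alloc_virq(dev);
 *
 *		if (virq < 0)
 *			return virq;
 *		irq_set_msi_desc(virq, desc);
 *		my_compose_msi_msg(virq, &desc->msg);
 *		pci_write_msi_msg(virq, &desc->msg);
 *		return 0;
 *	}
 */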

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
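
/*
 * Illustrative sketch: a legacy PCI host bridge driver fills in a
 * msi_controller with its setup/teardown callbacks and hands it to the PCI
 * core. The my_* callbacks are hypothetical.
 *
 *	static struct msi_controller my_msi_chip = {
 *		.setup_irq	= my_setup_irq,
 *		.teardown_irq	= my_teardown_irq,
 *	};
 */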
231
Jiang Liuf3cf8bb2014-11-12 11:39:03 +0100232#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
Jiang Liud9109692014-11-15 22:24:04 +0800233
Jiang Liuaeeb5962014-11-15 22:24:05 +0800234#include <linux/irqhandler.h>
Jiang Liud9109692014-11-15 22:24:04 +0800235#include <asm/msi.h>
236
Jiang Liuf3cf8bb2014-11-12 11:39:03 +0100237struct irq_domain;
Marc Zyngier552c4942015-11-23 08:26:07 +0000238struct irq_domain_ops;
Jiang Liuf3cf8bb2014-11-12 11:39:03 +0100239struct irq_chip;
240struct device_node;
Marc Zyngierbe5436c2015-10-13 12:51:44 +0100241struct fwnode_handle;
Jiang Liuf3cf8bb2014-11-12 11:39:03 +0100242struct msi_domain_info;
243
/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
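
/*
 * Illustrative sketch: an irqchip driver typically pairs a struct irq_chip
 * with a msi_domain_info carrying the flags above, then passes it to one of
 * the msi_create_irq_domain() variants. All my_* names are hypothetical.
 *
 *	static struct irq_chip my_msi_irq_chip = {
 *		.name			= "my-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_write_msi_msg	= my_write_msi_msg,
 *	};
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_msi_irq_chip,
 *	};
 */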

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
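
/*
 * Illustrative sketch: creating a generic MSI domain on top of a parent
 * domain and allocating/freeing interrupts for a device through it. The
 * fwnode, parent, dev and nvec variables are hypothetical and error handling
 * is elided.
 *
 *	struct irq_domain *d;
 *
 *	d = msi_create_irq_domain(fwnode, &my_msi_domain_info, parent);
 *	...
 *	err = msi_domain_alloc_irqs(d, dev, nvec);
 *	...
 *	msi_domain_free_irqs(d, dev);
 */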
344
Marc Zyngierbe5436c2015-10-13 12:51:44 +0100345struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
Marc Zyngierc09fcc4b2015-07-28 14:46:16 +0100346 struct msi_domain_info *info,
347 struct irq_domain *parent);
348int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
349 irq_write_msi_msg_t write_msi_msg);
350void platform_msi_domain_free_irqs(struct device *dev);
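
/*
 * Illustrative sketch: a platform device driver allocates its MSIs and
 * provides a callback which programs the address/data pair into its doorbell
 * registers. my_write_msg(), my_base and the MY_DOORBELL_* offsets are
 * hypothetical.
 *
 *	static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		writel(msg->address_lo, my_base + MY_DOORBELL_ADDR_LO);
 *		writel(msg->address_hi, my_base + MY_DOORBELL_ADDR_HI);
 *		writel(msg->data, my_base + MY_DOORBELL_DATA);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(dev, 4, my_write_msg);
 *	...
 *	platform_msi_domain_free_irqs(dev);
 */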

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
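
/*
 * Illustrative sketch: a PCI/MSI capable irqchip driver creates its PCI/MSI
 * domain on top of its base interrupt domain; the PCI core later looks the
 * domain up via pci_msi_get_device_domain(). Names outside this header are
 * hypothetical and error handling is elided.
 *
 *	struct irq_domain *pci_domain;
 *
 *	pci_domain = pci_msi_create_irq_domain(fwnode, &my_msi_domain_info,
 *					       parent_domain);
 *	if (!pci_domain)
 *		return -ENOMEM;
 */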

#endif /* LINUX_MSI_H */