/*
 * Written by Christoph Hellwig (commit 5e385a6, 2016-07-04).
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

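/*
 * Return the first SMT sibling of @cpu, or @cpu itself if its sibling
 * mask is empty.
 */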
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;
	return cpu;
}

/*
 * Take a map of online CPUs and the number of available interrupt vectors
 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
 * so that they are distributed as evenly as possible over the CPUs.  If
 * more vectors than CPUs are available we map one vector to each CPU,
 * otherwise we map one vector to the first SMT sibling of each core.
 *
 * If there are more vectors than CPUs we will still only have one bit set
 * per CPU; the interrupt setup code keeps assigning vectors from the start
 * of the bitmap until it runs out of vectors.
 */
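/*
 * Example (illustration only; the topology below is assumed): on a machine
 * with four online CPUs where CPUs 0/1 and 2/3 are SMT sibling pairs, a
 * request for 8 vectors returns a mask covering all four CPUs and sets
 * *nr_vecs to 4, while a request for 2 vectors returns a mask covering
 * CPUs 0 and 2.
 */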
struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
{
	struct cpumask *affinity_mask;
	unsigned int max_vecs = *nr_vecs;

	/* A single vector needs no spreading. */
	if (max_vecs == 1)
		return NULL;

	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!affinity_mask) {
		/* Fall back to a single, unspread vector. */
		*nr_vecs = 1;
		return NULL;
	}

	if (max_vecs >= num_online_cpus()) {
		/* Enough vectors for every CPU: target them all. */
		cpumask_copy(affinity_mask, cpu_online_mask);
		*nr_vecs = num_online_cpus();
	} else {
		unsigned int vecs = 0, cpu;

		for_each_online_cpu(cpu) {
			/*
			 * Spread to at most one thread per core, and stop
			 * once every available vector has a target CPU.
			 * (Counting assigned vectors here, rather than
			 * decrementing max_vecs on every CPU scanned,
			 * avoids stranding vectors on skipped siblings.)
			 */
			if (cpu == get_first_sibling(cpu)) {
				cpumask_set_cpu(cpu, affinity_mask);
				if (++vecs == max_vecs)
					break;
			}
		}
		*nr_vecs = vecs;
	}

	return affinity_mask;
}
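
/*
 * Usage sketch (hypothetical, not part of the original file): a driver
 * setting up MSI-X might call irq_create_affinity_mask() to decide how
 * many vectors are worth requesting.  example_setup() and the vector
 * count are made-up names/values for illustration.
 */
static int __maybe_unused example_setup(void)
{
	unsigned int nr_vecs = 8;	/* vectors the device offers */
	struct cpumask *mask;

	mask = irq_create_affinity_mask(&nr_vecs);

	/*
	 * mask is NULL for single-vector requests and on allocation
	 * failure; nr_vecs now holds the number of vectors actually
	 * worth allocating and spreading.
	 */
	pr_info("spreading %u interrupt vectors\n", nr_vecs);

	kfree(mask);	/* the caller owns the returned mask */
	return 0;
}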