/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

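/*
 * Assign up to @cpus_per_vec CPUs from @nmsk to the per-vector mask @irqmsk.
 * Once a CPU has been picked, its topology siblings are preferred next, so
 * the CPUs serving one vector share a core where possible.  Assigned CPUs
 * are cleared from @nmsk so later vectors do not pick them again.
 */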
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_vec > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}

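/*
 * The helpers below maintain a per-node snapshot of the present CPUs:
 * alloc_node_to_present_cpumask() allocates one zeroed cpumask per node,
 * build_node_to_present_cpumask() fills them from cpu_present_mask, and
 * free_node_to_present_cpumask() releases them again.
 */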
static cpumask_var_t *alloc_node_to_present_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}

static void free_node_to_present_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}

static void build_node_to_present_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_present_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

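/*
 * Count the nodes whose present CPUs intersect @mask and record them in
 * @nodemsk.  Returns the number of such nodes.
 */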
static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
	int n, nodes, cpus_per_vec, extra_vecs, curvec;
	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
	int last_affv = affv + affd->pre_vectors;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct cpumask *masks;
	cpumask_var_t nmsk, *node_to_present_cpumask;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto out;

	node_to_present_cpumask = alloc_node_to_present_cpumask();
	if (!node_to_present_cpumask)
		goto out;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);

	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_present_cpumask(node_to_present_cpumask);
	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
				     &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to the
	 * number of vectors we just spread the vectors across the nodes.
	 */
	if (affv <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec,
				     node_to_present_cpumask[n]);
			if (++curvec == last_affv)
				break;
		}
		goto done;
	}

	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign, vecs_per_node;

		/* Spread the vectors per node */
		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);
		vecs_to_assign = min(vecs_per_node, ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

		for (v = 0; curvec < last_affv && v < vecs_to_assign;
				curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate for rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		if (curvec >= last_affv)
			break;
		--nodes;
	}

done:
	put_online_cpus();

	/* Fill out vectors at the end that don't need affinity */
	for (; curvec < nvecs; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);
	free_node_to_present_cpumask(node_to_present_cpumask);
out:
	free_cpumask_var(nmsk);
	return masks;
}
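
/*
 * Illustrative usage sketch, not part of this file: a caller that reserves
 * one pre-vector (e.g. for an admin queue) and spreads the remaining vectors
 * could use this helper roughly as follows; "nvecs" and
 * "use_mask_for_vector()" are placeholder names.  Drivers normally reach
 * this code indirectly, e.g. via pci_alloc_irq_vectors_affinity(), rather
 * than calling it themselves.
 *
 *	struct irq_affinity affd = { .pre_vectors = 1, .post_vectors = 0 };
 *	struct cpumask *masks = irq_create_affinity_masks(nvecs, &affd);
 *	int i;
 *
 *	if (masks) {
 *		// masks[0] holds irq_default_affinity for the admin vector,
 *		// masks[1..nvecs-1] hold the spread queue affinities.
 *		for (i = 0; i < nvecs; i++)
 *			use_mask_for_vector(i, &masks[i]);
 *		kfree(masks);
 *	}
 */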

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 *
 * Returns the number of vectors to use: the reserved pre/post vectors plus
 * at most one spread vector per present CPU, capped at @maxvec.
 */
int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
	int resv = affd->pre_vectors + affd->post_vectors;
	int vecs = maxvec - resv;
	int ret;

	get_online_cpus();
	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
	put_online_cpus();
	return ret;
}
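
/*
 * Worked example (assumed numbers): with maxvec = 32, affd->pre_vectors = 1
 * and affd->post_vectors = 0 on a machine with 16 present CPUs, resv = 1 and
 * vecs = 31, so the function returns min(16, 31) + 1 = 17, i.e. one spread
 * vector per present CPU plus the reserved pre-vector.
 */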