Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2016 Thomas Gleixner. |
| 4 | * Copyright (C) 2016-2017 Christoph Hellwig. |
| 5 | */ |
Christoph Hellwig | 5e385a6 | 2016-07-04 17:39:27 +0900 | [diff] [blame] | 6 | #include <linux/interrupt.h> |
| 7 | #include <linux/kernel.h> |
| 8 | #include <linux/slab.h> |
| 9 | #include <linux/cpu.h> |
| 10 | |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 11 | static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 12 | unsigned int cpus_per_vec) |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 13 | { |
| 14 | const struct cpumask *siblmsk; |
| 15 | int cpu, sibl; |
| 16 | |
| 17 | for ( ; cpus_per_vec > 0; ) { |
| 18 | cpu = cpumask_first(nmsk); |
| 19 | |
| 20 | /* Should not happen, but I'm too lazy to think about it */ |
| 21 | if (cpu >= nr_cpu_ids) |
| 22 | return; |
| 23 | |
| 24 | cpumask_clear_cpu(cpu, nmsk); |
| 25 | cpumask_set_cpu(cpu, irqmsk); |
| 26 | cpus_per_vec--; |
| 27 | |
| 28 | /* If the cpu has siblings, use them first */ |
| 29 | siblmsk = topology_sibling_cpumask(cpu); |
| 30 | for (sibl = -1; cpus_per_vec > 0; ) { |
| 31 | sibl = cpumask_next(sibl, siblmsk); |
| 32 | if (sibl >= nr_cpu_ids) |
| 33 | break; |
| 34 | if (!cpumask_test_and_clear_cpu(sibl, nmsk)) |
| 35 | continue; |
| 36 | cpumask_set_cpu(sibl, irqmsk); |
| 37 | cpus_per_vec--; |
| 38 | } |
| 39 | } |
| 40 | } |
| 41 | |
Ming Lei | 47778f33 | 2018-03-08 18:53:55 +0800 | [diff] [blame] | 42 | static cpumask_var_t *alloc_node_to_cpumask(void) |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 43 | { |
| 44 | cpumask_var_t *masks; |
| 45 | int node; |
| 46 | |
| 47 | masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL); |
| 48 | if (!masks) |
| 49 | return NULL; |
| 50 | |
| 51 | for (node = 0; node < nr_node_ids; node++) { |
| 52 | if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL)) |
| 53 | goto out_unwind; |
| 54 | } |
| 55 | |
| 56 | return masks; |
| 57 | |
| 58 | out_unwind: |
| 59 | while (--node >= 0) |
| 60 | free_cpumask_var(masks[node]); |
| 61 | kfree(masks); |
| 62 | return NULL; |
| 63 | } |
| 64 | |
Ming Lei | 47778f33 | 2018-03-08 18:53:55 +0800 | [diff] [blame] | 65 | static void free_node_to_cpumask(cpumask_var_t *masks) |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 66 | { |
| 67 | int node; |
| 68 | |
| 69 | for (node = 0; node < nr_node_ids; node++) |
| 70 | free_cpumask_var(masks[node]); |
| 71 | kfree(masks); |
| 72 | } |
| 73 | |
Ming Lei | 47778f33 | 2018-03-08 18:53:55 +0800 | [diff] [blame] | 74 | static void build_node_to_cpumask(cpumask_var_t *masks) |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 75 | { |
| 76 | int cpu; |
| 77 | |
Christoph Hellwig | 84676c1 | 2018-01-12 10:53:05 +0800 | [diff] [blame] | 78 | for_each_possible_cpu(cpu) |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 79 | cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); |
| 80 | } |
| 81 | |
Ming Lei | 47778f33 | 2018-03-08 18:53:55 +0800 | [diff] [blame] | 82 | static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 83 | const struct cpumask *mask, nodemask_t *nodemsk) |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 84 | { |
Guilherme G. Piccoli | c0af524 | 2016-12-14 16:01:12 -0200 | [diff] [blame] | 85 | int n, nodes = 0; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 86 | |
| 87 | /* Calculate the number of nodes in the supplied affinity mask */ |
Christoph Hellwig | 9a0ef98 | 2017-06-20 01:37:55 +0200 | [diff] [blame] | 88 | for_each_node(n) { |
Ming Lei | 47778f33 | 2018-03-08 18:53:55 +0800 | [diff] [blame] | 89 | if (cpumask_intersects(mask, node_to_cpumask[n])) { |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 90 | node_set(n, *nodemsk); |
| 91 | nodes++; |
| 92 | } |
| 93 | } |
| 94 | return nodes; |
| 95 | } |
| 96 | |
Minwoo Im | 0e51833 | 2019-06-02 20:21:17 +0900 | [diff] [blame] | 97 | static int __irq_build_affinity_masks(unsigned int startvec, |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 98 | unsigned int numvecs, |
| 99 | unsigned int firstvec, |
Thomas Gleixner | c2899c3 | 2018-12-18 16:06:53 +0100 | [diff] [blame] | 100 | cpumask_var_t *node_to_cpumask, |
| 101 | const struct cpumask *cpu_mask, |
| 102 | struct cpumask *nmsk, |
Dou Liyang | bec0403 | 2018-12-04 23:51:20 +0800 | [diff] [blame] | 103 | struct irq_affinity_desc *masks) |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 104 | { |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 105 | unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0; |
| 106 | unsigned int last_affv = firstvec + numvecs; |
| 107 | unsigned int curvec = startvec; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 108 | nodemask_t nodemsk = NODE_MASK_NONE; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 109 | |
Ming Lei | d305681 | 2018-03-08 18:53:58 +0800 | [diff] [blame] | 110 | if (!cpumask_weight(cpu_mask)) |
| 111 | return 0; |
| 112 | |
Ming Lei | b3e6aaa | 2018-03-08 18:53:56 +0800 | [diff] [blame] | 113 | nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk); |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 114 | |
| 115 | /* |
Guilherme G. Piccoli | c0af524 | 2016-12-14 16:01:12 -0200 | [diff] [blame] | 116 | * If the number of nodes in the mask is greater than or equal the |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 117 | * number of vectors we just spread the vectors across the nodes. |
| 118 | */ |
Ming Lei | 1a2d091 | 2018-03-08 18:53:57 +0800 | [diff] [blame] | 119 | if (numvecs <= nodes) { |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 120 | for_each_node_mask(n, nodemsk) { |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 121 | cpumask_or(&masks[curvec].mask, &masks[curvec].mask, |
| 122 | node_to_cpumask[n]); |
Ming Lei | 1a2d091 | 2018-03-08 18:53:57 +0800 | [diff] [blame] | 123 | if (++curvec == last_affv) |
Ming Lei | 060746d | 2018-11-02 22:59:50 +0800 | [diff] [blame] | 124 | curvec = firstvec; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 125 | } |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 126 | return numvecs; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 127 | } |
| 128 | |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 129 | for_each_node_mask(n, nodemsk) { |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 130 | unsigned int ncpus, v, vecs_to_assign, vecs_per_node; |
Keith Busch | 7bf8222 | 2017-04-03 15:25:53 -0400 | [diff] [blame] | 131 | |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 132 | /* Get the cpus on this node which are in the mask */ |
Ming Lei | b3e6aaa | 2018-03-08 18:53:56 +0800 | [diff] [blame] | 133 | cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 134 | ncpus = cpumask_weight(nmsk); |
Ming Lei | 53c1788 | 2019-08-16 10:28:48 +0800 | [diff] [blame^] | 135 | if (!ncpus) |
| 136 | continue; |
| 137 | |
| 138 | /* |
| 139 | * Calculate the number of cpus per vector |
| 140 | * |
| 141 | * Spread the vectors evenly per node. If the requested |
| 142 | * vector number has been reached, simply allocate one |
| 143 | * vector for each remaining node so that all nodes can |
| 144 | * be covered |
| 145 | */ |
| 146 | if (numvecs > done) |
| 147 | vecs_per_node = max_t(unsigned, |
| 148 | (numvecs - done) / nodes, 1); |
| 149 | else |
| 150 | vecs_per_node = 1; |
| 151 | |
Keith Busch | 7bf8222 | 2017-04-03 15:25:53 -0400 | [diff] [blame] | 152 | vecs_to_assign = min(vecs_per_node, ncpus); |
| 153 | |
| 154 | /* Account for rounding errors */ |
Keith Busch | 3412386 | 2017-04-13 13:28:12 -0400 | [diff] [blame] | 155 | extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign); |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 156 | |
Christoph Hellwig | bfe1307 | 2016-11-15 10:12:58 +0100 | [diff] [blame] | 157 | for (v = 0; curvec < last_affv && v < vecs_to_assign; |
| 158 | curvec++, v++) { |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 159 | cpus_per_vec = ncpus / vecs_to_assign; |
| 160 | |
| 161 | /* Account for extra vectors to compensate rounding errors */ |
| 162 | if (extra_vecs) { |
| 163 | cpus_per_vec++; |
Keith Busch | 7bf8222 | 2017-04-03 15:25:53 -0400 | [diff] [blame] | 164 | --extra_vecs; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 165 | } |
Dou Liyang | bec0403 | 2018-12-04 23:51:20 +0800 | [diff] [blame] | 166 | irq_spread_init_one(&masks[curvec].mask, nmsk, |
| 167 | cpus_per_vec); |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 168 | } |
| 169 | |
Ming Lei | 1a2d091 | 2018-03-08 18:53:57 +0800 | [diff] [blame] | 170 | done += v; |
Ming Lei | 1a2d091 | 2018-03-08 18:53:57 +0800 | [diff] [blame] | 171 | if (curvec >= last_affv) |
Ming Lei | 060746d | 2018-11-02 22:59:50 +0800 | [diff] [blame] | 172 | curvec = firstvec; |
Keith Busch | 7bf8222 | 2017-04-03 15:25:53 -0400 | [diff] [blame] | 173 | --nodes; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 174 | } |
Ming Lei | 53c1788 | 2019-08-16 10:28:48 +0800 | [diff] [blame^] | 175 | return done < numvecs ? done : numvecs; |
Ming Lei | b3e6aaa | 2018-03-08 18:53:56 +0800 | [diff] [blame] | 176 | } |
| 177 | |
/*
 * build affinity in two stages:
 *	1) spread present CPU on these vectors
 *	2) spread other possible CPUs on these vectors
 *
 * Spreads vectors [startvec, startvec + numvecs) of @masks; @firstvec is the
 * wrap-around point handed down to __irq_build_affinity_masks().
 *
 * Returns 0 on success (including the degenerate spread case — see the
 * WARN_ON below) or -ENOMEM if a temporary cpumask could not be allocated.
 */
static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
				    unsigned int firstvec,
				    struct irq_affinity_desc *masks)
{
	unsigned int curvec = startvec, nr_present, nr_others;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return ret;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	/* All allocations succeeded; the spread itself cannot fail */
	ret = 0;
	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_cpumask(node_to_cpumask);

	/* Spread on present CPUs starting from affd->pre_vectors */
	nr_present = __irq_build_affinity_masks(curvec, numvecs,
						firstvec, node_to_cpumask,
						cpu_present_mask, nmsk, masks);

	/*
	 * Spread on non present CPUs starting from the next vector to be
	 * handled. If the spreading of present CPUs already exhausted the
	 * vector space, assign the non present CPUs to the already spread
	 * out vectors.
	 */
	if (nr_present >= numvecs)
		curvec = firstvec;
	else
		curvec = firstvec + nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	nr_others = __irq_build_affinity_masks(curvec, numvecs,
					       firstvec, node_to_cpumask,
					       npresmsk, nmsk, masks);
	put_online_cpus();

	/* Both passes together should have filled every requested vector */
	if (nr_present < numvecs)
		WARN_ON(nr_present + nr_others < numvecs);

	free_node_to_cpumask(node_to_cpumask);

 fail_npresmsk:
	free_cpumask_var(npresmsk);

 fail_nmsk:
	free_cpumask_var(nmsk);
	return ret;
}
| 240 | |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 241 | static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs) |
| 242 | { |
| 243 | affd->nr_sets = 1; |
| 244 | affd->set_size[0] = affvecs; |
| 245 | } |
| 246 | |
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 *
 * Note: @affd is mutated — a default calc_sets() callback is installed when
 * the caller did not provide one, and calc_sets() fills in nr_sets/set_size.
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	/* A driver-provided calc_sets() must stay within the set limit */
	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each sets affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int ret;

		/* curvec doubles as the wrap-around point for this set */
		ret = irq_build_affinity_masks(curvec, this_vecs,
					       curvec, masks);
		if (ret) {
			kfree(masks);
			return NULL;
		}
		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}
| 328 | |
| 329 | /** |
Christoph Hellwig | 212bd84 | 2016-11-08 17:15:02 -0800 | [diff] [blame] | 330 | * irq_calc_affinity_vectors - Calculate the optimal number of vectors |
Michael Hernandez | 6f9a22b | 2017-05-18 10:47:47 -0700 | [diff] [blame] | 331 | * @minvec: The minimum number of vectors available |
Christoph Hellwig | 212bd84 | 2016-11-08 17:15:02 -0800 | [diff] [blame] | 332 | * @maxvec: The maximum number of vectors available |
| 333 | * @affd: Description of the affinity requirements |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 334 | */ |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 335 | unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 336 | const struct irq_affinity *affd) |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 337 | { |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 338 | unsigned int resv = affd->pre_vectors + affd->post_vectors; |
| 339 | unsigned int set_vecs; |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 340 | |
Michael Hernandez | 6f9a22b | 2017-05-18 10:47:47 -0700 | [diff] [blame] | 341 | if (resv > minvec) |
| 342 | return 0; |
| 343 | |
Ming Lei | c66d4bd | 2019-02-16 18:13:09 +0100 | [diff] [blame] | 344 | if (affd->calc_sets) { |
| 345 | set_vecs = maxvec - resv; |
Jens Axboe | 6da4b3a | 2018-11-02 22:59:51 +0800 | [diff] [blame] | 346 | } else { |
| 347 | get_online_cpus(); |
| 348 | set_vecs = cpumask_weight(cpu_possible_mask); |
| 349 | put_online_cpus(); |
| 350 | } |
| 351 | |
Thomas Gleixner | 0145c30 | 2019-02-16 18:13:07 +0100 | [diff] [blame] | 352 | return resv + min(set_vecs, maxvec - resv); |
Thomas Gleixner | 34c3d98 | 2016-09-14 16:18:48 +0200 | [diff] [blame] | 353 | } |