/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0	/* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D 1	/* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA 3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

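/*
 * A minimal sketch of the device tree properties consulted above, for
 * a CPU with split L1 caches and a unified L2 (all values are
 * hypothetical, for illustration only):
 *
 *	cpu@0 {
 *		i-cache-size = <0x8000>;	// 32K
 *		i-cache-line-size = <32>;
 *		i-cache-sets = <64>;
 *		d-cache-size = <0x8000>;
 *		d-cache-line-size = <32>;
 *		d-cache-sets = <128>;
 *		l2-cache = <&L2_0>;		// phandle to next level
 *	};
 *
 *	L2_0: l2-cache {
 *		cache-unified;
 *		d-cache-size = <0x80000>;	// OF-style d-cache-* naming
 *		d-cache-line-size = <128>;
 *		d-cache-sets = <512>;
 *	};
 */
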
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

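/*
 * Mark @cpu in the shared_cpu_map of every cache on its local chain,
 * from the L1 down; called when the CPU is brought online.  The
 * WARN_ONCE catches double accounting, which would indicate a
 * corrupted local list.
 */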
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

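/*
 * ways = size / (nr_sets * line_size).  For example (figures are
 * illustrative), a 32K cache with 128 sets of 64-byte lines yields
 * 32768 / (128 * 64) = 4, i.e. 4-way set-associative.
 */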
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches: the instruction and data
 * caches of a split pair share one OF node, with the data cache at
 * the head of the pair (see cache_do_one_devnode_split), so return
 * the dcache that directly precedes this cache on a local list */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but Open
 * Firmware systems use d-cache-size, etc.  Check on initialization
 * for which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If a third type
 * turns up, there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

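/*
 * Append @bigger to the tail of @smaller's next_local chain unless it
 * is already present, preserving the smallest-to-largest ordering of
 * the local list.
 */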
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

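/*
 * Starting from a CPU's L1, instantiate and link a cache object for
 * each larger cache in the hierarchy.  of_find_next_cache_node()
 * follows the "l2-cache"/"next-level-cache" phandle, so each loop
 * iteration descends one level (L1 -> L2 -> L3 ...).
 */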
static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

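/*
 * The "cache" directory created above and the "index%d" subdirectories
 * created below combine into a hierarchy modelled on x86
 * intel_cacheinfo, e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 */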
static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

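/*
 * Walk back up the kobject hierarchy (indexN -> cache -> cpuN) to the
 * per-CPU device, whose ->id is the CPU number.
 */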
static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache.  The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int ret, cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
			cpumask_pr_args(mask));
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* kobject_put() invokes cache_index_release(), which
		 * frees index_dir; an additional kfree() here would
		 * be a double free. */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

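/*
 * Build (or reuse) the chain of cache objects for @cpu_id and expose
 * it through sysfs.  Expected to be called from the CPU online path
 * with hotplug serialization in effect (see the cache_list comment).
 */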
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */