Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * cacheinfo support - processor cache information via sysfs
4 : *
5 : * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
6 : * Author: Sudeep Holla <sudeep.holla@arm.com>
7 : */
8 : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 :
10 : #include <linux/acpi.h>
11 : #include <linux/bitops.h>
12 : #include <linux/cacheinfo.h>
13 : #include <linux/compiler.h>
14 : #include <linux/cpu.h>
15 : #include <linux/device.h>
16 : #include <linux/init.h>
17 : #include <linux/of.h>
18 : #include <linux/sched.h>
19 : #include <linux/slab.h>
20 : #include <linux/smp.h>
21 : #include <linux/sysfs.h>
22 :
23 : /* pointer to per cpu cacheinfo */
24 : static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
25 : #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
26 : #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
27 : #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
28 : #define per_cpu_cacheinfo_idx(cpu, idx) \
29 : (per_cpu_cacheinfo(cpu) + (idx))
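/*
 * Usage sketch (assuming a CPU whose info_list holds, e.g., L1i, L1d
 * and L2 in that order): the last-level leaf can be looked up as
 *
 *	struct cacheinfo *llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
 */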
30 :
31 : /* Set if no cache information is found in DT/ACPI. */
32 : static bool use_arch_info;
33 :
34 0 : struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
35 : {
36 0 : return ci_cacheinfo(cpu);
37 : }
38 :
39 : static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
40 : struct cacheinfo *sib_leaf)
41 : {
42 : /*
43 : * For non-DT/ACPI systems, assume unique level 1 caches and
44 : * system-wide shared caches for all other levels.
45 : */
46 : if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
47 : use_arch_info)
48 0 : return (this_leaf->level != 1) && (sib_leaf->level != 1);
49 :
50 : if ((sib_leaf->attributes & CACHE_ID) &&
51 : (this_leaf->attributes & CACHE_ID))
52 : return sib_leaf->id == this_leaf->id;
53 :
54 : return sib_leaf->fw_token == this_leaf->fw_token;
55 : }
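/*
 * Example of the fallback above: on a hypothetical 4-CPU system with no
 * DT/ACPI cache data (or with use_arch_info set), every level 2 or
 * higher leaf is treated as shared by all CPUs, while level 1 leaves
 * remain private to their own CPU.
 */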
56 :
57 0 : bool last_level_cache_is_valid(unsigned int cpu)
58 : {
59 : struct cacheinfo *llc;
60 :
61 0 : if (!cache_leaves(cpu))
62 : return false;
63 :
64 0 : llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
65 :
66 0 : return (llc->attributes & CACHE_ID) || !!llc->fw_token;
67 :
68 : }
69 :
70 0 : bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
71 : {
72 : struct cacheinfo *llc_x, *llc_y;
73 :
74 0 : if (!last_level_cache_is_valid(cpu_x) ||
75 0 : !last_level_cache_is_valid(cpu_y))
76 : return false;
77 :
78 0 : llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
79 0 : llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
80 :
81 0 : return cache_leaves_are_shared(llc_x, llc_y);
82 : }
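/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	bool llc_shared = last_level_cache_is_shared(cpu_x, cpu_y);
 *
 * CPU topology code, for instance, can use this to group CPUs into
 * LLC sibling masks.
 */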
83 :
84 : #ifdef CONFIG_OF
85 :
86 : static bool of_check_cache_nodes(struct device_node *np);
87 :
88 : /* OF properties to query for a given cache type */
89 : struct cache_type_info {
90 : const char *size_prop;
91 : const char *line_size_props[2];
92 : const char *nr_sets_prop;
93 : };
94 :
95 : static const struct cache_type_info cache_type_info[] = {
96 : {
97 : .size_prop = "cache-size",
98 : .line_size_props = { "cache-line-size",
99 : "cache-block-size", },
100 : .nr_sets_prop = "cache-sets",
101 : }, {
102 : .size_prop = "i-cache-size",
103 : .line_size_props = { "i-cache-line-size",
104 : "i-cache-block-size", },
105 : .nr_sets_prop = "i-cache-sets",
106 : }, {
107 : .size_prop = "d-cache-size",
108 : .line_size_props = { "d-cache-line-size",
109 : "d-cache-block-size", },
110 : .nr_sets_prop = "d-cache-sets",
111 : },
112 : };
113 :
114 : static inline int get_cacheinfo_idx(enum cache_type type)
115 : {
116 : if (type == CACHE_TYPE_UNIFIED)
117 : return 0;
118 : return type;
119 : }
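/*
 * Resulting mapping (assuming the usual enum cache_type encoding with
 * CACHE_TYPE_INST = BIT(0), CACHE_TYPE_DATA = BIT(1) and
 * CACHE_TYPE_UNIFIED = BIT(2)):
 *
 *	CACHE_TYPE_UNIFIED -> cache_type_info[0]  ("cache-size", ...)
 *	CACHE_TYPE_INST    -> cache_type_info[1]  ("i-cache-size", ...)
 *	CACHE_TYPE_DATA    -> cache_type_info[2]  ("d-cache-size", ...)
 */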
120 :
121 : static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
122 : {
123 : const char *propname;
124 : int ct_idx;
125 :
126 : ct_idx = get_cacheinfo_idx(this_leaf->type);
127 : propname = cache_type_info[ct_idx].size_prop;
128 :
129 : of_property_read_u32(np, propname, &this_leaf->size);
130 : }
131 :
132 : /* not cache_line_size() because that's a macro in include/linux/cache.h */
133 : static void cache_get_line_size(struct cacheinfo *this_leaf,
134 : struct device_node *np)
135 : {
136 : int i, lim, ct_idx;
137 :
138 : ct_idx = get_cacheinfo_idx(this_leaf->type);
139 : lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
140 :
141 : for (i = 0; i < lim; i++) {
142 : int ret;
143 : u32 line_size;
144 : const char *propname;
145 :
146 : propname = cache_type_info[ct_idx].line_size_props[i];
147 : ret = of_property_read_u32(np, propname, &line_size);
148 : if (!ret) {
149 : this_leaf->coherency_line_size = line_size;
150 : break;
151 : }
152 : }
153 : }
154 :
155 : static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
156 : {
157 : const char *propname;
158 : int ct_idx;
159 :
160 : ct_idx = get_cacheinfo_idx(this_leaf->type);
161 : propname = cache_type_info[ct_idx].nr_sets_prop;
162 :
163 : of_property_read_u32(np, propname, &this_leaf->number_of_sets);
164 : }
165 :
166 : static void cache_associativity(struct cacheinfo *this_leaf)
167 : {
168 : unsigned int line_size = this_leaf->coherency_line_size;
169 : unsigned int nr_sets = this_leaf->number_of_sets;
170 : unsigned int size = this_leaf->size;
171 :
172 : /*
173 : * If the cache is fully associative, there is no need to
174 : * check the other properties.
175 : */
176 : if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
177 : this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
178 : }
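/*
 * Worked example (illustrative numbers): a 32 KiB cache with 64-byte
 * lines and 128 sets gives ways_of_associativity = (32768 / 128) / 64 = 4;
 * with cache-sets = <1> (fully associative) the field is left at 0.
 */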
179 :
180 : static bool cache_node_is_unified(struct cacheinfo *this_leaf,
181 : struct device_node *np)
182 : {
183 : return of_property_read_bool(np, "cache-unified");
184 : }
185 :
186 : static void cache_of_set_props(struct cacheinfo *this_leaf,
187 : struct device_node *np)
188 : {
189 : /*
190 : * init_cache_level must set up the cache level correctly,
191 : * overriding the architecturally specified levels, so if
192 : * the type is still NONE at this stage, it should be unified.
193 : */
194 : if (this_leaf->type == CACHE_TYPE_NOCACHE &&
195 : cache_node_is_unified(this_leaf, np))
196 : this_leaf->type = CACHE_TYPE_UNIFIED;
197 : cache_size(this_leaf, np);
198 : cache_get_line_size(this_leaf, np);
199 : cache_nr_sets(this_leaf, np);
200 : cache_associativity(this_leaf);
201 : }
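/*
 * Illustrative result (assumed DT values, a sketch rather than real
 * platform data): a unified L2 node carrying cache-size = <0x80000>,
 * cache-line-size = <64> and cache-sets = <1024> ends up with
 * type = CACHE_TYPE_UNIFIED, size = 512 KiB, coherency_line_size = 64,
 * number_of_sets = 1024 and ways_of_associativity = (524288 / 1024) / 64 = 8.
 */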
202 :
203 : static int cache_setup_of_node(unsigned int cpu)
204 : {
205 : struct device_node *np, *prev;
206 : struct cacheinfo *this_leaf;
207 : unsigned int index = 0;
208 :
209 : np = of_cpu_device_node_get(cpu);
210 : if (!np) {
211 : pr_err("Failed to find cpu%d device node\n", cpu);
212 : return -ENOENT;
213 : }
214 :
215 : if (!of_check_cache_nodes(np)) {
216 : of_node_put(np);
217 : return -ENOENT;
218 : }
219 :
220 : prev = np;
221 :
222 : while (index < cache_leaves(cpu)) {
223 : this_leaf = per_cpu_cacheinfo_idx(cpu, index);
224 : if (this_leaf->level != 1) {
225 : np = of_find_next_cache_node(np);
226 : of_node_put(prev);
227 : prev = np;
228 : if (!np)
229 : break;
230 : }
231 : cache_of_set_props(this_leaf, np);
232 : this_leaf->fw_token = np;
233 : index++;
234 : }
235 :
236 : of_node_put(np);
237 :
238 : if (index != cache_leaves(cpu)) /* not all OF nodes populated */
239 : return -ENOENT;
240 :
241 : return 0;
242 : }
243 :
244 : static bool of_check_cache_nodes(struct device_node *np)
245 : {
246 : struct device_node *next;
247 :
248 : if (of_property_present(np, "cache-size") ||
249 : of_property_present(np, "i-cache-size") ||
250 : of_property_present(np, "d-cache-size") ||
251 : of_property_present(np, "cache-unified"))
252 : return true;
253 :
254 : next = of_find_next_cache_node(np);
255 : if (next) {
256 : of_node_put(next);
257 : return true;
258 : }
259 :
260 : return false;
261 : }
262 :
263 : static int of_count_cache_leaves(struct device_node *np)
264 : {
265 : unsigned int leaves = 0;
266 :
267 : if (of_property_read_bool(np, "cache-size"))
268 : ++leaves;
269 : if (of_property_read_bool(np, "i-cache-size"))
270 : ++leaves;
271 : if (of_property_read_bool(np, "d-cache-size"))
272 : ++leaves;
273 :
274 : if (!leaves) {
275 : /* The '[i-|d-|]cache-size' property is required, but
276 : * if absent, fall back on the 'cache-unified' property.
277 : */
278 : if (of_property_read_bool(np, "cache-unified"))
279 : return 1;
280 : else
281 : return 2;
282 : }
283 :
284 : return leaves;
285 : }
286 :
287 : int init_of_cache_level(unsigned int cpu)
288 : {
289 : struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
290 : struct device_node *np = of_cpu_device_node_get(cpu);
291 : struct device_node *prev = NULL;
292 : unsigned int levels = 0, leaves, level;
293 :
294 : if (!of_check_cache_nodes(np)) {
295 : of_node_put(np);
296 : return -ENOENT;
297 : }
298 :
299 : leaves = of_count_cache_leaves(np);
300 : if (leaves > 0)
301 : levels = 1;
302 :
303 : prev = np;
304 : while ((np = of_find_next_cache_node(np))) {
305 : of_node_put(prev);
306 : prev = np;
307 : if (!of_device_is_compatible(np, "cache"))
308 : goto err_out;
309 : if (of_property_read_u32(np, "cache-level", &level))
310 : goto err_out;
311 : if (level <= levels)
312 : goto err_out;
313 :
314 : leaves += of_count_cache_leaves(np);
315 : levels = level;
316 : }
317 :
318 : of_node_put(np);
319 : this_cpu_ci->num_levels = levels;
320 : this_cpu_ci->num_leaves = leaves;
321 :
322 : return 0;
323 :
324 : err_out:
325 : of_node_put(np);
326 : return -EINVAL;
327 : }
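/*
 * Worked example (assumed topology): a cpu node carrying both
 * "i-cache-size" and "d-cache-size" contributes two level 1 leaves; a
 * next-level-cache node compatible with "cache", marked "cache-unified"
 * and carrying cache-level = <2> adds one more leaf, giving
 * num_levels = 2 and num_leaves = 3.
 */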
328 :
329 : #else
330 : static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
331 0 : int init_of_cache_level(unsigned int cpu) { return 0; }
332 : #endif
333 :
334 0 : int __weak cache_setup_acpi(unsigned int cpu)
335 : {
336 0 : return -ENOTSUPP;
337 : }
338 :
339 : unsigned int coherency_max_size;
340 :
341 : static int cache_setup_properties(unsigned int cpu)
342 : {
343 : int ret = 0;
344 :
345 : if (of_have_populated_dt())
346 : ret = cache_setup_of_node(cpu);
347 : else if (!acpi_disabled)
348 : ret = cache_setup_acpi(cpu);
349 :
350 : // Assume no cache information is available in DT/ACPI from now on.
351 : if (ret && use_arch_cache_info())
352 : use_arch_info = true;
353 :
354 : return ret;
355 : }
356 :
357 0 : static int cache_shared_cpu_map_setup(unsigned int cpu)
358 : {
359 0 : struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
360 : struct cacheinfo *this_leaf, *sib_leaf;
361 : unsigned int index, sib_index;
362 0 : int ret = 0;
363 :
364 0 : if (this_cpu_ci->cpu_map_populated)
365 : return 0;
366 :
367 : /*
368 : * Skip setting up cache properties if the LLC is valid; we only need
369 : * to update the shared cpu_map if the cache attributes were
370 : * populated early, before all the CPUs were brought online.
371 : */
372 0 : if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
373 : ret = cache_setup_properties(cpu);
374 : if (ret)
375 : return ret;
376 : }
377 :
378 0 : for (index = 0; index < cache_leaves(cpu); index++) {
379 : unsigned int i;
380 :
381 0 : this_leaf = per_cpu_cacheinfo_idx(cpu, index);
382 :
383 0 : cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
384 0 : for_each_online_cpu(i) {
385 0 : struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
386 :
387 0 : if (i == cpu || !sib_cpu_ci->info_list)
388 0 : continue;/* skip if itself or no cacheinfo */
389 0 : for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
390 0 : sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
391 :
392 : /*
393 : * Comparing cache IDs only makes sense if the leaves
394 : * belong to the same cache level of same type. Skip
395 : * the check if level and type do not match.
396 : */
397 0 : if (sib_leaf->level != this_leaf->level ||
398 0 : sib_leaf->type != this_leaf->type)
399 0 : continue;
400 :
401 0 : if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
402 0 : cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
403 0 : cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
404 : break;
405 : }
406 : }
407 : }
408 : /* record the maximum cache line size */
409 0 : if (this_leaf->coherency_line_size > coherency_max_size)
410 0 : coherency_max_size = this_leaf->coherency_line_size;
411 : }
412 :
413 : /* shared_cpu_map is now populated for the cpu */
414 0 : this_cpu_ci->cpu_map_populated = true;
415 0 : return 0;
416 : }
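/*
 * Illustrative outcome (assumed 4-CPU system: two clusters of two CPUs,
 * private L1 and a per-cluster L2): once every online CPU has passed
 * through here, cpu0's L1 leaves have shared_cpu_map = 0x1, its L2 leaf
 * has shared_cpu_map = 0x3, and coherency_max_size holds the largest
 * coherency_line_size seen across all leaves.
 */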
417 :
418 0 : static void cache_shared_cpu_map_remove(unsigned int cpu)
419 : {
420 0 : struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
421 : struct cacheinfo *this_leaf, *sib_leaf;
422 : unsigned int sibling, index, sib_index;
423 :
424 0 : for (index = 0; index < cache_leaves(cpu); index++) {
425 0 : this_leaf = per_cpu_cacheinfo_idx(cpu, index);
426 0 : for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
427 0 : struct cpu_cacheinfo *sib_cpu_ci =
428 : get_cpu_cacheinfo(sibling);
429 :
430 0 : if (sibling == cpu || !sib_cpu_ci->info_list)
431 0 : continue;/* skip if itself or no cacheinfo */
432 :
433 0 : for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
434 0 : sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
435 :
436 : /*
437 : * Comparing cache IDs only makes sense if the leaves
438 : * belong to the same cache level of same type. Skip
439 : * the check if level and type do not match.
440 : */
441 0 : if (sib_leaf->level != this_leaf->level ||
442 0 : sib_leaf->type != this_leaf->type)
443 0 : continue;
444 :
445 0 : if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
446 0 : cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
447 0 : cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
448 : break;
449 : }
450 : }
451 : }
452 : }
453 :
454 : /* cpu is no longer populated in the shared map */
455 0 : this_cpu_ci->cpu_map_populated = false;
456 0 : }
457 :
458 : static void free_cache_attributes(unsigned int cpu)
459 : {
460 0 : if (!per_cpu_cacheinfo(cpu))
461 : return;
462 :
463 0 : cache_shared_cpu_map_remove(cpu);
464 : }
465 :
466 0 : int __weak early_cache_level(unsigned int cpu)
467 : {
468 0 : return -ENOENT;
469 : }
470 :
471 1 : int __weak init_cache_level(unsigned int cpu)
472 : {
473 1 : return -ENOENT;
474 : }
475 :
476 0 : int __weak populate_cache_leaves(unsigned int cpu)
477 : {
478 0 : return -ENOENT;
479 : }
480 :
481 : static inline
482 0 : int allocate_cache_info(int cpu)
483 : {
484 0 : per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
485 : sizeof(struct cacheinfo), GFP_ATOMIC);
486 0 : if (!per_cpu_cacheinfo(cpu)) {
487 0 : cache_leaves(cpu) = 0;
488 : return -ENOMEM;
489 : }
490 :
491 : return 0;
492 : }
493 :
494 0 : int fetch_cache_info(unsigned int cpu)
495 : {
496 0 : struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
497 0 : unsigned int levels = 0, split_levels = 0;
498 : int ret;
499 :
500 : if (acpi_disabled) {
501 0 : ret = init_of_cache_level(cpu);
502 : } else {
503 : ret = acpi_get_cache_info(cpu, &levels, &split_levels);
504 : if (!ret) {
505 : this_cpu_ci->num_levels = levels;
506 : /*
507 : * This assumes that:
508 : * - there cannot be any split caches (data/instruction)
509 : * above a unified cache
510 : * - data/instruction caches come in pairs
511 : */
512 : this_cpu_ci->num_leaves = levels + split_levels;
513 : }
514 : }
515 :
516 0 : if (ret || !cache_leaves(cpu)) {
517 0 : ret = early_cache_level(cpu);
518 0 : if (ret)
519 : return ret;
520 :
521 0 : if (!cache_leaves(cpu))
522 : return -ENOENT;
523 :
524 0 : this_cpu_ci->early_ci_levels = true;
525 : }
526 :
527 0 : return allocate_cache_info(cpu);
528 : }
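/*
 * Worked example (assumed ACPI PPTT contents): acpi_get_cache_info()
 * reporting levels = 3 and split_levels = 1 (only L1 split into
 * instruction and data) yields num_leaves = 3 + 1 = 4, i.e. L1i, L1d,
 * L2 and L3.
 */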
529 :
530 1 : static inline int init_level_allocate_ci(unsigned int cpu)
531 : {
532 1 : unsigned int early_leaves = cache_leaves(cpu);
533 :
534 : /* Since early initialization/allocation of the cacheinfo is allowed
535 : * via fetch_cache_info() and this also gets called as CPU hotplug
536 : * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
537 : * as it will happen only once (the cacheinfo memory is never freed).
538 : * Just populate the cacheinfo. However, if the cacheinfo has been
539 : * allocated early through the arch-specific early_cache_level() call,
540 : * there is a chance the info is wrong (this can happen on arm64). In
541 : * that case, call init_cache_level() anyway to give the arch-specific
542 : * code a chance to make things right.
543 : */
544 1 : if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
545 : return 0;
546 :
547 1 : if (init_cache_level(cpu) || !cache_leaves(cpu))
548 : return -ENOENT;
549 :
550 : /*
551 : * Now that we have properly initialized the cache level info, make
552 : * sure we don't try to do that again the next time we are called
553 : * (e.g. as CPU hotplug callbacks).
554 : */
555 0 : ci_cacheinfo(cpu)->early_ci_levels = false;
556 :
557 0 : if (cache_leaves(cpu) <= early_leaves)
558 : return 0;
559 :
560 0 : kfree(per_cpu_cacheinfo(cpu));
561 0 : return allocate_cache_info(cpu);
562 : }
563 :
564 1 : int detect_cache_attributes(unsigned int cpu)
565 : {
566 : int ret;
567 :
568 1 : ret = init_level_allocate_ci(cpu);
569 1 : if (ret)
570 : return ret;
571 :
572 : /*
573 : * If the LLC is valid, the cache leaves were already populated, so just
574 : * go on and update the cpu map.
575 : */
576 0 : if (!last_level_cache_is_valid(cpu)) {
577 : /*
578 : * populate_cache_leaves() may completely set up the cache leaves and
579 : * shared_cpu_map, or it may leave them partially set up.
580 : */
581 0 : ret = populate_cache_leaves(cpu);
582 0 : if (ret)
583 : goto free_ci;
584 : }
585 :
586 : /*
587 : * For systems using DT for the cache hierarchy, fw_token
588 : * and shared_cpu_map will be set up here only if they are
589 : * not already populated.
590 : */
591 0 : ret = cache_shared_cpu_map_setup(cpu);
592 0 : if (ret) {
593 0 : pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
594 0 : goto free_ci;
595 : }
596 :
597 : return 0;
598 :
599 : free_ci:
600 : free_cache_attributes(cpu);
601 : return ret;
602 : }
603 :
604 : /* pointer to cpuX/cache device */
605 : static DEFINE_PER_CPU(struct device *, ci_cache_dev);
606 : #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
607 :
608 : static cpumask_t cache_dev_map;
609 :
610 : /* pointer to array of devices for cpuX/cache/indexY */
611 : static DEFINE_PER_CPU(struct device **, ci_index_dev);
612 : #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
613 : #define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
614 :
615 : #define show_one(file_name, object) \
616 : static ssize_t file_name##_show(struct device *dev, \
617 : struct device_attribute *attr, char *buf) \
618 : { \
619 : struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
620 : return sysfs_emit(buf, "%u\n", this_leaf->object); \
621 : }
622 :
623 0 : show_one(id, id);
624 0 : show_one(level, level);
625 0 : show_one(coherency_line_size, coherency_line_size);
626 0 : show_one(number_of_sets, number_of_sets);
627 0 : show_one(physical_line_partition, physical_line_partition);
628 0 : show_one(ways_of_associativity, ways_of_associativity);
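/*
 * Expansion sketch: show_one(level, level) above generates
 *
 *	static ssize_t level_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sysfs_emit(buf, "%u\n", this_leaf->level);
 *	}
 *
 * which DEVICE_ATTR_RO(level) below ties to the "level" sysfs file.
 */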
629 :
630 0 : static ssize_t size_show(struct device *dev,
631 : struct device_attribute *attr, char *buf)
632 : {
633 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
634 :
635 0 : return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
636 : }
637 :
638 0 : static ssize_t shared_cpu_map_show(struct device *dev,
639 : struct device_attribute *attr, char *buf)
640 : {
641 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
642 0 : const struct cpumask *mask = &this_leaf->shared_cpu_map;
643 :
644 0 : return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
645 : }
646 :
647 0 : static ssize_t shared_cpu_list_show(struct device *dev,
648 : struct device_attribute *attr, char *buf)
649 : {
650 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
651 0 : const struct cpumask *mask = &this_leaf->shared_cpu_map;
652 :
653 0 : return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
654 : }
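/*
 * Illustrative output (assumed values): for a leaf shared by CPUs 0-3
 * on a 4-CPU system, shared_cpu_map reads "f" (a hex mask sized to
 * nr_cpu_ids) and shared_cpu_list reads "0-3", typically under
 * /sys/devices/system/cpu/cpuX/cache/indexY/.
 */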
655 :
656 0 : static ssize_t type_show(struct device *dev,
657 : struct device_attribute *attr, char *buf)
658 : {
659 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
660 : const char *output;
661 :
662 0 : switch (this_leaf->type) {
663 : case CACHE_TYPE_DATA:
664 : output = "Data";
665 : break;
666 : case CACHE_TYPE_INST:
667 0 : output = "Instruction";
668 0 : break;
669 : case CACHE_TYPE_UNIFIED:
670 0 : output = "Unified";
671 0 : break;
672 : default:
673 : return -EINVAL;
674 : }
675 :
676 0 : return sysfs_emit(buf, "%s\n", output);
677 : }
678 :
679 0 : static ssize_t allocation_policy_show(struct device *dev,
680 : struct device_attribute *attr, char *buf)
681 : {
682 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
683 0 : unsigned int ci_attr = this_leaf->attributes;
684 : const char *output;
685 :
686 0 : if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
687 : output = "ReadWriteAllocate";
688 0 : else if (ci_attr & CACHE_READ_ALLOCATE)
689 : output = "ReadAllocate";
690 0 : else if (ci_attr & CACHE_WRITE_ALLOCATE)
691 : output = "WriteAllocate";
692 : else
693 : return 0;
694 :
695 0 : return sysfs_emit(buf, "%s\n", output);
696 : }
697 :
698 0 : static ssize_t write_policy_show(struct device *dev,
699 : struct device_attribute *attr, char *buf)
700 : {
701 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
702 0 : unsigned int ci_attr = this_leaf->attributes;
703 0 : int n = 0;
704 :
705 0 : if (ci_attr & CACHE_WRITE_THROUGH)
706 0 : n = sysfs_emit(buf, "WriteThrough\n");
707 0 : else if (ci_attr & CACHE_WRITE_BACK)
708 0 : n = sysfs_emit(buf, "WriteBack\n");
709 0 : return n;
710 : }
711 :
712 : static DEVICE_ATTR_RO(id);
713 : static DEVICE_ATTR_RO(level);
714 : static DEVICE_ATTR_RO(type);
715 : static DEVICE_ATTR_RO(coherency_line_size);
716 : static DEVICE_ATTR_RO(ways_of_associativity);
717 : static DEVICE_ATTR_RO(number_of_sets);
718 : static DEVICE_ATTR_RO(size);
719 : static DEVICE_ATTR_RO(allocation_policy);
720 : static DEVICE_ATTR_RO(write_policy);
721 : static DEVICE_ATTR_RO(shared_cpu_map);
722 : static DEVICE_ATTR_RO(shared_cpu_list);
723 : static DEVICE_ATTR_RO(physical_line_partition);
724 :
725 : static struct attribute *cache_default_attrs[] = {
726 : &dev_attr_id.attr,
727 : &dev_attr_type.attr,
728 : &dev_attr_level.attr,
729 : &dev_attr_shared_cpu_map.attr,
730 : &dev_attr_shared_cpu_list.attr,
731 : &dev_attr_coherency_line_size.attr,
732 : &dev_attr_ways_of_associativity.attr,
733 : &dev_attr_number_of_sets.attr,
734 : &dev_attr_size.attr,
735 : &dev_attr_allocation_policy.attr,
736 : &dev_attr_write_policy.attr,
737 : &dev_attr_physical_line_partition.attr,
738 : NULL
739 : };
740 :
741 : static umode_t
742 0 : cache_default_attrs_is_visible(struct kobject *kobj,
743 : struct attribute *attr, int unused)
744 : {
745 0 : struct device *dev = kobj_to_dev(kobj);
746 0 : struct cacheinfo *this_leaf = dev_get_drvdata(dev);
747 0 : const struct cpumask *mask = &this_leaf->shared_cpu_map;
748 0 : umode_t mode = attr->mode;
749 :
750 0 : if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
751 : return mode;
752 0 : if ((attr == &dev_attr_type.attr) && this_leaf->type)
753 : return mode;
754 0 : if ((attr == &dev_attr_level.attr) && this_leaf->level)
755 : return mode;
756 0 : if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
757 : return mode;
758 0 : if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
759 : return mode;
760 0 : if ((attr == &dev_attr_coherency_line_size.attr) &&
761 0 : this_leaf->coherency_line_size)
762 : return mode;
763 0 : if ((attr == &dev_attr_ways_of_associativity.attr) &&
764 0 : this_leaf->size) /* allow 0 = full associativity */
765 : return mode;
766 0 : if ((attr == &dev_attr_number_of_sets.attr) &&
767 0 : this_leaf->number_of_sets)
768 : return mode;
769 0 : if ((attr == &dev_attr_size.attr) && this_leaf->size)
770 : return mode;
771 0 : if ((attr == &dev_attr_write_policy.attr) &&
772 0 : (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
773 : return mode;
774 0 : if ((attr == &dev_attr_allocation_policy.attr) &&
775 0 : (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
776 : return mode;
777 0 : if ((attr == &dev_attr_physical_line_partition.attr) &&
778 0 : this_leaf->physical_line_partition)
779 : return mode;
780 :
781 0 : return 0;
782 : }
783 :
784 : static const struct attribute_group cache_default_group = {
785 : .attrs = cache_default_attrs,
786 : .is_visible = cache_default_attrs_is_visible,
787 : };
788 :
789 : static const struct attribute_group *cache_default_groups[] = {
790 : &cache_default_group,
791 : NULL,
792 : };
793 :
794 : static const struct attribute_group *cache_private_groups[] = {
795 : &cache_default_group,
796 : NULL, /* Place holder for private group */
797 : NULL,
798 : };
799 :
800 : const struct attribute_group *
801 0 : __weak cache_get_priv_group(struct cacheinfo *this_leaf)
802 : {
803 0 : return NULL;
804 : }
805 :
806 : static const struct attribute_group **
807 : cache_get_attribute_groups(struct cacheinfo *this_leaf)
808 : {
809 0 : const struct attribute_group *priv_group =
810 : cache_get_priv_group(this_leaf);
811 :
812 0 : if (!priv_group)
813 : return cache_default_groups;
814 :
815 0 : if (!cache_private_groups[1])
816 0 : cache_private_groups[1] = priv_group;
817 :
818 : return cache_private_groups;
819 : }
820 :
821 : /* Add/Remove cache interface for CPU device */
822 0 : static void cpu_cache_sysfs_exit(unsigned int cpu)
823 : {
824 : int i;
825 : struct device *ci_dev;
826 :
827 0 : if (per_cpu_index_dev(cpu)) {
828 0 : for (i = 0; i < cache_leaves(cpu); i++) {
829 0 : ci_dev = per_cache_index_dev(cpu, i);
830 0 : if (!ci_dev)
831 0 : continue;
832 0 : device_unregister(ci_dev);
833 : }
834 0 : kfree(per_cpu_index_dev(cpu));
835 0 : per_cpu_index_dev(cpu) = NULL;
836 : }
837 0 : device_unregister(per_cpu_cache_dev(cpu));
838 0 : per_cpu_cache_dev(cpu) = NULL;
839 0 : }
840 :
841 0 : static int cpu_cache_sysfs_init(unsigned int cpu)
842 : {
843 0 : struct device *dev = get_cpu_device(cpu);
844 :
845 0 : if (per_cpu_cacheinfo(cpu) == NULL)
846 : return -ENOENT;
847 :
848 0 : per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
849 0 : if (IS_ERR(per_cpu_cache_dev(cpu)))
850 0 : return PTR_ERR(per_cpu_cache_dev(cpu));
851 :
852 : /* Allocate all required memory */
853 0 : per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
854 : sizeof(struct device *), GFP_KERNEL);
855 0 : if (unlikely(per_cpu_index_dev(cpu) == NULL))
856 : goto err_out;
857 :
858 : return 0;
859 :
860 : err_out:
861 0 : cpu_cache_sysfs_exit(cpu);
862 0 : return -ENOMEM;
863 : }
864 :
865 0 : static int cache_add_dev(unsigned int cpu)
866 : {
867 : unsigned int i;
868 : int rc;
869 : struct device *ci_dev, *parent;
870 : struct cacheinfo *this_leaf;
871 : const struct attribute_group **cache_groups;
872 :
873 0 : rc = cpu_cache_sysfs_init(cpu);
874 0 : if (unlikely(rc < 0))
875 : return rc;
876 :
877 0 : parent = per_cpu_cache_dev(cpu);
878 0 : for (i = 0; i < cache_leaves(cpu); i++) {
879 0 : this_leaf = per_cpu_cacheinfo_idx(cpu, i);
880 0 : if (this_leaf->disable_sysfs)
881 0 : continue;
882 0 : if (this_leaf->type == CACHE_TYPE_NOCACHE)
883 : break;
884 0 : cache_groups = cache_get_attribute_groups(this_leaf);
885 0 : ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
886 : "index%1u", i);
887 0 : if (IS_ERR(ci_dev)) {
888 0 : rc = PTR_ERR(ci_dev);
889 : goto err;
890 : }
891 0 : per_cache_index_dev(cpu, i) = ci_dev;
892 : }
893 : cpumask_set_cpu(cpu, &cache_dev_map);
894 :
895 : return 0;
896 : err:
897 0 : cpu_cache_sysfs_exit(cpu);
898 0 : return rc;
899 : }
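/*
 * Resulting layout (illustrative): each online CPU gets
 * /sys/devices/system/cpu/cpuX/cache/indexY/ populated with the
 * attributes from cache_default_attrs (type, level, size,
 * shared_cpu_map, ...) plus any architecture-private group returned by
 * cache_get_priv_group(); leaves with disable_sysfs set are skipped.
 */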
900 :
901 1 : static int cacheinfo_cpu_online(unsigned int cpu)
902 : {
903 1 : int rc = detect_cache_attributes(cpu);
904 :
905 1 : if (rc)
906 : return rc;
907 0 : rc = cache_add_dev(cpu);
908 0 : if (rc)
909 : free_cache_attributes(cpu);
910 : return rc;
911 : }
912 :
913 0 : static int cacheinfo_cpu_pre_down(unsigned int cpu)
914 : {
915 0 : if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
916 0 : cpu_cache_sysfs_exit(cpu);
917 :
918 0 : free_cache_attributes(cpu);
919 0 : return 0;
920 : }
921 :
922 1 : static int __init cacheinfo_sysfs_init(void)
923 : {
924 1 : return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
925 : "base/cacheinfo:online",
926 : cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
927 : }
928 : device_initcall(cacheinfo_sysfs_init);
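/*
 * Lifecycle summary: cacheinfo_cpu_online() runs as each CPU comes up,
 * detecting the cache attributes and creating the cpuX/cache sysfs
 * hierarchy; cacheinfo_cpu_pre_down() removes the sysfs nodes and
 * clears the CPU from its siblings' shared maps before it goes offline.
 */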