LCOV coverage report for drivers/base/cacheinfo.c (test: coverage.info, 2023-03-27 20:00:47): lines 10/194 hit (5.2 %), functions 4/32 hit (12.5 %).

// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)         \
                                (per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT/ACPI systems, assume unique level 1 caches and
         * system-wide shared caches for all other levels. This is used
         * only if arch-specific code has not populated shared_cpu_map.
         */
        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
                return !(this_leaf->level == 1);

        if ((sib_leaf->attributes & CACHE_ID) &&
            (this_leaf->attributes & CACHE_ID))
                return sib_leaf->id == this_leaf->id;

        return sib_leaf->fw_token == this_leaf->fw_token;
}

bool last_level_cache_is_valid(unsigned int cpu)
{
        struct cacheinfo *llc;

        if (!cache_leaves(cpu))
                return false;

        llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

        return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
        struct cacheinfo *llc_x, *llc_y;

        if (!last_level_cache_is_valid(cpu_x) ||
            !last_level_cache_is_valid(cpu_y))
                return false;

        llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
        llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

        return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};
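/*
 * For illustration (hypothetical values): a unified L2 described in the
 * device tree as
 *
 *      l2-cache {
 *              compatible = "cache";
 *              cache-unified;
 *              cache-level = <2>;
 *              cache-size = <0x80000>;         // 512 KiB
 *              cache-line-size = <64>;
 *              cache-sets = <2048>;
 *      };
 *
 * is parsed through the first entry above ("cache-size",
 * "cache-line-size"/"cache-block-size", "cache-sets").
 */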

static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
                                struct device_node *np)
{
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                int ret;
                u32 line_size;
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                ret = of_property_read_u32(np, propname, &line_size);
                if (!ret) {
                        this_leaf->coherency_line_size = line_size;
                        break;
                }
        }
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
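/*
 * Worked example with hypothetical numbers: a 32 KiB cache with 64-byte
 * lines and 128 sets yields (32768 / 128) / 64 = 4 ways. A "cache-sets"
 * value of 1 denotes a fully associative cache, so the computation above
 * is skipped and ways_of_associativity is left at zero.
 */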

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
                                  struct device_node *np)
{
        return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
                               struct device_node *np)
{
        /*
         * init_cache_level() must set up the cache level correctly,
         * overriding the architecturally specified levels, so if the
         * type is still NOCACHE at this stage it should be unified.
         */
        if (this_leaf->type == CACHE_TYPE_NOCACHE &&
            cache_node_is_unified(this_leaf, np))
                this_leaf->type = CACHE_TYPE_UNIFIED;
        cache_size(this_leaf, np);
        cache_get_line_size(this_leaf, np);
        cache_nr_sets(this_leaf, np);
        cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np, *prev;
        struct cacheinfo *this_leaf;
        unsigned int index = 0;

        np = of_cpu_device_node_get(cpu);
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        prev = np;

        while (index < cache_leaves(cpu)) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                if (this_leaf->level != 1) {
                        np = of_find_next_cache_node(np);
                        of_node_put(prev);
                        prev = np;
                        if (!np)
                                break;
                }
                cache_of_set_props(this_leaf, np);
                this_leaf->fw_token = np;
                index++;
        }

        of_node_put(np);

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}

static int of_count_cache_leaves(struct device_node *np)
{
        unsigned int leaves = 0;

        if (of_property_read_bool(np, "cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "i-cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "d-cache-size"))
                ++leaves;

        if (!leaves) {
                /*
                 * The '[i-|d-|]cache-size' property is required, but
                 * if absent, fall back on the 'cache-unified' property.
                 */
                if (of_property_read_bool(np, "cache-unified"))
                        return 1;
                else
                        return 2;
        }

        return leaves;
}
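/*
 * For example, with hypothetical nodes: a CPU node carrying both
 * "i-cache-size" and "d-cache-size" counts as two leaves; a cache node
 * with no size properties at all counts as one (unified) leaf when it
 * has "cache-unified", and is otherwise assumed to be a split pair of
 * two leaves.
 */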

int init_of_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        unsigned int levels = 0, leaves, level;

        leaves = of_count_cache_leaves(np);
        if (leaves > 0)
                levels = 1;

        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;
                if (!of_device_is_compatible(np, "cache"))
                        goto err_out;
                if (of_property_read_u32(np, "cache-level", &level))
                        goto err_out;
                if (level <= levels)
                        goto err_out;

                leaves += of_count_cache_leaves(np);
                levels = level;
        }

        of_node_put(np);
        this_cpu_ci->num_levels = levels;
        this_cpu_ci->num_leaves = leaves;

        return 0;

err_out:
        of_node_put(np);
        return -EINVAL;
}
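/*
 * Walkthrough with a hypothetical topology: a CPU node with a split L1
 * ("i-cache-size" plus "d-cache-size", two leaves) whose
 * next-level-cache chain leads to a unified L2 node ("cache-unified",
 * "cache-level" = <2>, one leaf) ends up with num_levels = 2 and
 * num_leaves = 3.
 */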

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
        return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
        int ret = 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                ret = cache_setup_acpi(cpu);

        return ret;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index, sib_index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        /*
         * Skip setting up the cache properties if the LLC is valid; in
         * that case only the shared cpu_map needs updating, because the
         * cache attributes were populated early, before all the CPUs
         * were brought online.
         */
        if (!last_level_cache_is_valid(cpu)) {
                ret = cache_setup_properties(cpu);
                if (ret)
                        return ret;
        }

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = per_cpu_cacheinfo_idx(cpu, index);

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */

                        for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
                                sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                        cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                        cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                                        break;
                                }
                        }
                }
                /* record the maximum cache line size */
                if (this_leaf->coherency_line_size > coherency_max_size)
                        coherency_max_size = this_leaf->coherency_line_size;
        }

        return 0;
}
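/*
 * Illustrative outcome for a hypothetical two-core cluster where cpu0
 * and cpu1 share a unified L2: once both CPUs have run the setup above,
 * each L2 leaf reports shared_cpu_map = 0-1, while the private L1
 * leaves contain only their owning CPU.
 */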

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index, sib_index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci =
                                                get_cpu_cacheinfo(sibling);

                        if (sibling == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */

                        for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
                                sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                                        break;
                                }
                        }
                }
        }
}

static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);
}

int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
{
        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_ATOMIC);
        if (!per_cpu_cacheinfo(cpu)) {
                cache_leaves(cpu) = 0;
                return -ENOMEM;
        }

        return 0;
}

int fetch_cache_info(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci;
        unsigned int levels = 0, split_levels = 0;
        int ret;

        if (acpi_disabled) {
                ret = init_of_cache_level(cpu);
                if (ret < 0)
                        return ret;
        } else {
                ret = acpi_get_cache_info(cpu, &levels, &split_levels);
                if (ret < 0)
                        return ret;

                this_cpu_ci = get_cpu_cacheinfo(cpu);
                this_cpu_ci->num_levels = levels;
                /*
                 * This assumes that:
                 * - there cannot be any split caches (data/instruction)
                 *   above a unified cache
                 * - data/instruction caches come in pairs
                 */
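                /*
                 * Worked example (hypothetical numbers): levels = 3 with
                 * only L1 split gives split_levels = 1, so num_leaves =
                 * 3 + 1 = 4 (L1i, L1d, a unified L2 and a unified L3).
                 */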
                this_cpu_ci->num_leaves = levels + split_levels;
        }
        if (!cache_leaves(cpu))
                return -ENOENT;

        return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        /*
         * Since early initialization/allocation of the cacheinfo is allowed
         * via fetch_cache_info() and this also gets called as a CPU hotplug
         * callback via cacheinfo_cpu_online, the init/alloc can be skipped
         * as it will happen only once (the cacheinfo memory is never freed).
         * Just populate the cacheinfo.
         */
        if (per_cpu_cacheinfo(cpu))
                goto populate_leaves;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        ret = allocate_cache_info(cpu);
        if (ret)
                return ret;

populate_leaves:
        /*
         * populate_cache_leaves() may completely set up the cache leaves
         * and shared_cpu_map, or it may leave them partially set up.
         */
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;

        /*
         * For systems using DT for the cache hierarchy, fw_token
         * and shared_cpu_map will be set up here only if they are
         * not populated already.
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])
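/*
 * The interface built below appears under sysfs as, for example
 * (illustrative cpu0 with two leaves):
 *
 *      /sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *      /sys/devices/system/cpu/cpu0/cache/index1/{type,level,size,...}
 */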

#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sysfs_emit(buf, "%u\n", this_leaf->object);      \
}
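/*
 * As a sketch of what this macro generates, show_one(level, level)
 * below expands to roughly:
 *
 *      static ssize_t level_show(struct device *dev,
 *                      struct device_attribute *attr, char *buf)
 *      {
 *              struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *              return sysfs_emit(buf, "%u\n", this_leaf->level);
 *      }
 */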

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const char *output;

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                output = "Data";
                break;
        case CACHE_TYPE_INST:
                output = "Instruction";
                break;
        case CACHE_TYPE_UNIFIED:
                output = "Unified";
                break;
        default:
                return -EINVAL;
        }

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        const char *output;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                output = "ReadWriteAllocate";
        else if (ci_attr & CACHE_READ_ALLOCATE)
                output = "ReadAllocate";
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                output = "WriteAllocate";
        else
                return 0;

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sysfs_emit(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sysfs_emit(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
                return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* placeholder for private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, i);
                if (this_leaf->disable_sysfs)
                        continue;
                if (this_leaf->type == CACHE_TYPE_NOCACHE)
                        break;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
        int rc = detect_cache_attributes(cpu);

        if (rc)
                return rc;
        rc = cache_add_dev(cpu);
        if (rc)
                free_cache_attributes(cpu);
        return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
        if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
                cpu_cache_sysfs_exit(cpu);

        free_cache_attributes(cpu);
        return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
        return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
                                 "base/cacheinfo:online",
                                 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
