/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general. E.g. movable only node which cannot satisfy
 * any non movable allocations (see update_nodemask). Page allocator
 * needs to make additional checks for those configurations and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
        return static_branch_unlikely(&cpusets_insane_config_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_node_allowed(node, gfp_mask);
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}

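/*
 * Illustrative sketch, not part of the kernel sources: a caller walking
 * candidate zones would typically gate each zone on cpuset_zone_allowed()
 * so the check costs nothing when no cpusets are in use.  pick_zone() is a
 * hypothetical placeholder used only to show the shape of such a caller:
 *
 *      static struct zone *pick_zone(struct zonelist *zonelist,
 *                                    gfp_t gfp_mask, enum zone_type highidx)
 *      {
 *              struct zoneref *z;
 *              struct zone *zone;
 *
 *              for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *                      if (!cpuset_zone_allowed(zone, gfp_mask))
 *                              continue;
 *                      return zone;
 *              }
 *              return NULL;
 *      }
 */
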
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

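/*
 * Illustrative sketch, not part of the kernel sources: the retry pattern
 * the two helpers above are meant for.  attempt_allocation() is a
 * placeholder for whatever operation depends on a stable mems_allowed:
 *
 *      unsigned int cpuset_mems_cookie;
 *      struct page *page;
 *
 *      do {
 *              cpuset_mems_cookie = read_mems_allowed_begin();
 *              page = attempt_allocation(gfp_mask, order);
 *      } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */
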
static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
        return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                              struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */